author	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-17 11:44:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-17 11:44:51 -0400
commit	a018540141a931f5299a866907b27886916b4374 (patch)
tree	63fd1f1a80bf2e89a7798ab4d9c026fa5f1866fd
parent	635ac11964dd1ab955dcd2f888d3ac6fd25419b4 (diff)
parent	602e65a3b0c4f6b09fba19817ff798647a08e706 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) IPVS oops'ers:
    a) Should not reset skb->nf_bridge in the forwarding hook (Lin Ming)
    b) A 3.4 commit can cause ip_vs_control_cleanup to be invoked after the ipvs_core_ops are unregistered during rmmod (Julian Anastasov)
 2) ixgbevf bringup failure can crash in TX descriptor cleanup (Alexander Duyck)
 3) AX25 switch statement missing a break hoses ROSE sockets (Alan Cox)
 4) CAIF accesses freed per-net memory (Sjur Brandeland)
 5) Network cgroup code has out-of-bounds accesses (Eric Dumazet), and accesses freed memory (Gao Feng)
 6) Fix a crash in SCTP, reported by Dave Jones, caused by freeing an association still on a list (Neil Horman)
 7) __netdev_alloc_skb() regresses on drivers using GFP_DMA because that GFP flag is not retained for the allocation (Eric Dumazet)
 8) Missing NULL check in sch_sfb netlink message parsing (Alan Cox)
 9) bnx2 crashes because TX index iteration is not bounded correctly (Michael Chan)
10) IPoIB generates warnings in TCP queue collapsing (via skb_try_coalesce) because it does not set skb->truesize correctly (Eric Dumazet)
11) vlan_info objects leak for the implicit VLAN with ID 0 (Amir Hanania)
12) A fix for TX time stamp handling in gianfar does not transfer socket ownership from one packet to another correctly, resulting in a socket write space imbalance (Eric Dumazet)
13) Julia Lawall found several cases where we do a list iteration and then, at loop termination, unconditionally assume we ended up with a real list object rather than the list head itself (CNIC, RXRPC, mISDN); a minimal sketch of the pattern follows below.
14) The bonding driver mishandles its procfs entries when a device it manages is moved from one namespace to another (Eric Biederman)
15) Missing memory barriers in stmmac descriptor accesses result in various crashes (Deepak Sikri)
16) Fix handling of broadcast packets in batman-adv (Simon Wunderlich)
17) Properly check the sanity of sendmsg() lengths in ieee802154's dgram_sendmsg(); Dave Jones and others have hit and reported this bug (Sasha Levin)
18) Some drivers (b44 and b43legacy) on 64-bit machines stopped working because of how netdev_alloc_skb() was adjusted; such drivers should now use alloc_skb() for obtaining bounce buffers (a minimal sketch follows the diffstat below) (Eric Dumazet)
19) atl1c mismanaged its link state in that it stopped the queue by hand on link down; the generic networking code takes care of that, and the double stop locked the queue down. Simply removing the driver's queue stop call fixes the problem (Cloud Ren)
20) Fix out-of-memory due to mis-accounting in the netem packet scheduler (Eric Dumazet)
21) If DCB and SR-IOV are configured at the same time in ixgbe, the chip will hang because this is not supported (Alexander Duyck)
22) A commit to stop drivers using netdev->base_addr broke the CNIC driver (Michael Chan)
23) Timeout regression in ipset caused by an attempt to fix an overflow bug (Jozsef Kadlecsik)
24) mac80211 minstrel code allocates memory using an incorrect size (Thomas Huehn)
25) llcp_sock_getname() needs to check for a NULL device, otherwise we oops (Sasha Levin)
26) mwifiex leaks memory (Bing Zhao)
27) Propagate an iwlwifi fix to iwlegacy: even when we're not associated, we need to monitor for stuck queues in the watchdog handler (Stanislaw Gruszka)

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  ipvs: fix oops in ip_vs_dst_event on rmmod
  ipvs: fix oops on NAT reply in br_nf context
  ixgbevf: Fix panic when loading driver
  ax25: Fix missing break
  MAINTAINERS: reflect actual changes in IEEE 802.15.4 maintainership
  caif: Fix access to freed pernet memory
  net: cgroup: fix access the unallocated memory in netprio cgroup
  ixgbevf: Prevent RX/TX statistics getting reset to zero
  sctp: Fix list corruption resulting from freeing an association on a list
  net: respect GFP_DMA in __netdev_alloc_skb()
  e1000e: fix test for PHY being accessible on 82577/8/9 and I217
  e1000e: Correct link check logic for 82571 serdes
  sch_sfb: Fix missing NULL check
  bnx2: Fix bug in bnx2_free_tx_skbs().
  IPoIB: fix skb truesize underestimatiom
  net: Fix memory leak - vlan_info struct
  gianfar: fix potential sk_wmem_alloc imbalance
  drivers/net/ethernet/broadcom/cnic.c: remove invalid reference to list iterator variable
  net/rxrpc/ar-peer.c: remove invalid reference to list iterator variable
  drivers/isdn/mISDN/stack.c: remove invalid reference to list iterator variable
  ...
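As context for item 13, a minimal, hedged sketch of the list-iterator misuse pattern (the struct and function names here are hypothetical, not taken from the affected drivers): when list_for_each_entry() runs to completion without a break, its cursor ends up aliasing the container of the list head, not a real element, so dereferencing it after the loop is invalid.

	#include <linux/list.h>

	struct item {
		struct list_head node;
		int id;
	};

	/* Return the matching element, or NULL -- never the stale cursor. */
	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *it;

		list_for_each_entry(it, head, node) {
			if (it->id == id)
				return it;	/* valid only while inside the loop */
		}

		/*
		 * Here "it" points at the list head's container, not a real
		 * item; the buggy pattern dereferenced it anyway.
		 */
		return NULL;
	}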
-rw-r--r--  MAINTAINERS | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12
-rw-r--r--  drivers/isdn/mISDN/stack.c | 4
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 9
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3
-rw-r--r--  drivers/net/phy/mdio-mux.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 18
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 14
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 2
-rw-r--r--  include/net/ip_vs.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 2
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/ax25/af_ax25.c | 1
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 15
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 5
-rw-r--r--  net/batman-adv/soft-interface.c | 6
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/core/dev.c | 8
-rw-r--r--  net/core/netprio_cgroup.c | 78
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/ieee802154/dgram.c | 12
-rw-r--r--  net/mac80211/mlme.c | 6
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 5
-rw-r--r--  net/netfilter/xt_set.c | 4
-rw-r--r--  net/nfc/llcp/sock.c | 2
-rw-r--r--  net/rxrpc/ar-peer.c | 2
-rw-r--r--  net/sched/sch_netem.c | 42
-rw-r--r--  net/sched/sch_sfb.c | 2
-rw-r--r--  net/sctp/input.c | 7
-rw-r--r--  net/sctp/socket.c | 12
45 files changed, 256 insertions, 144 deletions
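On items 7 and 18: the skbuff.c hunk below makes __netdev_alloc_skb() skip its netdev_alloc_frag() fast path when GFP_DMA is requested, and the b44/b43legacy hunks switch their bounce buffers to alloc_skb(), which hands the zone modifier straight to the page allocator. A minimal sketch of the latter approach, with a hypothetical helper name:

	#include <linux/gfp.h>
	#include <linux/skbuff.h>

	/*
	 * Allocate a bounce buffer that must live in ZONE_DMA. alloc_skb()
	 * honors GFP_DMA directly; the per-CPU fragment cache behind
	 * __netdev_alloc_skb() did not.
	 */
	static struct sk_buff *dma_bounce_skb(unsigned int len)
	{
		return alloc_skb(len, GFP_ATOMIC | GFP_DMA);
	}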
diff --git a/MAINTAINERS b/MAINTAINERS
index 1b71f6ceae0a..c82c343168e8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3433,13 +3433,14 @@ S: Supported
3433F: drivers/idle/i7300_idle.c 3433F: drivers/idle/i7300_idle.c
3434 3434
3435IEEE 802.15.4 SUBSYSTEM 3435IEEE 802.15.4 SUBSYSTEM
3436M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
3436M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 3437M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
3437M: Sergey Lapin <slapin@ossfans.org>
3438L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) 3438L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
3439W: http://apps.sourceforge.net/trac/linux-zigbee 3439W: http://apps.sourceforge.net/trac/linux-zigbee
3440T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git 3440T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
3441S: Maintained 3441S: Maintained
3442F: net/ieee802154/ 3442F: net/ieee802154/
3443F: net/mac802154/
3443F: drivers/ieee802154/ 3444F: drivers/ieee802154/
3444 3445
3445IIO SUBSYSTEM AND DRIVERS 3446IIO SUBSYSTEM AND DRIVERS
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc995e560..f10221f40803 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
123 123
124 skb_frag_size_set(frag, size); 124 skb_frag_size_set(frag, size);
125 skb->data_len += size; 125 skb->data_len += size;
126 skb->truesize += size; 126 skb->truesize += PAGE_SIZE;
127 } else 127 } else
128 skb_put(skb, length); 128 skb_put(skb, length);
129 129
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
156 struct ipoib_dev_priv *priv = netdev_priv(dev); 156 struct ipoib_dev_priv *priv = netdev_priv(dev);
157 struct sk_buff *skb; 157 struct sk_buff *skb;
158 int buf_size; 158 int buf_size;
159 int tailroom;
159 u64 *mapping; 160 u64 *mapping;
160 161
161 if (ipoib_ud_need_sg(priv->max_ib_mtu)) 162 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
162 buf_size = IPOIB_UD_HEAD_SIZE; 163 buf_size = IPOIB_UD_HEAD_SIZE;
163 else 164 tailroom = 128; /* reserve some tailroom for IP/TCP headers */
165 } else {
164 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); 166 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
167 tailroom = 0;
168 }
165 169
166 skb = dev_alloc_skb(buf_size + 4); 170 skb = dev_alloc_skb(buf_size + tailroom + 4);
167 if (unlikely(!skb)) 171 if (unlikely(!skb))
168 return NULL; 172 return NULL;
169 173
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae4445ff2..5f21f629b7ae 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
135 skb = NULL; 135 skb = NULL;
136 else if (*debug & DEBUG_SEND_ERR) 136 else if (*debug & DEBUG_SEND_ERR)
137 printk(KERN_DEBUG 137 printk(KERN_DEBUG
138 "%s ch%d mgr prim(%x) addr(%x) err %d\n", 138 "%s mgr prim(%x) err %d\n",
139 __func__, ch->nr, hh->prim, ch->addr, ret); 139 __func__, hh->prim, ret);
140 } 140 }
141out: 141out:
142 mutex_unlock(&st->lmutex); 142 mutex_unlock(&st->lmutex);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa251dea..2cf084eb9d52 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
6#include "bonding.h" 6#include "bonding.h"
7#include "bond_alb.h" 7#include "bond_alb.h"
8 8
9#ifdef CONFIG_DEBUG_FS 9#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
10 10
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b9c2ae62166d..2ee76993f052 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3227,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
3227 switch (event) { 3227 switch (event) {
3228 case NETDEV_CHANGENAME: 3228 case NETDEV_CHANGENAME:
3229 return bond_event_changename(event_bond); 3229 return bond_event_changename(event_bond);
3230 case NETDEV_UNREGISTER:
3231 bond_remove_proc_entry(event_bond);
3232 break;
3233 case NETDEV_REGISTER:
3234 bond_create_proc_entry(event_bond);
3235 break;
3230 default: 3236 default:
3231 break; 3237 break;
3232 } 3238 }
@@ -4411,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
4411 4417
4412 bond_work_cancel_all(bond); 4418 bond_work_cancel_all(bond);
4413 4419
4414 bond_remove_proc_entry(bond);
4415
4416 bond_debug_unregister(bond); 4420 bond_debug_unregister(bond);
4417 4421
4418 __hw_addr_flush(&bond->mc_list); 4422 __hw_addr_flush(&bond->mc_list);
@@ -4814,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
4814 4818
4815 bond_set_lockdep_class(bond_dev); 4819 bond_set_lockdep_class(bond_dev);
4816 4820
4817 bond_create_proc_entry(bond);
4818 list_add_tail(&bond->bond_list, &bn->dev_list); 4821 list_add_tail(&bond->bond_list, &bn->dev_list);
4819 4822
4820 bond_prepare_sysfs_group(bond); 4823 bond_prepare_sysfs_group(bond);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc15701101b..1f78b63d5efe 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
261 if ((phy_data & BMSR_LSTATUS) == 0) { 261 if ((phy_data & BMSR_LSTATUS) == 0) {
262 /* link down */ 262 /* link down */
263 netif_carrier_off(netdev); 263 netif_carrier_off(netdev);
264 netif_stop_queue(netdev);
265 hw->hibernate = true; 264 hw->hibernate = true;
266 if (atl1c_reset_mac(hw) != 0) 265 if (atl1c_reset_mac(hw) != 0)
267 if (netif_msg_hw(adapter)) 266 if (netif_msg_hw(adapter))
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..d09c6b583d17 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
656 dma_unmap_single(bp->sdev->dma_dev, mapping, 656 dma_unmap_single(bp->sdev->dma_dev, mapping,
657 RX_PKT_BUF_SZ, DMA_FROM_DEVICE); 657 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
658 dev_kfree_skb_any(skb); 658 dev_kfree_skb_any(skb);
659 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); 659 skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
660 if (skb == NULL) 660 if (skb == NULL)
661 return -ENOMEM; 661 return -ENOMEM;
662 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, 662 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
967 dma_unmap_single(bp->sdev->dma_dev, mapping, len, 967 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
968 DMA_TO_DEVICE); 968 DMA_TO_DEVICE);
969 969
970 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); 970 bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
971 if (!bounce_skb) 971 if (!bounce_skb)
972 goto err_out; 972 goto err_out;
973 973
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b74488531..1fa4927a45b1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5372 int k, last; 5372 int k, last;
5373 5373
5374 if (skb == NULL) { 5374 if (skb == NULL) {
5375 j++; 5375 j = NEXT_TX_BD(j);
5376 continue; 5376 continue;
5377 } 5377 }
5378 5378
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5384 tx_buf->skb = NULL; 5384 tx_buf->skb = NULL;
5385 5385
5386 last = tx_buf->nr_frags; 5386 last = tx_buf->nr_frags;
5387 j++; 5387 j = NEXT_TX_BD(j);
5388 for (k = 0; k < last; k++, j++) { 5388 for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
5389 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5389 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5390 dma_unmap_page(&bp->pdev->dev, 5390 dma_unmap_page(&bp->pdev->dev,
5391 dma_unmap_addr(tx_buf, mapping), 5391 dma_unmap_addr(tx_buf, mapping),
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5e2b85..2c89d17cbb29 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
534 } 534 }
535 535
536 if (atomic_read(&ulp_ops->ref_count) != 0) 536 if (atomic_read(&ulp_ops->ref_count) != 0)
537 netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n"); 537 pr_warn("%s: Failed waiting for ref count to go to zero\n",
538 __func__);
538 return 0; 539 return 0;
539 540
540out_unlock: 541out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
1053 1054
1054 uinfo = &udev->cnic_uinfo; 1055 uinfo = &udev->cnic_uinfo;
1055 1056
1056 uinfo->mem[0].addr = dev->netdev->base_addr; 1057 uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1057 uinfo->mem[0].internal_addr = dev->regview; 1058 uinfo->mem[0].internal_addr = dev->regview;
1058 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
1059 uinfo->mem[0].memtype = UIO_MEM_PHYS; 1059 uinfo->mem[0].memtype = UIO_MEM_PHYS;
1060 1060
1061 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 1061 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1062 uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1063 TX_MAX_TSS_RINGS + 1);
1062 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 1064 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1063 PAGE_MASK; 1065 PAGE_MASK;
1064 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 1066 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
1068 1070
1069 uinfo->name = "bnx2_cnic"; 1071 uinfo->name = "bnx2_cnic";
1070 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1072 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1073 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1074
1071 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 1075 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1072 PAGE_MASK; 1076 PAGE_MASK;
1073 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); 1077 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f2db8fca46a1..ab1d80ff0791 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2063,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2063 return NETDEV_TX_OK; 2063 return NETDEV_TX_OK;
2064 } 2064 }
2065 2065
2066 /* Steal sock reference for processing TX time stamps */ 2066 if (skb->sk)
2067 swap(skb_new->sk, skb->sk); 2067 skb_set_owner_w(skb_new, skb->sk);
2068 swap(skb_new->destructor, skb->destructor); 2068 consume_skb(skb);
2069 kfree_skb(skb);
2070 skb = skb_new; 2069 skb = skb_new;
2071 } 2070 }
2072 2071
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 36db4df09aed..1f063dcd8f85 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1572 ctrl = er32(CTRL); 1572 ctrl = er32(CTRL);
1573 status = er32(STATUS); 1573 status = er32(STATUS);
1574 rxcw = er32(RXCW); 1574 rxcw = er32(RXCW);
1575 /* SYNCH bit and IV bit are sticky */
1576 udelay(10);
1577 rxcw = er32(RXCW);
1575 1578
1576 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { 1579 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
1577 1580
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 238ab2f8a5e7..e3a7b07df629 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
325 **/ 325 **/
326static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) 326static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
327{ 327{
328 u16 phy_reg; 328 u16 phy_reg = 0;
329 u32 phy_id; 329 u32 phy_id = 0;
330 s32 ret_val;
331 u16 retry_count;
332
333 for (retry_count = 0; retry_count < 2; retry_count++) {
334 ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
335 if (ret_val || (phy_reg == 0xFFFF))
336 continue;
337 phy_id = (u32)(phy_reg << 16);
330 338
331 e1e_rphy_locked(hw, PHY_ID1, &phy_reg); 339 ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
332 phy_id = (u32)(phy_reg << 16); 340 if (ret_val || (phy_reg == 0xFFFF)) {
333 e1e_rphy_locked(hw, PHY_ID2, &phy_reg); 341 phy_id = 0;
334 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); 342 continue;
343 }
344 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
345 break;
346 }
335 347
336 if (hw->phy.id) { 348 if (hw->phy.id) {
337 if (hw->phy.id == phy_id) 349 if (hw->phy.id == phy_id)
338 return true; 350 return true;
339 } else { 351 } else if (phy_id) {
340 if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK)) 352 hw->phy.id = phy_id;
341 hw->phy.id = phy_id; 353 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
342 return true; 354 return true;
343 } 355 }
344 356
345 return false; 357 /*
358 * In case the PHY needs to be in mdio slow mode,
359 * set slow mode and try to get the PHY id again.
360 */
361 hw->phy.ops.release(hw);
362 ret_val = e1000_set_mdio_slow_mode_hv(hw);
363 if (!ret_val)
364 ret_val = e1000e_get_phy_id(hw);
365 hw->phy.ops.acquire(hw);
366
367 return !ret_val;
346} 368}
347 369
348/** 370/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18ca3bcadf0c..e242104ab471 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6647,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6647 return -EINVAL; 6647 return -EINVAL;
6648 } 6648 }
6649 6649
6650 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6651 e_err(drv, "Enable failed, SR-IOV enabled\n");
6652 return -EINVAL;
6653 }
6654
6650 /* Hardware supports up to 8 traffic classes */ 6655 /* Hardware supports up to 8 traffic classes */
6651 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 6656 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
6652 (hw->mac.type == ixgbe_mac_82598EB && 6657 (hw->mac.type == ixgbe_mac_82598EB &&
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b10..41e32257a4e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
201 unsigned int i, eop, count = 0; 201 unsigned int i, eop, count = 0;
202 unsigned int total_bytes = 0, total_packets = 0; 202 unsigned int total_bytes = 0, total_packets = 0;
203 203
204 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
205 return true;
206
204 i = tx_ring->next_to_clean; 207 i = tx_ring->next_to_clean;
205 eop = tx_ring->tx_buffer_info[i].next_to_watch; 208 eop = tx_ring->tx_buffer_info[i].next_to_watch;
206 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 209 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
969 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 972 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
970 for (i = 0; i < q_vector->txr_count; i++) { 973 for (i = 0; i < q_vector->txr_count; i++) {
971 tx_ring = &(adapter->tx_ring[r_idx]); 974 tx_ring = &(adapter->tx_ring[r_idx]);
972 tx_ring->total_bytes = 0;
973 tx_ring->total_packets = 0;
974 ixgbevf_clean_tx_irq(adapter, tx_ring); 975 ixgbevf_clean_tx_irq(adapter, tx_ring);
975 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 976 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
976 r_idx + 1); 977 r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
994 struct ixgbe_hw *hw = &adapter->hw; 995 struct ixgbe_hw *hw = &adapter->hw;
995 struct ixgbevf_ring *rx_ring; 996 struct ixgbevf_ring *rx_ring;
996 int r_idx; 997 int r_idx;
997 int i;
998
999 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1000 for (i = 0; i < q_vector->rxr_count; i++) {
1001 rx_ring = &(adapter->rx_ring[r_idx]);
1002 rx_ring->total_bytes = 0;
1003 rx_ring->total_packets = 0;
1004 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1005 r_idx + 1);
1006 }
1007 998
1008 if (!q_vector->rxr_count) 999 if (!q_vector->rxr_count)
1009 return IRQ_HANDLED; 1000 return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377da1687..4b785e10f2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
51 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 51 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, 52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
53 csum); 53 csum);
54 54 wmb();
55 entry = (++priv->cur_tx) % txsize; 55 entry = (++priv->cur_tx) % txsize;
56 desc = priv->dma_tx + entry; 56 desc = priv->dma_tx + entry;
57 57
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 len, DMA_TO_DEVICE); 59 len, DMA_TO_DEVICE);
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 60 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); 61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
62 wmb();
62 priv->hw->desc->set_tx_owner(desc); 63 priv->hw->desc->set_tx_owner(desc);
63 priv->tx_skbuff[entry] = NULL; 64 priv->tx_skbuff[entry] = NULL;
64 } else { 65 } else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 51b3b68528ee..ea3003edde18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1212,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1212 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); 1212 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1213 wmb(); 1213 wmb();
1214 priv->hw->desc->set_tx_owner(desc); 1214 priv->hw->desc->set_tx_owner(desc);
1215 wmb();
1215 } 1216 }
1216 1217
1217 /* Interrupt on completition only for the latest segment */ 1218 /* Interrupt on completition only for the latest segment */
@@ -1227,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1227 1228
1228 /* To avoid raise condition */ 1229 /* To avoid raise condition */
1229 priv->hw->desc->set_tx_owner(first); 1230 priv->hw->desc->set_tx_owner(first);
1231 wmb();
1230 1232
1231 priv->cur_tx++; 1233 priv->cur_tx++;
1232 1234
@@ -1290,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1290 } 1292 }
1291 wmb(); 1293 wmb();
1292 priv->hw->desc->set_rx_owner(p + entry); 1294 priv->hw->desc->set_rx_owner(p + entry);
1295 wmb();
1293 } 1296 }
1294} 1297}
1295 1298
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea0674dcde..5c120189ec86 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
46 struct mdio_mux_parent_bus *pb = cb->parent; 46 struct mdio_mux_parent_bus *pb = cb->parent;
47 int r; 47 int r;
48 48
49 mutex_lock(&pb->mii_bus->mdio_lock); 49 /* In theory multiple mdio_mux could be stacked, thus creating
50 * more than a single level of nesting. But in practice,
51 * SINGLE_DEPTH_NESTING will cover the vast majority of use
52 * cases. We use it, instead of trying to handle the general
53 * case.
54 */
55 mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
50 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); 56 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
51 if (r) 57 if (r)
52 goto out; 58 goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
71 77
72 int r; 78 int r;
73 79
74 mutex_lock(&pb->mii_bus->mdio_lock); 80 mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
75 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); 81 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
76 if (r) 82 if (r)
77 goto out; 83 goto out;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b01960fcfbc9..a051cedd64bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -346,6 +346,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
346 .data = BIT(1), /* interface whitelist bitmap */ 346 .data = BIT(1), /* interface whitelist bitmap */
347}; 347};
348 348
349static const struct driver_info qmi_wwan_force_int2 = {
350 .description = "Qualcomm WWAN/QMI device",
351 .flags = FLAG_WWAN,
352 .bind = qmi_wwan_bind_shared,
353 .unbind = qmi_wwan_unbind_shared,
354 .manage_power = qmi_wwan_manage_power,
355 .data = BIT(2), /* interface whitelist bitmap */
356};
357
349static const struct driver_info qmi_wwan_force_int3 = { 358static const struct driver_info qmi_wwan_force_int3 = {
350 .description = "Qualcomm WWAN/QMI device", 359 .description = "Qualcomm WWAN/QMI device",
351 .flags = FLAG_WWAN, 360 .flags = FLAG_WWAN,
@@ -498,6 +507,15 @@ static const struct usb_device_id products[] = {
498 .bInterfaceProtocol = 0xff, 507 .bInterfaceProtocol = 0xff,
499 .driver_info = (unsigned long)&qmi_wwan_force_int4, 508 .driver_info = (unsigned long)&qmi_wwan_force_int4,
500 }, 509 },
510 { /* ZTE MF60 */
511 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
512 .idVendor = 0x19d2,
513 .idProduct = 0x1402,
514 .bInterfaceClass = 0xff,
515 .bInterfaceSubClass = 0xff,
516 .bInterfaceProtocol = 0xff,
517 .driver_info = (unsigned long)&qmi_wwan_force_int2,
518 },
501 { /* Sierra Wireless MC77xx in QMI mode */ 519 { /* Sierra Wireless MC77xx in QMI mode */
502 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 520 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
503 .idVendor = 0x1199, 521 .idVendor = 0x1199,
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd09bd87..c8baf020c20f 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1072 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1072 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1073 /* create a bounce buffer in zone_dma on mapping failure. */ 1073 /* create a bounce buffer in zone_dma on mapping failure. */
1074 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1074 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1075 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 1075 bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1076 if (!bounce_skb) { 1076 if (!bounce_skb) {
1077 ring->current_slot = old_top_slot; 1077 ring->current_slot = old_top_slot;
1078 ring->used_slots = old_used_slots; 1078 ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 509301a5e7e2..ff5d689e13f3 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
3405 return 0; 3405 return 0;
3406 } 3406 }
3407 3407
3408 if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { 3408 if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
3409 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, 3409 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
3410 key_flags); 3410 key_flags);
3411 spin_unlock_irqrestore(&il->sta_lock, flags); 3411 spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
3420 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); 3420 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
3421 il->stations[sta_id].sta.key.key_flags = 3421 il->stations[sta_id].sta.key.key_flags =
3422 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; 3422 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
3423 il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; 3423 il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
3424 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 3424 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3425 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3425 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3426 3426
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index cbf2dc18341f..5d4807c2b56d 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
4767 return; 4767 return;
4768 4768
4769 /* monitor and check for other stuck queues */ 4769 /* monitor and check for other stuck queues */
4770 if (il_is_any_associated(il)) { 4770 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4771 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { 4771 /* skip as we already checked the command queue */
4772 /* skip as we already checked the command queue */ 4772 if (cnt == il->cmd_queue)
4773 if (cnt == il->cmd_queue) 4773 continue;
4774 continue; 4774 if (il_check_stuck_queue(il, cnt))
4775 if (il_check_stuck_queue(il, cnt)) 4775 return;
4776 return;
4777 }
4778 } 4776 }
4779 4777
4780 mod_timer(&il->watchdog, 4778 mod_timer(&il->watchdog,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ce61b6fae1c9..5c7fd185373c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -958,6 +958,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
958 case NL80211_HIDDEN_SSID_ZERO_CONTENTS: 958 case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
959 /* firmware doesn't support this type of hidden SSID */ 959 /* firmware doesn't support this type of hidden SSID */
960 default: 960 default:
961 kfree(bss_cfg);
961 return -EINVAL; 962 return -EINVAL;
962 } 963 }
963 964
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1ed92f6..74ecc33fdd90 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
436 case QID_RX: 436 case QID_RX:
437 if (!rt2x00queue_full(queue)) 437 if (!rt2x00queue_full(queue))
438 rt2x00queue_for_each_entry(queue, 438 rt2x00queue_for_each_entry(queue,
439 Q_INDEX_DONE,
440 Q_INDEX, 439 Q_INDEX,
440 Q_INDEX_DONE,
441 NULL, 441 NULL,
442 rt2x00usb_kick_rx_entry); 442 rt2x00usb_kick_rx_entry);
443 break; 443 break;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index d6146b4811c2..95374d1696a1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
1425 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1425 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1426 1426
1427 if (!ct || !nf_ct_is_untracked(ct)) { 1427 if (!ct || !nf_ct_is_untracked(ct)) {
1428 nf_reset(skb); 1428 nf_conntrack_put(skb->nfct);
1429 skb->nfct = &nf_ct_untracked_get()->ct_general; 1429 skb->nfct = &nf_ct_untracked_get()->ct_general;
1430 skb->nfctinfo = IP_CT_NEW; 1430 skb->nfctinfo = IP_CT_NEW;
1431 nf_conntrack_get(skb->nfct); 1431 nf_conntrack_get(skb->nfct);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index a88fb6939387..e1ce1048fe5f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
78 struct net *net = nf_ct_net(ct); 78 struct net *net = nf_ct_net(ct);
79 struct nf_conntrack_ecache *e; 79 struct nf_conntrack_ecache *e;
80 80
81 if (net->ct.nf_conntrack_event_cb == NULL) 81 if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
82 return; 82 return;
83 83
84 e = nf_ct_ecache_find(ct); 84 e = nf_ct_ecache_find(ct);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
403 break; 403 break;
404 404
405 case NETDEV_DOWN: 405 case NETDEV_DOWN:
406 if (dev->features & NETIF_F_HW_VLAN_FILTER)
407 vlan_vid_del(dev, 0);
408
406 /* Put all VLANs for this dev in the down state too. */ 409 /* Put all VLANs for this dev in the down state too. */
407 for (i = 0; i < VLAN_N_VID; i++) { 410 for (i = 0; i < VLAN_N_VID; i++) {
408 vlandev = vlan_group_get_device(grp, i); 411 vlandev = vlan_group_get_device(grp, i);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 051f7abae66d..779095ded689 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
842 case AX25_P_NETROM: 842 case AX25_P_NETROM:
843 if (ax25_protocol_is_registered(AX25_P_NETROM)) 843 if (ax25_protocol_is_registered(AX25_P_NETROM))
844 return -ESOCKTNOSUPPORT; 844 return -ESOCKTNOSUPPORT;
845 break;
845#endif 846#endif
846#ifdef CONFIG_ROSE_MODULE 847#ifdef CONFIG_ROSE_MODULE
847 case AX25_P_ROSE: 848 case AX25_P_ROSE:
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8bf97515a77d..c5863f499133 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
1351 * @bat_priv: the bat priv with all the soft interface information 1351 * @bat_priv: the bat priv with all the soft interface information
1352 * @skb: the frame to be checked 1352 * @skb: the frame to be checked
1353 * @vid: the VLAN ID of the frame 1353 * @vid: the VLAN ID of the frame
1354 * @is_bcast: the packet came in a broadcast packet type.
1354 * 1355 *
1355 * bla_rx avoidance checks if: 1356 * bla_rx avoidance checks if:
1356 * * we have to race for a claim 1357 * * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
1361 * process the skb. 1362 * process the skb.
1362 * 1363 *
1363 */ 1364 */
1364int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) 1365int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
1366 bool is_bcast)
1365{ 1367{
1366 struct ethhdr *ethhdr; 1368 struct ethhdr *ethhdr;
1367 struct claim search_claim, *claim = NULL; 1369 struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1380 1382
1381 if (unlikely(atomic_read(&bat_priv->bla_num_requests))) 1383 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1382 /* don't allow broadcasts while requests are in flight */ 1384 /* don't allow broadcasts while requests are in flight */
1383 if (is_multicast_ether_addr(ethhdr->h_dest)) 1385 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1384 goto handled; 1386 goto handled;
1385 1387
1386 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); 1388 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1406 } 1408 }
1407 1409
1408 /* if it is a broadcast ... */ 1410 /* if it is a broadcast ... */
1409 if (is_multicast_ether_addr(ethhdr->h_dest)) { 1411 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1410 /* ... drop it. the responsible gateway is in charge. */ 1412 /* ... drop it. the responsible gateway is in charge.
1413 *
1414 * We need to check is_bcast because with the gateway
1415 * feature, broadcasts (like DHCP requests) may be sent
1416 * using a unicast packet type.
1417 */
1411 goto handled; 1418 goto handled;
1412 } else { 1419 } else {
1413 /* seems the client considers us as its best gateway. 1420 /* seems the client considers us as its best gateway.
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index e39f93acc28f..dc5227b398d4 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -23,7 +23,8 @@
23#define _NET_BATMAN_ADV_BLA_H_ 23#define _NET_BATMAN_ADV_BLA_H_
24 24
25#ifdef CONFIG_BATMAN_ADV_BLA 25#ifdef CONFIG_BATMAN_ADV_BLA
26int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); 26int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
27 bool is_bcast);
27int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); 28int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
28int bla_is_backbone_gw(struct sk_buff *skb, 29int bla_is_backbone_gw(struct sk_buff *skb,
29 struct orig_node *orig_node, int hdr_size); 30 struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
41#else /* ifdef CONFIG_BATMAN_ADV_BLA */ 42#else /* ifdef CONFIG_BATMAN_ADV_BLA */
42 43
43static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, 44static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
44 short vid) 45 short vid, bool is_bcast)
45{ 46{
46 return 0; 47 return 0;
47} 48}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6e2530b02043..a0ec0e4ada4c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
256 struct bat_priv *bat_priv = netdev_priv(soft_iface); 256 struct bat_priv *bat_priv = netdev_priv(soft_iface);
257 struct ethhdr *ethhdr; 257 struct ethhdr *ethhdr;
258 struct vlan_ethhdr *vhdr; 258 struct vlan_ethhdr *vhdr;
259 struct batman_header *batadv_header = (struct batman_header *)skb->data;
259 short vid __maybe_unused = -1; 260 short vid __maybe_unused = -1;
261 bool is_bcast;
262
263 is_bcast = (batadv_header->packet_type == BAT_BCAST);
260 264
261 /* check if enough space is available for pulling, and pull */ 265 /* check if enough space is available for pulling, and pull */
262 if (!pskb_may_pull(skb, hdr_size)) 266 if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
302 /* Let the bridge loop avoidance check the packet. If will 306 /* Let the bridge loop avoidance check the packet. If will
303 * not handle it, we can safely push it up. 307 * not handle it, we can safely push it up.
304 */ 308 */
305 if (bla_rx(bat_priv, skb, vid)) 309 if (bla_rx(bat_priv, skb, vid, is_bcast))
306 goto out; 310 goto out;
307 311
308 netif_rx(skb); 312 netif_rx(skb);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 554b31289607..8c83c175b03a 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -561,9 +561,9 @@ static int __init caif_device_init(void)
561 561
562static void __exit caif_device_exit(void) 562static void __exit caif_device_exit(void)
563{ 563{
564 unregister_pernet_subsys(&caif_net_ops);
565 unregister_netdevice_notifier(&caif_device_notifier); 564 unregister_netdevice_notifier(&caif_device_notifier);
566 dev_remove_pack(&caif_packet_type); 565 dev_remove_pack(&caif_packet_type);
566 unregister_pernet_subsys(&caif_net_ops);
567} 567}
568 568
569module_init(caif_device_init); 569module_init(caif_device_init);
diff --git a/net/core/dev.c b/net/core/dev.c
index 84f01ba81a34..0f28a9e0b8ad 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2444,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
2444{ 2444{
2445 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2445 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2446 2446
2447 if ((!skb->priority) && (skb->sk) && map) 2447 if (!skb->priority && skb->sk && map) {
2448 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; 2448 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2449
2450 if (prioidx < map->priomap_len)
2451 skb->priority = map->priomap[prioidx];
2452 }
2449} 2453}
2450#else 2454#else
2451#define skb_update_prio(skb) 2455#define skb_update_prio(skb)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..b2e9caa1ad1a 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
49 return -ENOSPC; 49 return -ENOSPC;
50 } 50 }
51 set_bit(prioidx, prioidx_map); 51 set_bit(prioidx, prioidx_map);
52 if (atomic_read(&max_prioidx) < prioidx)
53 atomic_set(&max_prioidx, prioidx);
52 spin_unlock_irqrestore(&prioidx_map_lock, flags); 54 spin_unlock_irqrestore(&prioidx_map_lock, flags);
53 atomic_set(&max_prioidx, prioidx);
54 *prio = prioidx; 55 *prio = prioidx;
55 return 0; 56 return 0;
56} 57}
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
64 spin_unlock_irqrestore(&prioidx_map_lock, flags); 65 spin_unlock_irqrestore(&prioidx_map_lock, flags);
65} 66}
66 67
67static void extend_netdev_table(struct net_device *dev, u32 new_len) 68static int extend_netdev_table(struct net_device *dev, u32 new_len)
68{ 69{
69 size_t new_size = sizeof(struct netprio_map) + 70 size_t new_size = sizeof(struct netprio_map) +
70 ((sizeof(u32) * new_len)); 71 ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
76 77
77 if (!new_priomap) { 78 if (!new_priomap) {
78 pr_warn("Unable to alloc new priomap!\n"); 79 pr_warn("Unable to alloc new priomap!\n");
79 return; 80 return -ENOMEM;
80 } 81 }
81 82
82 for (i = 0; 83 for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
89 rcu_assign_pointer(dev->priomap, new_priomap); 90 rcu_assign_pointer(dev->priomap, new_priomap);
90 if (old_priomap) 91 if (old_priomap)
91 kfree_rcu(old_priomap, rcu); 92 kfree_rcu(old_priomap, rcu);
93 return 0;
92} 94}
93 95
94static void update_netdev_tables(void) 96static int write_update_netdev_table(struct net_device *dev)
95{ 97{
98 int ret = 0;
99 u32 max_len;
100 struct netprio_map *map;
101
102 rtnl_lock();
103 max_len = atomic_read(&max_prioidx) + 1;
104 map = rtnl_dereference(dev->priomap);
105 if (!map || map->priomap_len < max_len)
106 ret = extend_netdev_table(dev, max_len);
107 rtnl_unlock();
108
109 return ret;
110}
111
112static int update_netdev_tables(void)
113{
114 int ret = 0;
96 struct net_device *dev; 115 struct net_device *dev;
97 u32 max_len = atomic_read(&max_prioidx) + 1; 116 u32 max_len;
98 struct netprio_map *map; 117 struct netprio_map *map;
99 118
100 rtnl_lock(); 119 rtnl_lock();
120 max_len = atomic_read(&max_prioidx) + 1;
101 for_each_netdev(&init_net, dev) { 121 for_each_netdev(&init_net, dev) {
102 map = rtnl_dereference(dev->priomap); 122 map = rtnl_dereference(dev->priomap);
103 if ((!map) || 123 /*
104 (map->priomap_len < max_len)) 124 * don't allocate priomap if we didn't
105 extend_netdev_table(dev, max_len); 125 * change net_prio.ifpriomap (map == NULL),
126 * this will speed up skb_update_prio.
127 */
128 if (map && map->priomap_len < max_len) {
129 ret = extend_netdev_table(dev, max_len);
130 if (ret < 0)
131 break;
132 }
106 } 133 }
107 rtnl_unlock(); 134 rtnl_unlock();
135 return ret;
108} 136}
109 137
110static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) 138static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
111{ 139{
112 struct cgroup_netprio_state *cs; 140 struct cgroup_netprio_state *cs;
113 int ret; 141 int ret = -EINVAL;
114 142
115 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 143 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
116 if (!cs) 144 if (!cs)
117 return ERR_PTR(-ENOMEM); 145 return ERR_PTR(-ENOMEM);
118 146
119 if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { 147 if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
120 kfree(cs); 148 goto out;
121 return ERR_PTR(-EINVAL);
122 }
123 149
124 ret = get_prioidx(&cs->prioidx); 150 ret = get_prioidx(&cs->prioidx);
125 if (ret != 0) { 151 if (ret < 0) {
126 pr_warn("No space in priority index array\n"); 152 pr_warn("No space in priority index array\n");
127 kfree(cs); 153 goto out;
128 return ERR_PTR(ret); 154 }
155
156 ret = update_netdev_tables();
157 if (ret < 0) {
158 put_prioidx(cs->prioidx);
159 goto out;
129 } 160 }
130 161
131 return &cs->css; 162 return &cs->css;
163out:
164 kfree(cs);
165 return ERR_PTR(ret);
132} 166}
133 167
134static void cgrp_destroy(struct cgroup *cgrp) 168static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
141 rtnl_lock(); 175 rtnl_lock();
142 for_each_netdev(&init_net, dev) { 176 for_each_netdev(&init_net, dev) {
143 map = rtnl_dereference(dev->priomap); 177 map = rtnl_dereference(dev->priomap);
144 if (map) 178 if (map && cs->prioidx < map->priomap_len)
145 map->priomap[cs->prioidx] = 0; 179 map->priomap[cs->prioidx] = 0;
146 } 180 }
147 rtnl_unlock(); 181 rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
165 rcu_read_lock(); 199 rcu_read_lock();
166 for_each_netdev_rcu(&init_net, dev) { 200 for_each_netdev_rcu(&init_net, dev) {
167 map = rcu_dereference(dev->priomap); 201 map = rcu_dereference(dev->priomap);
168 priority = map ? map->priomap[prioidx] : 0; 202 priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
169 cb->fill(cb, dev->name, priority); 203 cb->fill(cb, dev->name, priority);
170 } 204 }
171 rcu_read_unlock(); 205 rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
220 if (!dev) 254 if (!dev)
221 goto out_free_devname; 255 goto out_free_devname;
222 256
223 update_netdev_tables(); 257 ret = write_update_netdev_table(dev);
224 ret = 0; 258 if (ret < 0)
259 goto out_put_dev;
260
225 rcu_read_lock(); 261 rcu_read_lock();
226 map = rcu_dereference(dev->priomap); 262 map = rcu_dereference(dev->priomap);
227 if (map) 263 if (map)
228 map->priomap[prioidx] = priority; 264 map->priomap[prioidx] = priority;
229 rcu_read_unlock(); 265 rcu_read_unlock();
266
267out_put_dev:
230 dev_put(dev); 268 dev_put(dev);
231 269
232out_free_devname: 270out_free_devname:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46a3d23d259e..d124306b81fd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
353 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + 353 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
354 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 354 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
355 355
356 if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { 356 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
357 void *data = netdev_alloc_frag(fragsz); 357 void *data = netdev_alloc_frag(fragsz);
358 358
359 if (likely(data)) { 359 if (likely(data)) {
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
230 mtu = dev->mtu; 230 mtu = dev->mtu;
231 pr_debug("name = %s, mtu = %u\n", dev->name, mtu); 231 pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
232 232
233 if (size > mtu) {
234 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
235 err = -EINVAL;
236 goto out_dev;
237 }
238
233 hlen = LL_RESERVED_SPACE(dev); 239 hlen = LL_RESERVED_SPACE(dev);
234 tlen = dev->needed_tailroom; 240 tlen = dev->needed_tailroom;
235 skb = sock_alloc_send_skb(sk, hlen + tlen + size, 241 skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
258 if (err < 0) 264 if (err < 0)
259 goto out_skb; 265 goto out_skb;
260 266
261 if (size > mtu) {
262 pr_debug("size = %Zu, mtu = %u\n", size, mtu);
263 err = -EINVAL;
264 goto out_skb;
265 }
266
267 skb->dev = dev; 267 skb->dev = dev;
268 skb->sk = sk; 268 skb->sk = sk;
269 skb->protocol = htons(ETH_P_IEEE802154); 269 skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4bb856de08f..0db5d34a06b6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2174,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2174 sdata->name, mgmt->sa, status_code); 2174 sdata->name, mgmt->sa, status_code);
2175 ieee80211_destroy_assoc_data(sdata, false); 2175 ieee80211_destroy_assoc_data(sdata, false);
2176 } else { 2176 } else {
2177 printk(KERN_DEBUG "%s: associated\n", sdata->name);
2178
2179 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { 2177 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2180 /* oops -- internal error -- send timeout for now */ 2178 /* oops -- internal error -- send timeout for now */
2181 ieee80211_destroy_assoc_data(sdata, true); 2179 ieee80211_destroy_assoc_data(sdata, false);
2182 sta_info_destroy_addr(sdata, mgmt->bssid);
2183 cfg80211_put_bss(*bss); 2180 cfg80211_put_bss(*bss);
2184 return RX_MGMT_CFG80211_ASSOC_TIMEOUT; 2181 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2185 } 2182 }
2183 printk(KERN_DEBUG "%s: associated\n", sdata->name);
2186 2184
2187 /* 2185 /*
2188 * destroy assoc_data afterwards, as otherwise an idle 2186 * destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..f9e51ef8dfa2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
809 max_rates = sband->n_bitrates; 809 max_rates = sband->n_bitrates;
810 } 810 }
811 811
812 msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp); 812 msp = kzalloc(sizeof(*msp), gfp);
813 if (!msp) 813 if (!msp)
814 return NULL; 814 return NULL;
815 815
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index d43e3c122f7b..84444dda194b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1521{ 1521{
1522 struct net_device *dev = ptr; 1522 struct net_device *dev = ptr;
1523 struct net *net = dev_net(dev); 1523 struct net *net = dev_net(dev);
1524 struct netns_ipvs *ipvs = net_ipvs(net);
1524 struct ip_vs_service *svc; 1525 struct ip_vs_service *svc;
1525 struct ip_vs_dest *dest; 1526 struct ip_vs_dest *dest;
1526 unsigned int idx; 1527 unsigned int idx;
1527 1528
1528 if (event != NETDEV_UNREGISTER) 1529 if (event != NETDEV_UNREGISTER || !ipvs)
1529 return NOTIFY_DONE; 1530 return NOTIFY_DONE;
1530 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); 1531 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1531 EnterFunction(2); 1532 EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1551 } 1552 }
1552 } 1553 }
1553 1554
1554 list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { 1555 list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
1555 __ip_vs_dev_reset(dest, dev); 1556 __ip_vs_dev_reset(dest, dev);
1556 } 1557 }
1557 mutex_unlock(&__ip_vs_mutex); 1558 mutex_unlock(&__ip_vs_mutex);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/netfilter/x_tables.h> 17#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter/xt_set.h> 18#include <linux/netfilter/xt_set.h>
19#include <linux/netfilter/ipset/ip_set_timeout.h>
19 20
20MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 22MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
310 info->del_set.flags, 0, UINT_MAX); 311 info->del_set.flags, 0, UINT_MAX);
311 312
312 /* Normalize to fit into jiffies */ 313 /* Normalize to fit into jiffies */
313 if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC) 314 if (add_opt.timeout != IPSET_NO_TIMEOUT &&
315 add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
314 add_opt.timeout = UINT_MAX/MSEC_PER_SEC; 316 add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
315 if (info->add_set.index != IPSET_INVALID_ID) 317 if (info->add_set.index != IPSET_INVALID_ID)
316 ip_set_add(info->add_set.index, skb, par, &add_opt); 318 ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 17a707db40eb..e06d458fc719 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 
 	addr->sa_family = AF_NFC;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
 	return peer;
 
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
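
This is one of the Julia Lawall-reported issues from the pull message: on the not-found path the search cursor no longer refers to a valid peer, so logging peer->debug_id reads junk. A simplified userspace analogue of the not-found path (plain pointers, not the rxrpc lists):

#include <stdio.h>
#include <stddef.h>

struct peer {
	int debug_id;
	struct peer *next;
};

/* On the not-found path the cursor has run off the end of the list, so
 * it must not be dereferenced, e.g. for a debug message.
 */
static struct peer *find_peer(struct peer *head, int id)
{
	struct peer *p;

	for (p = head; p; p = p->next)
		if (p->debug_id == id)
			return p;

	printf("new peer\n");	/* not: printf("new peer %d\n", p->debug_id) */
	return NULL;
}

int main(void)
{
	struct peer b = { .debug_id = 2, .next = NULL };
	struct peer a = { .debug_id = 1, .next = &b };

	find_peer(&a, 3);
	return 0;
}
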
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..c412ad0d0308 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
-
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
-
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
-
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
+	struct sk_buff *skb = skb_peek_tail(list);
+
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
+
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
+	}
+
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
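
The netem rework moves the only failure condition (queue length against sch->limit) up into netem_enqueue(), before duplication and backlog accounting, so tfifo_enqueue() becomes a void, infallible time-ordered insert and the ret bookkeeping disappears. A toy sketch of the same split using a plain array queue; none of this is the qdisc API:

#include <stdio.h>

#define LIMIT 4

static int queue[LIMIT];
static int qlen;

/* Infallible insert: the caller has already checked the limit, so this
 * helper only has to keep the queue sorted by "send time".
 */
static void tfifo_insert(int time_to_send)
{
	int i = qlen++;

	while (i > 0 && queue[i - 1] > time_to_send) {
		queue[i] = queue[i - 1];
		i--;
	}
	queue[i] = time_to_send;
}

/* Single admission check up front; everything after it cannot fail. */
static int enqueue(int time_to_send)
{
	if (qlen >= LIMIT)
		return -1;	/* drop, accounted for exactly once */

	tfifo_insert(time_to_send);
	return 0;
}

int main(void)
{
	int t;

	for (t = 5; t >= 0; t--)
		if (enqueue(t) < 0)
			printf("dropped %d\n", t);

	for (t = 0; t < qlen; t++)
		printf("%d ", queue[t]);
	printf("\n");
	return 0;
}
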
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 74305c883bd3..30ea4674cabd 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
 	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
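
nla_nest_start() returns NULL when the message has no room left for the nested attribute header, so sfb_dump() must bail out rather than hand a NULL opts to nla_nest_end() later. The same "reserve may fail, check before writing" shape in a standalone sketch with a plain buffer instead of the netlink API:

#include <stdio.h>
#include <string.h>

static char msg[16];
static size_t used;

/* Reserve len bytes in msg; returns a pointer to the reserved region,
 * or NULL when the buffer is too small -- analogous to nla_nest_start()
 * failing on a full skb.
 */
static char *reserve(size_t len)
{
	if (used + len > sizeof(msg))
		return NULL;
	used += len;
	return msg + used - len;
}

static int dump(const char *parms, size_t len)
{
	char *opts = reserve(len);

	if (opts == NULL)
		return -1;	/* goto nla_put_failure in the real code */

	memcpy(opts, parms, len);
	return 0;
}

int main(void)
{
	printf("%d\n", dump("small", 6));		/* fits */
	printf("%d\n", dump("waaaay too large", 17));	/* reserve fails */
	return 0;
}
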
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80564fe03024..8b9b6790a3df 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
 	epb = &ep->base;
 
-	if (hlist_unhashed(&epb->node))
-		return;
-
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
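
Switching from __hlist_del() to hlist_del_init() leaves the node in a clean "unhashed" state, so a second unhash (see the sctp/socket.c error paths below) is harmless instead of corrupting the chain. A minimal doubly linked list sketch of delete-and-reinit; these are illustrative types, not the kernel hlist:

#include <stdio.h>

struct node {
	struct node *prev, *next;
};

static void list_init(struct node *n)
{
	n->prev = n->next = n;
}

static void list_add(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Unlink and immediately reinitialise the node, so calling this again
 * on an already-removed node just relinks it to itself.
 */
static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

int main(void)
{
	struct node head, a;

	list_init(&head);
	list_add(&head, &a);

	list_del_init(&a);
	list_del_init(&a);	/* safe: second call is harmless */

	printf("%s\n", head.next == &head ? "list empty" : "corrupted");
	return 0;
}
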
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b3b8a8d813eb..31c7bfcd9b58 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1231,8 +1231,14 @@ out_free:
 	SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
 			  " kaddrs: %p err: %d\n",
 			  asoc, kaddrs, err);
-	if (asoc)
+	if (asoc) {
+		/* sctp_primitive_ASSOCIATE may have added this association
+		 * To the hash table, try to unhash it, just in case, its a noop
+		 * if it wasn't hashed so we're safe
+		 */
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 	return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_unlock;
 
 out_free:
-	if (new_asoc)
+	if (new_asoc) {
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 out_unlock:
 	sctp_release_sock(sk);
 
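
Both sctp error paths apply the same ordering rule: pull the association out of any global table it may have been added to before freeing it, relying on the now-idempotent unhash when it was never hashed at all. A compact sketch of that error-path ordering, with an illustrative registry rather than the SCTP code:

#include <stdio.h>
#include <stdlib.h>

struct assoc {
	int id;
};

static struct assoc *hashtable[8];

static void unhash(struct assoc *a)
{
	int i;

	/* Idempotent: a no-op if the association was never hashed. */
	for (i = 0; i < 8; i++)
		if (hashtable[i] == a)
			hashtable[i] = NULL;
}

static int connect_assoc(int id, int fail)
{
	struct assoc *a = calloc(1, sizeof(*a));

	if (!a)
		return -1;
	a->id = id;
	hashtable[id % 8] = a;	/* the object is published to a global table */

	if (fail) {
		/* Error path: unhash first, then free, so the table never
		 * keeps a dangling pointer to freed memory.
		 */
		unhash(a);
		free(a);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", connect_assoc(3, 1));	/* fails, no dangling entry */
	printf("%p\n", (void *)hashtable[3]);	/* NULL */
	return 0;
}
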