author	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-17 11:44:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-17 11:44:51 -0400
commit	a018540141a931f5299a866907b27886916b4374 (patch)
tree	63fd1f1a80bf2e89a7798ab4d9c026fa5f1866fd /net
parent	635ac11964dd1ab955dcd2f888d3ac6fd25419b4 (diff)
parent	602e65a3b0c4f6b09fba19817ff798647a08e706 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) IPVS oops'ers:

    a) Should not reset skb->nf_bridge in the forwarding hook (Lin Ming)

    b) A 3.4 commit can cause ip_vs_control_cleanup to be invoked after
       the ipvs_core_ops are unregistered during rmmod (Julian Anastasov)

 2) ixgbevf bringup failure can crash in TX descriptor cleanup
    (Alexander Duyck)

 3) A missing break statement in an AX25 switch hoses ROSE sockets
    (Alan Cox)

 4) CAIF accesses freed per-net memory (Sjur Brandeland)

 5) The network cgroup code has out-of-bounds accesses (Eric Dumazet)
    and accesses freed memory (Gao Feng)

 6) Fix a crash in SCTP, reported by Dave Jones, caused by freeing an
    association still on a list (Neil Horman)

 7) __netdev_alloc_skb() regresses on GFP_DMA-using drivers because
    that GFP flag is not being retained for the allocation (Eric
    Dumazet)

 8) Missing NULL check in sch_sfb netlink message parsing (Alan Cox)

 9) bnx2 crashes because TX index iteration is not bounded correctly
    (Michael Chan)

10) IPoIB generates warnings in TCP queue collapsing (via
    skb_try_coalesce) because it does not set skb->truesize correctly
    (Eric Dumazet)

11) vlan_info objects leak for the implicit VLAN with ID 0 (Amir
    Hanania)

12) A fix for TX time stamp handling in gianfar does not transfer
    socket ownership from one packet to another correctly, resulting in
    a socket write space imbalance (Eric Dumazet)

13) Julia Lawall found several cases where we do a list iteration and
    then at loop termination unconditionally assume we ended up with a
    real list object, rather than the list head itself (CNIC, RXRPC,
    mISDN)

14) The bonding driver handles procfs moves incorrectly when a device
    it manages is moved from one namespace to another (Eric Biederman)

15) Missing memory barriers in stmmac descriptor accesses result in
    various crashes (Deepak Sikri)

16) Fix handling of broadcast packets in batman-adv (Simon Wunderlich)

17) Properly check the sanity of sendmsg() lengths in ieee802154's
    dgram_sendmsg().  Dave Jones and others have hit and reported this
    bug (Sasha Levin)

18) Some drivers (b44 and b43legacy) on 64-bit machines stopped working
    because of how netdev_alloc_skb() was adjusted.  Such drivers
    should now use alloc_skb() for obtaining bounce buffers (Eric
    Dumazet)

19) atl1c mismanaged its link state in that it stops the queue by hand
    on link down.  The generic networking code takes care of that, and
    this double stop locks the queue down, so simply removing the
    driver's queue stop call fixes the problem (Cloud Ren)

20) Fix out-of-memory due to mis-accounting in the netem packet
    scheduler (Eric Dumazet)

21) If DCB and SR-IOV are configured at the same time in ixgbe, the
    chip will hang because this is not supported (Alexander Duyck)

22) A commit to stop drivers using netdev->base_addr broke the CNIC
    driver (Michael Chan)

23) Timeout regression in ipset caused by an attempt to fix an overflow
    bug (Jozsef Kadlecsik)

24) mac80211 minstrel code allocates memory using an incorrect size
    (Thomas Huehn)

25) llcp_sock_getname() needs to check for a NULL device, otherwise we
    oops (Sasha Levin)

26) mwifiex leaks memory (Bing Zhao)

27) Propagate the iwlwifi fix to iwlegacy: even when we're not
    associated, we need to monitor for stuck queues in the watchdog
    handler (Stanislaw Gruszka)

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  ipvs: fix oops in ip_vs_dst_event on rmmod
  ipvs: fix oops on NAT reply in br_nf context
  ixgbevf: Fix panic when loading driver
  ax25: Fix missing break
  MAINTAINERS: reflect actual changes in IEEE 802.15.4 maintainership
  caif: Fix access to freed pernet memory
  net: cgroup: fix access the unallocated memory in netprio cgroup
  ixgbevf: Prevent RX/TX statistics getting reset to zero
  sctp: Fix list corruption resulting from freeing an association on a list
  net: respect GFP_DMA in __netdev_alloc_skb()
  e1000e: fix test for PHY being accessible on 82577/8/9 and I217
  e1000e: Correct link check logic for 82571 serdes
  sch_sfb: Fix missing NULL check
  bnx2: Fix bug in bnx2_free_tx_skbs().
  IPoIB: fix skb truesize underestimatiom
  net: Fix memory leak - vlan_info struct
  gianfar: fix potential sk_wmem_alloc imbalance
  drivers/net/ethernet/broadcom/cnic.c: remove invalid reference to list iterator variable
  net/rxrpc/ar-peer.c: remove invalid reference to list iterator variable
  drivers/isdn/mISDN/stack.c: remove invalid reference to list iterator variable
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                            |  3
-rw-r--r--  net/ax25/af_ax25.c                          |  1
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c      | 15
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h      |  5
-rw-r--r--  net/batman-adv/soft-interface.c             |  6
-rw-r--r--  net/caif/caif_dev.c                         |  2
-rw-r--r--  net/core/dev.c                              |  8
-rw-r--r--  net/core/netprio_cgroup.c                   | 78
-rw-r--r--  net/core/skbuff.c                           |  2
-rw-r--r--  net/ieee802154/dgram.c                      | 12
-rw-r--r--  net/mac80211/mlme.c                         |  6
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c          |  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c              |  5
-rw-r--r--  net/netfilter/xt_set.c                      |  4
-rw-r--r--  net/nfc/llcp/sock.c                         |  2
-rw-r--r--  net/rxrpc/ar-peer.c                         |  2
-rw-r--r--  net/sched/sch_netem.c                       | 42
-rw-r--r--  net/sched/sch_sfb.c                         |  2
-rw-r--r--  net/sctp/input.c                            |  7
-rw-r--r--  net/sctp/socket.c                           | 12
20 files changed, 135 insertions(+), 81 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 051f7abae66d..779095ded689 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
 	case AX25_P_NETROM:
 		if (ax25_protocol_is_registered(AX25_P_NETROM))
 			return -ESOCKTNOSUPPORT;
+		break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
 	case AX25_P_ROSE:
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8bf97515a77d..c5863f499133 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
  * process the skb.
  *
  */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+	   bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 
 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
 
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 	}
 
 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index e39f93acc28f..dc5227b398d4 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -23,7 +23,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+	   bool is_bcast);
 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 int bla_is_backbone_gw(struct sk_buff *skb,
 		       struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
-			 short vid)
+			 short vid, bool is_bcast)
 {
 	return 0;
 }
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6e2530b02043..a0ec0e4ada4c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batman_header *batadv_header = (struct batman_header *)skb->data;
 	short vid __maybe_unused = -1;
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BAT_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (bla_rx(bat_priv, skb, vid))
+	if (bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 
 	netif_rx(skb);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 554b31289607..8c83c175b03a 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -561,9 +561,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-	unregister_pernet_subsys(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
 	dev_remove_pack(&caif_packet_type);
+	unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
diff --git a/net/core/dev.c b/net/core/dev.c
index 84f01ba81a34..0f28a9e0b8ad 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2444,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..b2e9caa1ad1a 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
+	if (atomic_read(&max_prioidx) < prioidx)
+		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
 }
 
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
 {
 	size_t new_size = sizeof(struct netprio_map) +
 			   ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 
 	if (!new_priomap) {
 		pr_warn("Unable to alloc new priomap!\n");
-		return;
+		return -ENOMEM;
 	}
 
 	for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 	rcu_assign_pointer(dev->priomap, new_priomap);
 	if (old_priomap)
 		kfree_rcu(old_priomap, rcu);
+	return 0;
 }
 
-static void update_netdev_tables(void)
+static int write_update_netdev_table(struct net_device *dev)
 {
+	int ret = 0;
+	u32 max_len;
+	struct netprio_map *map;
+
+	rtnl_lock();
+	max_len = atomic_read(&max_prioidx) + 1;
+	map = rtnl_dereference(dev->priomap);
+	if (!map || map->priomap_len < max_len)
+		ret = extend_netdev_table(dev, max_len);
+	rtnl_unlock();
+
+	return ret;
+}
+
+static int update_netdev_tables(void)
+{
+	int ret = 0;
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx) + 1;
+	u32 max_len;
 	struct netprio_map *map;
 
 	rtnl_lock();
+	max_len = atomic_read(&max_prioidx) + 1;
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if ((!map) ||
-		    (map->priomap_len < max_len))
-			extend_netdev_table(dev, max_len);
+		/*
+		 * don't allocate priomap if we didn't
+		 * change net_prio.ifpriomap (map == NULL),
+		 * this will speed up skb_update_prio.
+		 */
+		if (map && map->priomap_len < max_len) {
+			ret = extend_netdev_table(dev, max_len);
+			if (ret < 0)
+				break;
+		}
 	}
 	rtnl_unlock();
+	return ret;
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
-	int ret;
+	int ret = -EINVAL;
 
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
-	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
-		kfree(cs);
-		return ERR_PTR(-EINVAL);
-	}
+	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+		goto out;
 
 	ret = get_prioidx(&cs->prioidx);
-	if (ret != 0) {
+	if (ret < 0) {
 		pr_warn("No space in priority index array\n");
-		kfree(cs);
-		return ERR_PTR(ret);
+		goto out;
+	}
+
+	ret = update_netdev_tables();
+	if (ret < 0) {
+		put_prioidx(cs->prioidx);
+		goto out;
 	}
 
 	return &cs->css;
+out:
+	kfree(cs);
+	return ERR_PTR(ret);
 }
133 167
134static void cgrp_destroy(struct cgroup *cgrp) 168static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
 	rtnl_lock();
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if (map)
+		if (map && cs->prioidx < map->priomap_len)
 			map->priomap[cs->prioidx] = 0;
 	}
 	rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 		map = rcu_dereference(dev->priomap);
-		priority = map ? map->priomap[prioidx] : 0;
+		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
 		cb->fill(cb, dev->name, priority);
 	}
 	rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 	if (!dev)
 		goto out_free_devname;
 
-	update_netdev_tables();
-	ret = 0;
+	ret = write_update_netdev_table(dev);
+	if (ret < 0)
+		goto out_put_dev;
+
 	rcu_read_lock();
 	map = rcu_dereference(dev->priomap);
 	if (map)
 		map->priomap[prioidx] = priority;
 	rcu_read_unlock();
+
+out_put_dev:
 	dev_put(dev);
 
 out_free_devname:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46a3d23d259e..d124306b81fd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
 		void *data = netdev_alloc_frag(fragsz);
 
 		if (likely(data)) {
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_dev;
+	}
+
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
 
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_skb;
-	}
-
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4bb856de08f..0db5d34a06b6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2174,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 			       sdata->name, mgmt->sa, status_code);
 		ieee80211_destroy_assoc_data(sdata, false);
 	} else {
-		printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
 		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
 			/* oops -- internal error -- send timeout for now */
-			ieee80211_destroy_assoc_data(sdata, true);
-			sta_info_destroy_addr(sdata, mgmt->bssid);
+			ieee80211_destroy_assoc_data(sdata, false);
 			cfg80211_put_bss(*bss);
 			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
 		}
+		printk(KERN_DEBUG "%s: associated\n", sdata->name);
 
 		/*
 		 * destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..f9e51ef8dfa2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 		max_rates = sband->n_bitrates;
 	}
 
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index d43e3c122f7b..84444dda194b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_service *svc;
 	struct ip_vs_dest *dest;
 	unsigned int idx;
 
-	if (event != NETDEV_UNREGISTER)
+	if (event != NETDEV_UNREGISTER || !ipvs)
 		return NOTIFY_DONE;
 	IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
 	EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 		}
 	}
 
-	list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+	list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
 		__ip_vs_dev_reset(dest, dev);
 	}
 	mutex_unlock(&__ip_vs_mutex);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 			    info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 17a707db40eb..e06d458fc719 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 
 	addr->sa_family = AF_NFC;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
 	return peer;
 
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..c412ad0d0308 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
-
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
-
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	struct sk_buff *skb = skb_peek_tail(list);
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
+
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
 	}
 
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 74305c883bd3..30ea4674cabd 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
 	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80564fe03024..8b9b6790a3df 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
 	epb = &ep->base;
 
-	if (hlist_unhashed(&epb->node))
-		return;
-
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b3b8a8d813eb..31c7bfcd9b58 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1231,8 +1231,14 @@ out_free:
 	SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
 			  " kaddrs: %p err: %d\n",
 			  asoc, kaddrs, err);
-	if (asoc)
+	if (asoc) {
+		/* sctp_primitive_ASSOCIATE may have added this association
+		 * To the hash table, try to unhash it, just in case, its a noop
+		 * if it wasn't hashed so we're safe
+		 */
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 	return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_unlock;
 
 out_free:
-	if (new_asoc)
+	if (new_asoc) {
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 out_unlock:
 	sctp_release_sock(sk);
 