author    David S. Miller <davem@davemloft.net>    2010-06-26 13:27:00 -0400
committer David S. Miller <davem@davemloft.net>    2010-06-26 13:27:00 -0400
commit    c67dda14389205f0a223c5089307495290939b3b (patch)
tree      fad0bb26b28703d02a22ebdd44d94eabac4a2ade /net
parent    43bc2db47292a824152145253b1dd2847e7312a3 (diff)
parent    7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/caif/cfrfml.c | 2
-rw-r--r--  net/caif/cfserl.c | 6
-rw-r--r--  net/caif/cfveil.c | 2
-rw-r--r--  net/core/datagram.c | 6
-rw-r--r--  net/core/dev.c | 33
-rw-r--r--  net/core/drop_monitor.c | 12
-rw-r--r--  net/core/gen_estimator.c | 15
-rw-r--r--  net/core/neighbour.c | 1
-rw-r--r--  net/core/pktgen.c | 2
-rw-r--r--  net/core/rtnetlink.c | 26
-rw-r--r--  net/core/skbuff.c | 42
-rw-r--r--  net/core/sock.c | 33
-rw-r--r--  net/ipv4/Kconfig | 10
-rw-r--r--  net/ipv4/ipmr.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp_hybla.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 7
-rw-r--r--  net/ipv4/udp.c | 18
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 8
-rw-r--r--  net/ipv6/mcast.c | 5
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/udp.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/iucv/iucv.c | 9
-rw-r--r--  net/mac80211/agg-tx.c | 6
-rw-r--r--  net/mac80211/chan.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 2
-rw-r--r--  net/mac80211/mlme.c | 92
-rw-r--r--  net/mac80211/rx.c | 16
-rw-r--r--  net/netfilter/x_tables.c | 17
-rw-r--r--  net/netfilter/xt_TEE.c | 4
-rw-r--r--  net/phonet/pep.c | 6
-rw-r--r--  net/rds/ib_cm.c | 1
-rw-r--r--  net/rds/iw_cm.c | 1
-rw-r--r--  net/sched/act_nat.c | 4
-rw-r--r--  net/sched/act_pedit.c | 24
-rw-r--r--  net/sched/cls_u32.c | 49
-rw-r--r--  net/sunrpc/xprtsock.c | 29
-rw-r--r--  net/xfrm/xfrm_output.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 1
47 files changed, 379 insertions, 158 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
 	netif_carrier_off(dev);
 
 	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
-	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					 IFF_MASTER | IFF_SLAVE);
 	dev->iflink = real_dev->ifindex;
 	dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
 					 (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
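
The caif change above (repeated in cfveil.c below) fixes a C operator-precedence bug: "!" binds tighter than ">", so "!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE" compares the boolean !len against the limit and the oversize check can never fire. A minimal standalone illustration (the values are hypothetical, not CAIF code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 9000, max = 4096;

		/* '!' applies first: (!len) is 0 here, and 0 > 4096 is false,
		 * so the broken check never fires even for oversize input. */
		if (!len > max)
			printf("broken check fired\n");	/* unreachable */

		/* the intended comparison */
		if (len > max)
			printf("packet too large\n");	/* fires */
		return 0;
	}
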
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 	u8 stx = CFSERL_STX;
 	int ret;
 	u16 expectlen = 0;
+
 	caif_assert(newpkt != NULL);
 	spin_lock(&layr->sync);
 
 	if (layr->incomplete_frm != NULL) {
-
 		layr->incomplete_frm =
 		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
-		if (pkt == NULL)
+		if (pkt == NULL) {
+			spin_unlock(&layr->sync);
 			return -ENOMEM;
+		}
 	} else {
 		pkt = newpkt;
 	}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 				__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+	bool slow;
+
 	if (likely(atomic_read(&skb->users) == 1))
 		smp_rmb();
 	else if (likely(!atomic_dec_and_test(&skb->users)))
 		return;
 
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	skb_orphan(skb);
 	sk_mem_reclaim_partial(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	/* skb is now orphaned, can be freed outside of locked section */
 	__kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624e..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2795,7 +2793,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
-	struct net_device *null_or_bond;
+	struct net_device *orig_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
@@ -2868,10 +2877,10 @@ ncls:
 	 * device that may have registered for a specific ptype. The
 	 * handler may have to adjust skb->dev and orig_dev.
 	 */
-	null_or_bond = NULL;
+	orig_or_bond = orig_dev;
 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-		null_or_bond = vlan_dev_real_dev(skb->dev);
+		orig_or_bond = vlan_dev_real_dev(skb->dev);
 	}
 
 	type = skb->protocol;
@@ -2879,7 +2888,7 @@ ncls:
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type && (ptype->dev == null_or_orig ||
 		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
-		     ptype->dev == null_or_bond)) {
+		     ptype->dev == orig_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
 	return;
 }
 
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
 {
 	trace_drop_common(skb, location);
 }
 
-static void trace_napi_poll_hit(struct napi_struct *napi)
+static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
 {
 	struct dm_hw_stat_delta *new_stat;
 
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
 
 	switch (state) {
 	case TRACE_ON:
-		rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
-		rc |= register_trace_napi_poll(trace_napi_poll_hit);
+		rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+		rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
 		break;
 	case TRACE_OFF:
-		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
-		rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
+		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+		rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
 
 		tracepoint_synchronize_unregister();
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
- * NOTE: Called under rtnl_mutex
 */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 				kfree_skb(buff);
 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 			}
+			skb_dst_force(skb);
 			__skb_queue_tail(&neigh->arp_queue, skb);
 		}
 		rc = 1;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ab86f3a1ea4..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
 	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
 
 		int num_vfs = dev_num_vf(dev->dev.parent);
-		size_t size = nlmsg_total_size(sizeof(struct nlattr));
-		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
-		size += num_vfs * (sizeof(struct ifla_vf_mac) +
-				  sizeof(struct ifla_vf_vlan) +
-				  sizeof(struct ifla_vf_tx_rate));
+		size_t size = nla_total_size(sizeof(struct nlattr));
+		size += nla_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs *
+			(nla_total_size(sizeof(struct ifla_vf_mac)) +
+			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
+			 nla_total_size(sizeof(struct ifla_vf_tx_rate)));
 		return size;
 	} else
 		return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 
 	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
 		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
-		if (!vf_port) {
-			nla_nest_cancel(skb, vf_ports);
-			return -EMSGSIZE;
-		}
+		if (!vf_port)
+			goto nla_put_failure;
 		NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+		if (err == -EMSGSIZE)
+			goto nla_put_failure;
 		if (err) {
-nla_put_failure:
 			nla_nest_cancel(skb, vf_port);
 			continue;
 		}
@@ -739,6 +739,10 @@ nla_put_failure:
 	nla_nest_end(skb, vf_ports);
 
 	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, vf_ports);
+	return -EMSGSIZE;
 }
 
 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
 	if (err) {
 		nla_nest_cancel(skb, port_self);
-		return err;
+		return (err == -EMSGSIZE) ? err : 0;
 	}
 
 	nla_nest_end(skb, port_self);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
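
The new sock_queue_err_skb() charges skb->truesize against sk_rmem_alloc and installs sock_rmem_free() as the destructor, so the charge is reversed automatically whenever the queued skb is freed. A hedged sketch of the caller contract (report_tx_error() is a placeholder, not part of this patch; skb_tstamp_tx() just above follows the same shape):

	#include <net/sock.h>

	/* Sketch: queue an error report; on failure the caller still
	 * owns the skb and must free it itself. */
	static void report_tx_error(struct sock *sk, struct sk_buff *err_skb)
	{
		if (sock_queue_err_skb(sk, err_skb))
			kfree_skb(err_skb);	/* receive budget exhausted */
	}
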
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6adade..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ * return true if slow path is taken
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+	might_sleep();
+	spin_lock_bh(&sk->sk_lock.slock);
+
+	if (!sk->sk_lock.owned)
+		/*
+		 * Note : We must disable BH
+		 */
+		return false;
+
+	__lock_sock(sk);
+	sk->sk_lock.owned = 1;
+	spin_unlock(&sk->sk_lock.slock);
+	/*
+	 * The sk_lock has mutex_lock() semantics here:
+	 */
+	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	local_bh_enable();
+	return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
 	struct timeval tv;
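
lock_sock_fast() returns which path it took so the matching unlock can undo the right thing, and the conversions elsewhere in this merge (net/core/datagram.c, net/ipv4/udp.c, net/ipv6/udp.c) all follow one shape. A minimal sketch of the calling convention, mirroring those callers:

	#include <net/sock.h>

	/* Sketch of the new convention: remember which path was taken
	 * and pass it back to unlock_sock_fast(). */
	static void example_fast_section(struct sock *sk)
	{
		bool slow = lock_sock_fast(sk);

		/* Fast path: sk_lock.slock is held with BHs disabled,
		 * so only short, non-blocking work belongs here. */
		sk_mem_reclaim_partial(sk);

		unlock_sock_fast(sk, slow);
	}
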
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
	  If unsure, say N.
 
 config SYN_COOKIES
-	bool "IP: TCP syncookie support (disabled per default)"
+	bool "IP: TCP syncookie support"
	---help---
	  Normal TCP/IP networking is open to an attack known as "SYN
	  flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
	  server is really overloaded. If this happens frequently better turn
	  them off.
 
-	  If you say Y here, note that SYN cookies aren't enabled by default;
-	  you can enable them by saying Y to "/proc file system support" and
-	  "Sysctl support" below and executing the command
+	  If you say Y here, you can disable SYN cookies at run time by
+	  saying Y to "/proc file system support" and
+	  "Sysctl support" below and executing the command
 
-	  echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+	  echo 0 > /proc/sys/net/ipv4/tcp_syncookies
 
-	  at boot time after the /proc file system has been mounted.
+	  after the /proc file system has been mounted.
 
	  If unsure, say N.
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..757f25eb9b4b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
 	struct mr_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+		list_del(&mrt->list);
 		kfree(mrt);
+	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
 #else
@@ -1911,7 +1913,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	struct rtattr *mp_head;
 
 	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mfc_parent > MAXVIFS)
+	if (c->mfc_parent >= MAXVIFS)
 		return -ENOENT;
 
 	if (VIF_EXISTS(mrt, c->mfc_parent))
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c91..9f6b22206c52 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
			       { .sport = th->dest,
				 .dport = th->source } } };
 	security_req_classify_flow(req, &fl);
-	if (ip_route_output_key(&init_net, &rt, &fl)) {
+	if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
 		reqsk_free(req);
 		goto out;
 	}
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a634..377bc9349371 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
		 * calculate 2^fract in a <<7 value.
		 */
		is_slowstart = 1;
-		increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
-			- 128;
+		increment = ((1 << min(ca->rho, 16U)) *
+			hybla_fraction(rho_fractions)) - 128;
	} else {
		/*
		 * congestion avoidance
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
	if (sk->sk_family == AF_INET) {
		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
		       msg,
-		       &inet->daddr, ntohs(inet->dport),
+		       &inet->inet_daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
		struct ipv6_pinfo *np = inet6_sk(sk);
		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
		       msg,
-		       &np->daddr, ntohs(inet->dport),
+		       &np->daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd4..fe193e53af44 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		sock_rps_save_rxhash(sk, skb->rxhash);
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
		}
		return 0;
	}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
 
	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
 
	skb->dev = NULL;
 
-	sock_rps_save_rxhash(sk, skb->rxhash);
-
	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index baeec29fe0f1..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
-	} else {
+	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
	sk->sk_err = err;
	sk->sk_error_report(sk);
 out:
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
	spin_unlock_bh(&rcvq->lock);
 
	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
		__skb_queue_purge(&list_kill);
		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
	}
	return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	int peeked;
	int err;
	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
	/*
	 * Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags))
		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
	if (noblock)
		return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
			      MSG_DONTWAIT, np->dontfrag);
	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
			      np->dontfrag);
 
	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
 
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 
 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
	struct mr6_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+		list_del(&mrt->list);
		ip6mr_free_table(mrt);
+	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
@@ -2017,7 +2019,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
	struct rtattr *mp_head;
 
	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mf6c_parent > MAXMIFS)
+	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;
 
	if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
		     IPV6_TLV_PADN, 0 };
 
	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);
 
	if (!skb)
		return NULL;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;
 
	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
	int flags = 0;
 
-	if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
		flags |= RT6_LOOKUP_F_IFACE;
 
	if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int is_udp4;
+	bool slow;
 
	if (addr_len)
		*addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags)) {
		if (is_udp4)
			UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
	}
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
	if (flags & MSG_DONTWAIT)
		return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
-		return;
+		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu])
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
+
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
		}
		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
			iucv_param[cpu] = NULL;
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
		cpu_clear(cpu, cpumask);
		if (cpus_empty(cpumask))
			/* Can't offline last IUCV enabled cpu. */
-			return NOTIFY_BAD;
+			return notifier_from_errno(-EINVAL);
		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
		if (cpus_empty(iucv_irq_cpumask))
			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
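
notifier_from_errno() wraps a real errno into the notifier return value, so the notifier core and callers using notifier_to_errno() can recover the precise error instead of the opaque NOTIFY_BAD. A sketch of the round trip, assuming the 2.6.35-era notifier API (example_callback() is hypothetical):

	#include <linux/errno.h>
	#include <linux/notifier.h>

	static int example_callback(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		void *buf = NULL;	/* pretend an allocation failed */

		if (!buf)
			return notifier_from_errno(-ENOMEM);
		return NOTIFY_OK;
	}

	/* At the call site, notifier_to_errno(ret) yields -ENOMEM again. */
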
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f4..98258b7341e3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
					IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
 
-	/* send an addBA request */
+	/* prepare tid data */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
				 struct survey_info *survey)
 {
	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
		ret = local->ops->get_survey(&local->hw, idx, survey);
	/* trace_drv_get_survey(local, idx, survey, ret); */
	return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2e..f803f8b72a93 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
			rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
			break;
		case IEEE80211_STYPE_ACTION:
-			if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+			switch (mgmt->u.action.category) {
+			case WLAN_CATEGORY_BACK: {
+				struct ieee80211_local *local = sdata->local;
+				int len = skb->len;
+				struct sta_info *sta;
+
+				rcu_read_lock();
+				sta = sta_info_get(sdata, mgmt->sa);
+				if (!sta) {
+					rcu_read_unlock();
+					break;
+				}
+
+				local_bh_disable();
+
+				switch (mgmt->u.action.u.addba_req.action_code) {
+				case WLAN_ACTION_ADDBA_REQ:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.addba_req)))
+						break;
+					ieee80211_process_addba_request(local, sta, mgmt, len);
+					break;
+				case WLAN_ACTION_ADDBA_RESP:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.addba_resp)))
+						break;
+					ieee80211_process_addba_resp(local, sta, mgmt, len);
+					break;
+				case WLAN_ACTION_DELBA:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.delba)))
+						break;
+					ieee80211_process_delba(sdata, sta, mgmt, len);
+					break;
+				}
+				local_bh_enable();
+				rcu_read_unlock();
				break;
-
-			ieee80211_sta_process_chanswitch(sdata,
-					&mgmt->u.action.u.chan_switch.sw_elem,
-					(void *)ifmgd->associated->priv,
-					rx_status->mactime);
-			break;
+			}
+			case WLAN_CATEGORY_SPECTRUM_MGMT:
+				ieee80211_sta_process_chanswitch(sdata,
+						&mgmt->u.action.u.chan_switch.sw_elem,
+						(void *)ifmgd->associated->priv,
+						rx_status->mactime);
+				break;
+			}
		}
		mutex_unlock(&ifmgd->mtx);
 
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
	mutex_unlock(&ifmgd->mtx);
 
	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+		struct ieee80211_local *local = sdata->local;
+		struct ieee80211_work *wk;
+
+		mutex_lock(&local->work_mtx);
+		list_for_each_entry(wk, &local->work_list, list) {
+			if (wk->sdata != sdata)
+				continue;
+
+			if (wk->type != IEEE80211_WORK_ASSOC)
+				continue;
+
+			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+				continue;
+			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+				continue;
 
+			/*
+			 * Printing the message only here means we can't
+			 * spuriously print it, but it also means that it
+			 * won't be printed when the frame comes in before
+			 * we even tried to associate or in similar cases.
+			 *
+			 * Ultimately, I suspect cfg80211 should print the
+			 * messages instead.
+			 */
+			printk(KERN_DEBUG
+			       "%s: deauthenticated from %pM (Reason: %u)\n",
+			       sdata->name, mgmt->bssid,
+			       le16_to_cpu(mgmt->u.deauth.reason_code));
+
+			list_del_rcu(&wk->list);
+			free_work(wk);
+			break;
+		}
+		mutex_unlock(&local->work_mtx);
+
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	}
 out:
	kfree_skb(skb);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb8..be9abc2e6348 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
		return RX_CONTINUE;
 
	if (ieee80211_is_back_req(bar->frame_control)) {
+		struct {
+			__le16 control, start_seq_num;
+		} __packed bar_data;
+
		if (!rx->sta)
			return RX_DROP_MONITOR;
+
+		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+				  &bar_data, sizeof(bar_data)))
+			return RX_DROP_MONITOR;
+
		spin_lock(&rx->sta->lock);
-		tid = le16_to_cpu(bar->control) >> 12;
+		tid = le16_to_cpu(bar_data.control) >> 12;
		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
			spin_unlock(&rx->sta->lock);
			return RX_DROP_MONITOR;
		}
		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
-		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
		/* reset session timer */
		if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;
 
+		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
		switch (mgmt->u.action.u.addba_req.action_code) {
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
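
The BAR fix matters because the frame may live partly in paged skb data: dereferencing bar->control assumes the field sits in the linear header, while skb_copy_bits() extracts the bytes wherever they are and fails cleanly on truncated frames. A sketch of the general pattern (read_le16_field() is illustrative, not a mac80211 helper):

	#include <linux/skbuff.h>

	/* Copy a little-endian u16 out of an skb that may be non-linear. */
	static int read_le16_field(const struct sk_buff *skb, int offset, u16 *out)
	{
		__le16 raw;

		if (skb_copy_bits(skb, offset, &raw, sizeof(raw)))
			return -EINVAL;	/* frame too short */
		*out = le16_to_cpu(raw);
		return 0;
	}
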
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);
-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+
+	free_percpu(info->stackptr);
 
	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
	unsigned int size;
	int cpu;
 
-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;
-	memset(i->stackptr, 0, size);
 
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
	struct xt_table_info *private;
	struct xt_table *t, *table;
 
-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
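
alloc_percpu() replaces the open-coded nr_cpu_ids array and its kmalloc/vmalloc size split, and it returns zeroed per-cpu storage, which is why the memset() disappears too. A minimal sketch of the per-cpu cursor pattern now used for stackptr (the names here are illustrative):

	#include <linux/percpu.h>

	static unsigned int __percpu *cursor;

	static int cursor_init(void)
	{
		cursor = alloc_percpu(unsigned int);	/* zeroed on every cpu */
		return cursor ? 0 : -ENOMEM;
	}

	static unsigned int *cursor_on(int cpu)
	{
		return per_cpu_ptr(cursor, cpu);	/* that cpu's private slot */
	}

	static void cursor_exit(void)
	{
		free_percpu(cursor);
	}
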
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
	if (ip_route_output_key(net, &rt, &fl) != 0)
		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);
	skb->dev      = rt->u.dst.dev;
	skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
	if (dst == NULL)
		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
	skb_dst_set(skb, dst);
	skb->dev      = dst->dev;
	skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
 	lock_sock(sk);
 	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
 		skparent = pn->listener;
-		sk_del_node_init(sk);
 		release_sock(sk);
 
-		sk = skparent;
 		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
 	}
 	/* Unhash a listening sock only when it is closed
 	 * and all of its active connected pipes are closed. */
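
The pep_sock_unhash() hunk is an ordering fix: the child socket used to be unhashed under only its own lock, before the listener was locked; now the child lock is dropped first, the parent (listener) lock is taken, and only then is sk_del_node_init(sk) called, so unhashing is serialized with other work done under the listener's lock. The corrected sequence, sketched:

	lock_sock(sk);			/* child */
	skparent = pn->listener;
	release_sock(sk);		/* drop the child lock first ... */

	pn = pep_sk(skparent);
	lock_sock(skparent);		/* ... then take the parent lock */
	sk_del_node_init(sk);		/* unhash the child under it */
	sk = skparent;
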
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_iw_setup_qp(conn);
 	if (err) {
 		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
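
The two RDS hunks fix the same bug in parallel code: the connect handlers take conn->c_cm_lock earlier in the function, and the rds_{ib,iw}_setup_qp() failure path jumped to out without releasing it, leaving the mutex held forever. The generic shape of the fix, with setup_qp() and conn_error() standing in for the RDS helpers:

	mutex_lock(&conn->c_cm_lock);
	/* ... */
	err = setup_qp(conn);
	if (err) {
		conn_error(conn, "setup_qp failed (%d)\n", err);
		mutex_unlock(&conn->c_cm_lock);	/* the missing unwind */
		goto out;
	}
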
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..570949417f38 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		iph->daddr = new_addr;
 
 		csum_replace4(&iph->check, addr, new_addr);
+	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
+		   iph->protocol != IPPROTO_ICMP) {
+		goto out;
 	}
 
 	ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		break;
 	}
 
+out:
 	return action;
 
 drop:
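
The act_nat hunks add an early exit for packets the action cannot usefully translate when the address did not match the rule: non-initial fragments carry no layer-4 header, and a non-ICMP packet in that case has nothing left to rewrite, so both now jump to the new out: label instead of falling into the port-rewriting code. The fragment test leans on frag_off being big-endian:

	/* iph->frag_off is __be16: 3 flag bits on top, 13-bit fragment offset
	 * below.  Masking with htons(IP_OFFSET) (IP_OFFSET == 0x1FFF) keeps
	 * only the offset, so a non-zero result means "not the first fragment".
	 */
	if ((iph->frag_off & htons(IP_OFFSET)) ||
	    iph->protocol != IPPROTO_ICMP)
		goto out;
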
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd840..50e3d945e1f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_pedit *p = a->priv;
 	int i, munged = 0;
-	u8 *pptr;
+	unsigned int off;
 
 	if (!(skb->tc_verd & TC_OK2MUNGE)) {
 		/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		}
 	}
 
-	pptr = skb_network_header(skb);
+	off = skb_network_offset(skb);
 
 	spin_lock(&p->tcf_lock);
 
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		struct tc_pedit_key *tkey = p->tcfp_keys;
 
 		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-			u32 *ptr;
+			u32 *ptr, _data;
 			int offset = tkey->off;
 
 			if (tkey->offmask) {
-				if (skb->len > tkey->at) {
-					char *j = pptr + tkey->at;
-					offset += ((*j & tkey->offmask) >>
-						   tkey->shift);
-				} else {
+				char *d, _d;
+
+				d = skb_header_pointer(skb, off + tkey->at, 1,
+						       &_d);
+				if (!d)
 					goto bad;
-				}
+				offset += (*d & tkey->offmask) >> tkey->shift;
 			}
 
 			if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 				goto bad;
 			}
 
-			ptr = (u32 *)(pptr+offset);
+			ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+			if (!ptr)
+				goto bad;
 			/* just do it, baby */
 			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+			if (ptr == &_data)
+				skb_store_bits(skb, off + offset, ptr, 4);
 			munged++;
 		}
 
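
The act_pedit rewrite drops the assumption that packet data is linear in memory. skb_header_pointer() copies the requested bytes into a stack buffer when they live in a paged fragment (and returns NULL if the offset runs past skb->len), and skb_store_bits() pushes an edited copy back. The read-modify-write idiom adopted above, sketched with illustrative locals (offset, mask, and xor_val are not names from the patch):

	u32 _buf;
	u32 *p = skb_header_pointer(skb, offset, sizeof(_buf), &_buf);

	if (!p)
		goto bad;			/* offset beyond skb->len */
	*p = (*p & mask) ^ xor_val;		/* edit in place, or in the copy */
	if (p == &_buf)				/* data was non-linear: */
		skb_store_bits(skb, offset, p, sizeof(_buf));	/* write back */
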
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c619..4f522143811e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 {
 	struct {
 		struct tc_u_knode *knode;
-		u8 *ptr;
+		unsigned int off;
 	} stack[TC_U32_MAXDEPTH];
 
 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-	u8 *ptr = skb_network_header(skb);
+	unsigned int off = skb_network_offset(skb);
 	struct tc_u_knode *n;
 	int sdepth = 0;
 	int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
 #endif
 
 		for (i = n->sel.nkeys; i>0; i--, key++) {
-
-			if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+			unsigned int toff;
+			__be32 *data, _data;
+
+			toff = off + key->off + (off2 & key->offmask);
+			data = skb_header_pointer(skb, toff, 4, &_data);
+			if (!data)
+				goto out;
+			if ((*data ^ key->val) & key->mask) {
 				n = n->next;
 				goto next_knode;
 			}
@@ -174,29 +180,45 @@ check_terminal:
 		if (sdepth >= TC_U32_MAXDEPTH)
 			goto deadloop;
 		stack[sdepth].knode = n;
-		stack[sdepth].ptr = ptr;
+		stack[sdepth].off = off;
 		sdepth++;
 
 		ht = n->ht_down;
 		sel = 0;
-		if (ht->divisor)
-			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
+		if (ht->divisor) {
+			__be32 *data, _data;
 
+			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+						  &_data);
+			if (!data)
+				goto out;
+			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+							  n->fshift);
+		}
 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
 			goto next_ht;
 
 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
-			if (n->sel.flags&TC_U32_VAROFFSET)
-				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+			if (n->sel.flags & TC_U32_VAROFFSET) {
+				__be16 *data, _data;
+
+				data = skb_header_pointer(skb,
+							  off + n->sel.offoff,
+							  2, &_data);
+				if (!data)
+					goto out;
+				off2 += ntohs(n->sel.offmask & *data) >>
+					n->sel.offshift;
+			}
 			off2 &= ~3;
 		}
 		if (n->sel.flags&TC_U32_EAT) {
-			ptr += off2;
+			off += off2;
 			off2 = 0;
 		}
 
-		if (ptr < skb_tail_pointer(skb))
+		if (off < skb->len)
 			goto next_ht;
 	}
 
@@ -204,9 +226,10 @@ check_terminal:
 	if (sdepth--) {
 		n = stack[sdepth].knode;
 		ht = n->ht_up;
-		ptr = stack[sdepth].ptr;
+		off = stack[sdepth].off;
 		goto check_terminal;
 	}
+out:
 	return -1;
 
 deadloop:
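
cls_u32 gets the same treatment as act_pedit: raw pointer arithmetic on skb_network_header() could read past the linear headroom into paged data, or beyond the buffer entirely, so the classifier now tracks a network-relative offset, fetches each word through skb_header_pointer(), and bails out through the new out: label when a load would overrun skb->len (the off < skb->len check likewise replaces the old tail-pointer comparison). One key compare in this style, mirroring the hunk above:

	__be32 _data;
	__be32 *data = skb_header_pointer(skb, off + key->off + (off2 & key->offmask),
					  sizeof(_data), &_data);
	if (!data)
		goto out;		/* load would run past skb->len */
	if ((*data ^ key->val) & key->mask) {
		n = n->next;		/* masked mismatch: try the next knode */
		goto next_knode;
	}
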
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 /**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct svc_sock *bc_sock;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 				   RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static struct xprt_class xs_udp_transport = {
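
All three xs_setup_*() changes are one refactor: the default: arm of the address-family switch freed the xprt with a bare kfree() but leaked xprt->slot, which xs_setup_xprt() had already allocated. Routing every failure through a single out_err label frees both allocations and leaves one exit per outcome. The resulting shape, with a hypothetical unsupported_family condition standing in for the switch:

	struct rpc_xprt *xprt, *ret;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	if (unsupported_family) {
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;	/* unlike kfree(xprt) alone, frees the slot table */
	}
	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	kfree(xprt->slot);
	kfree(xprt);
	return ret;
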
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdfa..a3cca0a94346 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
 			goto error_nolock;
 		}
 
-		dst = dst_pop(dst);
+		dst = skb_dst_pop(skb);
 		if (!dst) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		skb_dst_set(skb, dst);
+		skb_dst_set_noref(skb, dst);
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d3..4bf27d901333 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 		return 0;
 	}
 
+	skb_dst_force(skb);
 	dst = skb_dst(skb);
 
 	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
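
The two xfrm hunks tie into the same noref-dst machinery as the xt_TEE change above. xfrm_output_one() now pops the dst straight off the skb (skb_dst_pop()) and re-attaches the next bundle entry without taking a reference (skb_dst_set_noref()), which is only legal inside an RCU read-side section; __xfrm_route_forward() correspondingly calls skb_dst_force() to upgrade a possibly noref dst to a real reference before the packet is handed onward. The contract, roughly sketched:

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);	/* borrow: no refcount, RCU keeps dst alive */
	/* ... work that completes inside this RCU section ... */
	skb_dst_force(skb);		/* leaving RCU context: take a real reference */
	rcu_read_unlock();
	/* a later skb_dst_drop() will put that reference */
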