Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c            |  4
-rw-r--r--  net/8021q/vlan_dev.c             |  3
-rw-r--r--  net/caif/cfrfml.c                |  2
-rw-r--r--  net/caif/cfserl.c                |  6
-rw-r--r--  net/caif/cfveil.c                |  2
-rw-r--r--  net/core/dev.c                   | 33
-rw-r--r--  net/core/gen_estimator.c         | 15
-rw-r--r--  net/core/pktgen.c                |  2
-rw-r--r--  net/core/skbuff.c                | 42
-rw-r--r--  net/ipv4/Kconfig                 | 10
-rw-r--r--  net/ipv4/ipmr.c                  |  4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c   |  2
-rw-r--r--  net/ipv4/syncookies.c            |  2
-rw-r--r--  net/ipv4/tcp_hybla.c             |  4
-rw-r--r--  net/ipv4/tcp_input.c             |  4
-rw-r--r--  net/ipv4/tcp_ipv4.c              |  7
-rw-r--r--  net/ipv4/udp.c                   |  4
-rw-r--r--  net/ipv6/icmp.c                  |  4
-rw-r--r--  net/ipv6/ip6mr.c                 |  6
-rw-r--r--  net/ipv6/mcast.c                 |  5
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  |  2
-rw-r--r--  net/ipv6/route.c                 |  2
-rw-r--r--  net/mac80211/agg-tx.c            |  6
-rw-r--r--  net/mac80211/chan.c              |  2
-rw-r--r--  net/mac80211/driver-ops.h        |  2
-rw-r--r--  net/mac80211/mlme.c              | 92
-rw-r--r--  net/mac80211/rx.c                | 16
-rw-r--r--  net/netfilter/x_tables.c         | 17
-rw-r--r--  net/phonet/pep.c                 |  6
-rw-r--r--  net/rds/ib_cm.c                  |  1
-rw-r--r--  net/rds/iw_cm.c                  |  1
-rw-r--r--  net/sched/act_nat.c              |  4
-rw-r--r--  net/sched/act_pedit.c            | 24
-rw-r--r--  net/sched/cls_u32.c              | 49
-rw-r--r--  net/xfrm/xfrm_output.c           |  4
-rw-r--r--  net/xfrm/xfrm_policy.c           |  1
36 files changed, 280 insertions(+), 110 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
 	netif_carrier_off(dev);
 
 	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
-	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					  IFF_MASTER | IFF_SLAVE);
 	dev->iflink = real_dev->ifindex;
 	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
 					  (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 	u8 stx = CFSERL_STX;
 	int ret;
 	u16 expectlen = 0;
+
 	caif_assert(newpkt != NULL);
 	spin_lock(&layr->sync);
 
 	if (layr->incomplete_frm != NULL) {
-
 		layr->incomplete_frm =
 		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
-		if (pkt == NULL)
+		if (pkt == NULL) {
+			spin_unlock(&layr->sync);
 			return -ENOMEM;
+		}
 	} else {
 		pkt = newpkt;
 	}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			   __func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
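
Note on the two CAIF fixes above (cfrfml.c and cfveil.c): the logical-not operator binds tighter than the comparison, so the old test "!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE" compared a boolean 0/1 against the limit and could never fire. A standalone sketch of the precedence trap, using illustrative values:

	#include <stdio.h>

	int main(void)
	{
		int len = 9000, max = 4096;	/* illustrative sizes */

		if (!len > max)		/* parsed as (!len) > max, i.e. 0 > 4096: never true */
			printf("unreachable for any positive len\n");

		if (len > max)		/* the corrected check */
			printf("oversized packet rejected\n");
		return 0;
	}
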
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624e..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				  "on queue %u, but number of RX queues is %u\n",
+				  dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2795,7 +2793,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
-	struct net_device *null_or_bond;
+	struct net_device *orig_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
@@ -2868,10 +2877,10 @@ ncls:
 	 * device that may have registered for a specific ptype.  The
 	 * handler may have to adjust skb->dev and orig_dev.
 	 */
-	null_or_bond = NULL;
+	orig_or_bond = orig_dev;
 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-		null_or_bond = vlan_dev_real_dev(skb->dev);
+		orig_or_bond = vlan_dev_real_dev(skb->dev);
 	}
 
 	type = skb->protocol;
@@ -2879,7 +2888,7 @@ ncls:
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type && (ptype->dev == null_or_orig ||
 		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
-		     ptype->dev == null_or_bond)) {
+		     ptype->dev == orig_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
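
For context on the delivery rules above: skbs flagged deliver_no_wcard are still handed to handlers whose ptype->dev exactly matches the receiving device; only wildcard handlers (ptype->dev == NULL) are skipped for frames from inactive bonding slaves. A minimal sketch of an exact-match handler against the 2.6.35-era API (the names my_rcv/my_ptype and the device binding are illustrative, not from this patch):

	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
	{
		/* duplicate-sensitive handlers must honor the new flag themselves */
		if (skb->deliver_no_wcard)
			pr_debug("frame from an inactive bonding slave\n");
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	static struct packet_type my_ptype = {
		.type = cpu_to_be16(ETH_P_ALL),
		.dev  = NULL,	/* point at a specific net_device for exact-match delivery */
		.func = my_rcv,
	};
	/* register with dev_add_pack(&my_ptype), remove with dev_remove_pack(&my_ptype) */
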
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
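
The gen_estimator hunks drop the "called under rtnl_mutex" assumption and give the est_root rb-tree its own spinlock, so every lookup, insert, and erase is bracketed by est_tree_lock. The same pattern in isolation, with illustrative names (my_find_node stands in for a caller-supplied lookup):

	#include <linux/rbtree.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	static struct rb_root my_root = RB_ROOT;
	static DEFINE_SPINLOCK(my_tree_lock);

	extern struct rb_node *my_find_node(struct rb_root *root, u64 key);

	static bool my_node_present(u64 key)
	{
		bool res;

		spin_lock(&my_tree_lock);
		res = my_find_node(&my_root, key) != NULL;
		spin_unlock(&my_tree_lock);

		return res;
	}
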
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
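
The pktgen change above is a drift fix: the next transmit is now scheduled relative to the intended deadline (spin_until) instead of the time the spin actually ended, so oversleep in one iteration no longer accumulates into all later ones. The idea in isolation (illustrative helper, not kernel code):

	#include <stdint.h>

	/* deadline is when we wanted to wake; waking late must not shift the grid */
	static uint64_t next_deadline(uint64_t deadline, uint64_t period_ns)
	{
		return deadline + period_ns;	/* not: actual_wakeup_time + period_ns */
	}
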
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  *	reference count dropping and cleans up the skbuff as if it
  *	just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		   struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
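
sock_queue_err_skb(), added above, is what parks skbs on sk_error_queue with rmem accounting; skb_tstamp_tx() is its first caller in this diff. Userspace drains that queue with recvmsg(MSG_ERRQUEUE). A minimal userspace sketch, error handling omitted, assuming a socket already configured with SO_TIMESTAMPING:

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	static void drain_errqueue(int fd)
	{
		char data[2048], ctrl[512];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg;

		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctrl;
		msg.msg_controllen = sizeof(ctrl);

		/* returns the queued error skb; timestamps arrive as cmsgs */
		recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
	}
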
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
 	  If unsure, say N.
 
 config SYN_COOKIES
-	bool "IP: TCP syncookie support (disabled per default)"
+	bool "IP: TCP syncookie support"
 	---help---
 	  Normal TCP/IP networking is open to an attack known as "SYN
 	  flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
 	  server is really overloaded. If this happens frequently better turn
 	  them off.
 
-	  If you say Y here, note that SYN cookies aren't enabled by default;
-	  you can enable them by saying Y to "/proc file system support" and
+	  If you say Y here, you can disable SYN cookies at run time by
+	  saying Y to "/proc file system support" and
 	  "Sysctl support" below and executing the command
 
-	  echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+	  echo 0 > /proc/sys/net/ipv4/tcp_syncookies
 
-	  at boot time after the /proc file system has been mounted.
+	  after the /proc file system has been mounted.
 
 	  If unsure, say N.
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f9..757f25eb9b4b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
 	struct mr_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+		list_del(&mrt->list);
 		kfree(mrt);
+	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
 #else
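
The ipmr fix (mirrored in ip6mr further down) unlinks each table before freeing it. Even with the _safe iterator, which only protects the current traversal, freeing a still-linked node leaves the list threaded through freed memory for any other walker. The general teardown pattern:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
		/* payload ... */
	};

	static void destroy_all(struct list_head *head)
	{
		struct item *it, *next;

		list_for_each_entry_safe(it, next, head, list) {
			list_del(&it->list);	/* unlink first ... */
			kfree(it);		/* ... then free */
		}
	}
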
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c91..9f6b22206c52 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 			    { .sport = th->dest,
 			      .dport = th->source } } };
 	security_req_classify_flow(req, &fl);
-	if (ip_route_output_key(&init_net, &rt, &fl)) {
+	if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
 		reqsk_free(req);
 		goto out;
 	}
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a634..377bc9349371 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		 *     calculate 2^fract in a <<7 value.
 		 */
 		is_slowstart = 1;
-		increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
-			- 128;
+		increment = ((1 << min(ca->rho, 16U)) *
+			hybla_fraction(rho_fractions)) - 128;
 	} else {
 		/*
 		 * congestion avoidance
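
The Hybla fix caps the exponent: rho is derived from the measured RTT, and shifting a 32-bit value by 32 or more is undefined behavior in C (on x86 the shift count silently wraps). Clamping to 16 also keeps the subsequent multiply within u32. Illustration:

	#include <stdint.h>

	static uint32_t safe_pow2(uint32_t rho)
	{
		if (rho > 16)		/* 1u << 32 would be undefined */
			rho = 16;
		return 1u << rho;
	}
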
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 	if (sk->sk_family == AF_INET) {
 		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &inet->daddr, ntohs(inet->dport),
+		       &inet->inet_daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		struct ipv6_pinfo *np = inet6_sk(sk);
 		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &np->daddr, ntohs(inet->dport),
+		       &np->daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd4..fe193e53af44 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		sock_rps_save_rxhash(sk, skb->rxhash);
 		TCP_CHECK_TIMER(sk);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			}
 			return 0;
 		}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
 
 	TCP_CHECK_TIMER(sk);
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
 
 	skb->dev = NULL;
 
-	sock_rps_save_rxhash(sk, skb->rxhash);
-
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bdac..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
 			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 			      np->dontfrag);
 
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 073071f2b75b..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
 	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 
 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
 	struct mr6_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+		list_del(&mrt->list);
 		ip6mr_free_table(mrt);
+	}
 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 		     IPV6_TLV_PADN, 0 };
 
 	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);
 
 	if (!skb)
 		return NULL;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
 	int flags = 0;
 
-	if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
 		flags |= RT6_LOOKUP_F_IFACE;
 
 	if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f4..98258b7341e3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 				    IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
 	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
 
-	/* send an addBA request */
+	/* prepare tid data */
 	sta->ampdu_mlme.dialog_token_allocator++;
 	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
 			sta->ampdu_mlme.dialog_token_allocator;
 	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
 				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
 				     sta->ampdu_mlme.tid_tx[tid]->ssn,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 				  struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
 	/* trace_drv_get_survey(local, idx, survey, ret); */
 	return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2e..f803f8b72a93 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 		rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
 		break;
 	case IEEE80211_STYPE_ACTION:
-		if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+		switch (mgmt->u.action.category) {
+		case WLAN_CATEGORY_BACK: {
+			struct ieee80211_local *local = sdata->local;
+			int len = skb->len;
+			struct sta_info *sta;
+
+			rcu_read_lock();
+			sta = sta_info_get(sdata, mgmt->sa);
+			if (!sta) {
+				rcu_read_unlock();
+				break;
+			}
+
+			local_bh_disable();
+
+			switch (mgmt->u.action.u.addba_req.action_code) {
+			case WLAN_ACTION_ADDBA_REQ:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_req)))
+					break;
+				ieee80211_process_addba_request(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_ADDBA_RESP:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_resp)))
+					break;
+				ieee80211_process_addba_resp(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_DELBA:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.delba)))
+					break;
+				ieee80211_process_delba(sdata, sta, mgmt, len);
+				break;
+			}
+			local_bh_enable();
+			rcu_read_unlock();
 			break;
-
-		ieee80211_sta_process_chanswitch(sdata,
-				&mgmt->u.action.u.chan_switch.sw_elem,
-				(void *)ifmgd->associated->priv,
-				rx_status->mactime);
-		break;
+		}
+		case WLAN_CATEGORY_SPECTRUM_MGMT:
+			ieee80211_sta_process_chanswitch(sdata,
+					&mgmt->u.action.u.chan_switch.sw_elem,
+					(void *)ifmgd->associated->priv,
+					rx_status->mactime);
+			break;
+		}
 	}
 	mutex_unlock(&ifmgd->mtx);
 
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 	mutex_unlock(&ifmgd->mtx);
 
 	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+		struct ieee80211_local *local = sdata->local;
+		struct ieee80211_work *wk;
+
+		mutex_lock(&local->work_mtx);
+		list_for_each_entry(wk, &local->work_list, list) {
+			if (wk->sdata != sdata)
+				continue;
+
+			if (wk->type != IEEE80211_WORK_ASSOC)
+				continue;
+
+			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+				continue;
+			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+				continue;
 
+			/*
+			 * Printing the message only here means we can't
+			 * spuriously print it, but it also means that it
+			 * won't be printed when the frame comes in before
+			 * we even tried to associate or in similar cases.
+			 *
+			 * Ultimately, I suspect cfg80211 should print the
+			 * messages instead.
+			 */
+			printk(KERN_DEBUG
+			       "%s: deauthenticated from %pM (Reason: %u)\n",
+			       sdata->name, mgmt->bssid,
+			       le16_to_cpu(mgmt->u.deauth.reason_code));
+
+			list_del_rcu(&wk->list);
+			free_work(wk);
+			break;
+		}
+		mutex_unlock(&local->work_mtx);
+
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	}
  out:
 	kfree_skb(skb);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb8..be9abc2e6348 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 		return RX_CONTINUE;
 
 	if (ieee80211_is_back_req(bar->frame_control)) {
+		struct {
+			__le16 control, start_seq_num;
+		} __packed bar_data;
+
 		if (!rx->sta)
 			return RX_DROP_MONITOR;
+
+		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+				  &bar_data, sizeof(bar_data)))
+			return RX_DROP_MONITOR;
+
 		spin_lock(&rx->sta->lock);
-		tid = le16_to_cpu(bar->control) >> 12;
+		tid = le16_to_cpu(bar_data.control) >> 12;
 		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
 			spin_unlock(&rx->sta->lock);
 			return RX_DROP_MONITOR;
 		}
 		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
-		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
 		/* reset session timer */
 		if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 			break;
 
+		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
 		switch (mgmt->u.action.u.addba_req.action_code) {
 		case WLAN_ACTION_ADDBA_REQ:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
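
The BA-request fix above is the standard defense for fields that may sit beyond the skb's linear head: copy them into a stack buffer with skb_copy_bits() instead of dereferencing the header pointer. Reduced to its core (illustrative wrapper, not from this patch):

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>
	#include <linux/stddef.h>

	struct bar_fields {
		__le16 control;
		__le16 start_seq_num;
	} __packed;

	static int read_bar(struct sk_buff *skb, struct bar_fields *out)
	{
		/* fails (< 0) instead of reading past the buffer */
		return skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
				     out, sizeof(*out));
	}
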
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
 		vfree(info->jumpstack);
 	else
 		kfree(info->jumpstack);
-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+
+	free_percpu(info->stackptr);
 
 	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
 	unsigned int size;
 	int cpu;
 
-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
 	if (i->stackptr == NULL)
 		return -ENOMEM;
-	memset(i->stackptr, 0, size);
 
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;
 
-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {
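
The x_tables hunks replace a hand-rolled per-CPU array (kmalloc'd or vmalloc'd depending on nr_cpu_ids) with the dedicated per-cpu allocator, which sizes, aligns, and zeroes the storage for every possible CPU. The resulting pattern, sketched with illustrative names:

	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	static unsigned int __percpu *stackptr;

	static int stack_init(void)
	{
		stackptr = alloc_percpu(unsigned int);	/* zeroed on every CPU */
		if (stackptr == NULL)
			return -ENOMEM;
		return 0;
	}

	static void stack_touch(void)
	{
		unsigned int *p = per_cpu_ptr(stackptr, smp_processor_id());
		(*p)++;
	}

	static void stack_exit(void)
	{
		free_percpu(stackptr);
	}
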
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
 	lock_sock(sk);
 	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
 		skparent = pn->listener;
-		sk_del_node_init(sk);
 		release_sock(sk);
 
-		sk = skparent;
 		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
 	}
 	/* Unhash a listening sock only when it is closed
 	 * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_iw_setup_qp(conn);
 	if (err) {
 		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..570949417f38 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		iph->daddr = new_addr;
 
 		csum_replace4(&iph->check, addr, new_addr);
+	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
+		   iph->protocol != IPPROTO_ICMP) {
+		goto out;
 	}
 
 	ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		break;
 	}
 
+out:
 	return action;
 
 drop:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd840..50e3d945e1f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_pedit *p = a->priv;
 	int i, munged = 0;
-	u8 *pptr;
+	unsigned int off;
 
 	if (!(skb->tc_verd & TC_OK2MUNGE)) {
 		/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		}
 	}
 
-	pptr = skb_network_header(skb);
+	off = skb_network_offset(skb);
 
 	spin_lock(&p->tcf_lock);
 
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		struct tc_pedit_key *tkey = p->tcfp_keys;
 
 		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-			u32 *ptr;
+			u32 *ptr, _data;
 			int offset = tkey->off;
 
 			if (tkey->offmask) {
-				if (skb->len > tkey->at) {
-					char *j = pptr + tkey->at;
-					offset += ((*j & tkey->offmask) >>
-						   tkey->shift);
-				} else {
+				char *d, _d;
+
+				d = skb_header_pointer(skb, off + tkey->at, 1,
+						       &_d);
+				if (!d)
 					goto bad;
-				}
+				offset += (*d & tkey->offmask) >> tkey->shift;
 			}
 
 			if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 				goto bad;
 			}
 
-			ptr = (u32 *)(pptr+offset);
+			ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+			if (!ptr)
+				goto bad;
 			/* just do it, baby */
 			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+			if (ptr == &_data)
+				skb_store_bits(skb, off + offset, ptr, 4);
 			munged++;
 		}
 
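
act_pedit above and cls_u32 below are both converted from raw pointer arithmetic on the linear skb data to the skb_header_pointer() idiom, which also works for paged or fragmented skbs: it returns a direct pointer when the bytes are contiguous and otherwise copies them into the caller's buffer, in which case writes must be pushed back with skb_store_bits(). A condensed read-modify-write sketch (illustrative function):

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int xor_word(struct sk_buff *skb, unsigned int off, u32 val)
	{
		u32 *ptr, _data;

		ptr = skb_header_pointer(skb, off, sizeof(_data), &_data);
		if (ptr == NULL)
			return -EINVAL;		/* offset beyond the packet */

		*ptr ^= val;
		if (ptr == &_data)		/* modified the bounce buffer ... */
			return skb_store_bits(skb, off, ptr, sizeof(_data));
		return 0;			/* ... or the skb data in place */
	}
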
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c619..4f522143811e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 {
 	struct {
 		struct tc_u_knode *knode;
-		u8		  *ptr;
+		unsigned int	  off;
 	} stack[TC_U32_MAXDEPTH];
 
 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-	u8 *ptr = skb_network_header(skb);
+	unsigned int off = skb_network_offset(skb);
 	struct tc_u_knode *n;
 	int sdepth = 0;
 	int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
 #endif
 
 	for (i = n->sel.nkeys; i>0; i--, key++) {
-
-		if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+		unsigned int toff;
+		__be32 *data, _data;
+
+		toff = off + key->off + (off2 & key->offmask);
+		data = skb_header_pointer(skb, toff, 4, &_data);
+		if (!data)
+			goto out;
+		if ((*data ^ key->val) & key->mask) {
 			n = n->next;
 			goto next_knode;
 		}
@@ -174,29 +180,45 @@ check_terminal:
 		if (sdepth >= TC_U32_MAXDEPTH)
 			goto deadloop;
 		stack[sdepth].knode = n;
-		stack[sdepth].ptr = ptr;
+		stack[sdepth].off = off;
 		sdepth++;
 
 		ht = n->ht_down;
 		sel = 0;
-		if (ht->divisor)
-			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
+		if (ht->divisor) {
+			__be32 *data, _data;
 
+			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+						  &_data);
+			if (!data)
+				goto out;
+			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+							  n->fshift);
+		}
 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
 			goto next_ht;
 
 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
-			if (n->sel.flags&TC_U32_VAROFFSET)
-				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+			if (n->sel.flags & TC_U32_VAROFFSET) {
+				__be16 *data, _data;
+
+				data = skb_header_pointer(skb,
+							  off + n->sel.offoff,
+							  2, &_data);
+				if (!data)
+					goto out;
+				off2 += ntohs(n->sel.offmask & *data) >>
+					n->sel.offshift;
+			}
 			off2 &= ~3;
 		}
 		if (n->sel.flags&TC_U32_EAT) {
-			ptr += off2;
+			off += off2;
 			off2 = 0;
 		}
 
-		if (ptr < skb_tail_pointer(skb))
+		if (off < skb->len)
 			goto next_ht;
 	}
 
@@ -204,9 +226,10 @@ check_terminal:
 	if (sdepth--) {
 		n = stack[sdepth].knode;
 		ht = n->ht_up;
-		ptr = stack[sdepth].ptr;
+		off = stack[sdepth].off;
 		goto check_terminal;
 	}
+out:
 	return -1;
 
 deadloop:
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdfa..a3cca0a94346 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
 			goto error_nolock;
 		}
 
-		dst = dst_pop(dst);
+		dst = skb_dst_pop(skb);
 		if (!dst) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		skb_dst_set(skb, dst);
+		skb_dst_set_noref(skb, dst);
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
107 107
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d3..4bf27d901333 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 		return 0;
 	}
 
+	skb_dst_force(skb);
 	dst = skb_dst(skb);
 
 	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;