author		David S. Miller <davem@davemloft.net>	2008-08-30 02:06:00 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-30 02:06:00 -0400
commit		b171e19ed08c8ba832e5325fadf1be493f56665d (patch)
tree		017208f9fcc4ef2cc3fe5cc3f262d2455eda9f61 /net
parent		143b11c03cd42f2284efe5128afc057d8fc86c78 (diff)
parent		7c19a3d280297d43ef5ff7c6b205dc208a16d3d1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/mac80211/mlme.c
Diffstat (limited to 'net')
 net/ipv4/icmp.c               | 22
 net/ipv4/route.c              | 24
 net/ipv4/tcp_output.c         |  6
 net/ipv6/addrconf.c           |  1
 net/ipv6/icmp.c               | 23
 net/ipv6/raw.c                |  6
 net/ipv6/sysctl_net_ipv6.c    |  2
 net/mac80211/debugfs_netdev.c | 24
 net/mac80211/ieee80211_i.h    |  6
 net/mac80211/mesh.c           |  2
 net/mac80211/mlme.c           | 52
 net/sched/cls_api.c           |  2
 net/sched/cls_route.c         |  2
 net/sched/sch_api.c           | 70
 net/sched/sch_cbq.c           |  6
 net/sched/sch_generic.c       |  9
 net/sched/sch_hfsc.c          |  4
 net/sched/sch_htb.c           |  8
 net/sched/sch_netem.c         |  2
 net/sched/sch_teql.c          |  2
 net/sctp/auth.c               |  7
 net/sctp/socket.c             | 11
 22 files changed, 174 insertions(+), 117 deletions(-)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 * Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*
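
[Note: the icmp.c hunks above change icmp_xmit_lock() so that it resolves the per-CPU ICMP socket from the struct net itself and returns it already locked, or NULL when the trylock fails. The caller sketch below is illustrative only and not part of the patch; it merely restates the locking contract using the helpers visible in the hunks above.]

	struct sock *sk;

	sk = icmp_xmit_lock(net);	/* disables BH and trylocks the per-CPU ICMP socket */
	if (sk == NULL)
		return;			/* output path already holds the lock; drop this reply */

	/* ... build and transmit the ICMP message on sk ... */

	icmp_xmit_unlock(sk);		/* releases sk->sk_lock.slock and re-enables BH */
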
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 71598f64c113..f62187bb6d08 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3122,14 +3122,23 @@ static ctl_table ipv4_route_table[] = {
 	{ .ctl_name = 0 }
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+	  .mode = 0555, .child = ipv4_route_table},
+	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+	  .mode = 0555, .child = empty},
+	{ }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
 	{ },
 };
 
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
@@ -3142,6 +3151,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
 	{ .ctl_name = 0 },
 };
 
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3293,7 +3309,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+	register_sysctl_paths(ipv4_path, ipv4_skeleton);
 }
 #endif
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a00532de2a8c..8165f5aa8c71 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	}
 	if (likely(sysctl_tcp_window_scaling)) {
 		opts->ws = tp->rx_opt.rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(sysctl_tcp_sack)) {
 		opts->options |= OPTION_SACK_ADVERTISE;
@@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk,
 
 	if (likely(ireq->wscale_ok)) {
 		opts->ws = ireq->rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(doing_ts)) {
 		opts->options |= OPTION_TS;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 01d47674f7e5..e53e493606c5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -377,14 +377,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 	    skb_checksum_complete(skb)) {
 		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
-		return 0;
+		return NET_RX_DROP;
 	}
 
 	/* Charge it to the socket. */
 	if (sock_queue_rcv_skb(sk,skb)<0) {
 		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
-		return 0;
+		return NET_RX_DROP;
 	}
 
 	return 0;
@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 		if (skb_checksum_complete(skb)) {
 			atomic_inc(&sk->sk_drops);
 			kfree_skb(skb);
-			return 0;
+			return NET_RX_DROP;
 		}
 	}
 
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index e6dfaeac6be3..587f8f60c489 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
 int ipv6_static_sysctl_register(void)
 {
 	static struct ctl_table empty[1];
-	ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
+	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
 	return 0;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 475f89a8aee1..8165df578c92 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -248,8 +248,8 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(drop_unencrypted, sta);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, sta);
+	DEBUGFS_ADD(max_ratectrl_rateidx, sta);
 
 	DEBUGFS_ADD(state, sta);
 	DEBUGFS_ADD(bssid, sta);
@@ -283,8 +283,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(drop_unencrypted, wds);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, wds);
+	DEBUGFS_ADD(max_ratectrl_rateidx, wds);
 
 	DEBUGFS_ADD(peer, wds);
 }
@@ -292,8 +292,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(drop_unencrypted, vlan);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, vlan);
+	DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
 }
 
 static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -381,8 +381,8 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
 static void del_sta_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_DEL(drop_unencrypted, sta);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, sta);
+	DEBUGFS_DEL(max_ratectrl_rateidx, sta);
 
 	DEBUGFS_DEL(state, sta);
 	DEBUGFS_DEL(bssid, sta);
@@ -416,8 +416,8 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
 static void del_wds_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_DEL(drop_unencrypted, wds);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, wds);
+	DEBUGFS_DEL(max_ratectrl_rateidx, wds);
 
 	DEBUGFS_DEL(peer, wds);
 }
@@ -425,8 +425,8 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
 static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_DEL(drop_unencrypted, vlan);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, vlan);
+	DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
 }
 
 static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2bb546744b94..570ae83c71b1 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -472,6 +472,8 @@ struct ieee80211_sub_if_data {
 			struct dentry *auth_transaction;
 			struct dentry *flags;
 			struct dentry *num_beacons_sta;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
 		} sta;
 		struct {
 			struct dentry *drop_unencrypted;
@@ -485,9 +487,13 @@ struct ieee80211_sub_if_data {
 		struct {
 			struct dentry *drop_unencrypted;
 			struct dentry *peer;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
 		} wds;
 		struct {
 			struct dentry *drop_unencrypted;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
 		} vlan;
 		struct {
 			struct dentry *mode;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b631703bcc82..3ccb3599c04f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -376,7 +376,7 @@ errcopy:
 		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
 			tbl->free_node(p, 0);
 	}
-	__mesh_table_free(tbl);
+	__mesh_table_free(newtbl);
 endgrow:
 	return NULL;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e088b440aafa..7d53382f1a5b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -500,51 +500,21 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info(
 static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
 					 struct ieee80211_if_sta *ifsta)
 {
-	char *buf;
-	size_t len;
-	int i;
 	union iwreq_data wrqu;
 
-	if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
-		return;
-
-	buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
-				ifsta->assocresp_ies_len), GFP_KERNEL);
-	if (!buf)
-		return;
-
-	len = sprintf(buf, "ASSOCINFO(");
 	if (ifsta->assocreq_ies) {
-		len += sprintf(buf + len, "ReqIEs=");
-		for (i = 0; i < ifsta->assocreq_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocreq_ies[i]);
-		}
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocreq_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
+				    ifsta->assocreq_ies);
 	}
-	if (ifsta->assocresp_ies) {
-		if (ifsta->assocreq_ies)
-			len += sprintf(buf + len, " ");
-		len += sprintf(buf + len, "RespIEs=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
-	}
-	len += sprintf(buf + len, ")");
 
-	if (len > IW_CUSTOM_MAX) {
-		len = sprintf(buf, "ASSOCRESPIE=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
+	if (ifsta->assocresp_ies) {
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocresp_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
+				    ifsta->assocresp_ies);
 	}
-
-	memset(&wrqu, 0, sizeof(wrqu));
-	wrqu.data.length = len;
-	wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
-
-	kfree(buf);
 }
 
 
@@ -864,7 +834,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
 		}
 	}
 
-	if (count == 8) {
+	if (rates_len > count) {
 		pos = skb_put(skb, rates_len - count + 2);
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
 		*pos++ = rates_len - count;
@@ -2788,7 +2758,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 			jiffies);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 	if (beacon_timestamp > rx_timestamp) {
-#ifndef CONFIG_MAC80211_IBSS_DEBUG
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
 		printk(KERN_DEBUG "%s: beacon TSF higher than "
 		       "local TSF - IBSS merge with BSSID %s\n",
 		       sdata->dev->name, print_mac(mac, mgmt->bssid));
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5cafdd4c8018..8eb79e92e94c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -205,7 +205,7 @@ replay:
 		}
 	}
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_root_sleeping_lock(q);
 
 	if (tp == NULL) {
 		/* Proto-tcf does not exist, create new one */
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 481260a4f10f..e3d8455eebc2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
-	spinlock_t *root_lock = qdisc_root_lock(q);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
 	spin_lock_bh(root_lock);
 	memset(head->fastmap, 0, sizeof(head->fastmap));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 45f442d7de47..1122c952aa99 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -590,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
 	spinlock_t *root_lock;
 
-	root_lock = qdisc_root_lock(oqdisc);
+	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
 	/* Prune old scheduler */
@@ -601,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
-	dev_queue->qdisc = &noop_qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
 
 	spin_unlock_bh(root_lock);
 
@@ -796,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			sch->stab = stab;
 		}
 		if (tca[TCA_RATE]) {
+			spinlock_t *root_lock;
+
+			if ((sch->parent != TC_H_ROOT) &&
+			    !(sch->flags & TCQ_F_INGRESS))
+				root_lock = qdisc_root_sleeping_lock(sch);
+			else
+				root_lock = qdisc_lock(sch);
+
 			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-						qdisc_root_lock(sch),
-						tca[TCA_RATE]);
+						root_lock, tca[TCA_RATE]);
 			if (err) {
 				/*
 				 * Any broken qdiscs that would require
@@ -810,8 +851,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 				goto err_out3;
 			}
 		}
-		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+		qdisc_list_add(sch);
 
 		return sch;
 	}
@@ -850,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 
 	if (tca[TCA_RATE])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_lock(sch), tca[TCA_RATE]);
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
 	return 0;
 }
 
@@ -1127,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
 		goto nla_put_failure;
 
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1419,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
 		goto nla_put_failure;
 
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
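
[Note, not part of the patch: the sch_api.c changes above introduce qdisc_list_add()/qdisc_list_del() so that the root qdisc's child list is only ever touched under qdisc_list_lock. The pairing they are meant to enforce, condensed from the hunks above and the sch_generic.c hunk below, looks roughly like this:]

	/* in qdisc_create(), once the qdisc is fully initialised: */
	qdisc_list_add(sch);		/* takes qdisc_list_lock, links sch into the root's list */

	/* in qdisc_destroy(), before the qdisc is freed: */
	qdisc_list_del(qdisc);		/* same lock, unlinks it so qdisc_lookup() never walks a dying qdisc */
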
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 8fa90d68ec6d..8b06fa900482 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1754,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
-		spinlock_t *root_lock = qdisc_root_lock(sch);
+		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(root_lock);
@@ -1839,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1930,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..9634091ee2f0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@@ -635,7 +634,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 		if (!(qdisc->flags & TCQ_F_BUILTIN))
 			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
 
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		qdisc_reset(qdisc);
 
 		spin_unlock_bh(qdisc_lock(qdisc));
@@ -710,7 +709,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
 		qdisc_destroy(qdisc);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c2b8d9cce3d2..c1e77da8cd09 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0df0df202ed0..d14f02056ae6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
@@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 			  struct sk_buff *skb, struct tcmsg *tcm)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
@@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch),
+				  qdisc_root_sleeping_lock(sch),
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb0294d0b55e..3781e55046d0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	root_lock = qdisc_root_lock(sch);
+	root_lock = qdisc_root_sleeping_lock(sch);
 
 	spin_lock_bh(root_lock);
 	d = xchg(&q->delay_dist, d);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 2c35c678563b..d35ef059abb1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch)
 						txq = netdev_get_tx_queue(master->dev, 0);
 						master->slaves = NULL;
 
-						root_lock = qdisc_root_lock(txq->qdisc);
+						root_lock = qdisc_root_sleeping_lock(txq->qdisc);
 						spin_lock_bh(root_lock);
 						qdisc_reset(txq->qdisc);
 						spin_unlock_bh(root_lock);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 675a5c3e68a6..52db5f60daa0 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
 {
 	struct sctp_auth_bytes *key;
 
+	/* Verify that we are not going to overflow INT_MAX */
+	if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+		return NULL;
+
 	/* Allocate the shared key */
 	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
 	if (!key)
@@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
 	for (i = 0; i < hmacs->shmac_num_idents; i++) {
 		id = hmacs->shmac_idents[i];
 
+		if (id > SCTP_AUTH_HMAC_ID_MAX)
+			return -EOPNOTSUPP;
+
 		if (SCTP_AUTH_HMAC_ID_SHA1 == id)
 			has_sha1 = 1;
 
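
[Note, not part of the patch: the INT_MAX check added to sctp_auth_create_key() guards the sizeof(struct sctp_auth_bytes) + key_len sum handed to kmalloc(); key_len is a user-controlled __u32, so without the check the sum can wrap and the allocation ends up smaller than the length later trusted. A standalone sketch of the same guard, using a hypothetical helper with made-up names:]

static void *alloc_with_header(size_t hdr_len, u32 payload_len, gfp_t gfp)
{
	/* Reject payload lengths that would overflow the allocation size. */
	if ((INT_MAX - payload_len) < hdr_len)
		return NULL;

	return kmalloc(hdr_len + payload_len, gfp);
}
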
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bb5c9ef13046..5ffb9dec1c3f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3086,6 +3086,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 				      int optlen)
 {
 	struct sctp_hmacalgo *hmacs;
+	u32 idents;
 	int err;
 
 	if (!sctp_auth_enable)
@@ -3103,8 +3104,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 		goto out;
 	}
 
-	if (hmacs->shmac_num_idents == 0 ||
-	    hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
+	idents = hmacs->shmac_num_idents;
+	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
+	    (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -3144,6 +3146,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 		goto out;
 	}
 
+	if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
 	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
 		ret = -EINVAL;