author     Linus Torvalds <torvalds@linux-foundation.org>  2008-08-23 15:14:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-08-23 15:14:42 -0400
commit     6450f65168bcf3c03b5fb44c2fe96682c0d3086b (patch)
tree       c6d2c3e0885ef3f73893c7e6d22ea9454d073ee6
parent     7a8fc9b248e77a4eab0613acf30a6811799786b3 (diff)
parent     f410a1fba7afa79d2992620e874a343fdba28332 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
ipv6: protocol for address routes
icmp: icmp_sk() should not use smp_processor_id() in preemptible code
pkt_sched: Fix qdisc list locking
pkt_sched: Fix qdisc_watchdog() vs. dev_deactivate() race
sctp: fix potential panics in the SCTP-AUTH API.
-rw-r--r--  include/net/pkt_sched.h    |  1
-rw-r--r--  include/net/sch_generic.h  |  5
-rw-r--r--  net/ipv4/icmp.c            | 22
-rw-r--r--  net/ipv6/addrconf.c        |  1
-rw-r--r--  net/ipv6/icmp.c            | 23
-rw-r--r--  net/sched/sch_api.c        | 48
-rw-r--r--  net/sched/sch_cbq.c        |  4
-rw-r--r--  net/sched/sch_generic.c    |  5
-rw-r--r--  net/sctp/endpointola.c     |  4
-rw-r--r--  net/sctp/socket.c          | 85
10 files changed, 149 insertions, 49 deletions
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 853fe83d9f37..b786a5b09253 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 84d25f2e6188..b1d2cfea89c5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }
 
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc_sleeping;
+}
+
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest. This allows changing the configuration of some
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 * Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*
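
The icmp.c change above moves the per-CPU socket lookup inside icmp_xmit_lock(): icmp_sk() and its smp_processor_id() call now run only after local_bh_disable() has pinned the path to one CPU, and callers simply give up when the socket lock is contended. Below is a minimal userspace sketch of that pattern, not kernel code; the names (xmit_lock, ctx_lookup, ctx_table) and the pthread mutex standing in for the per-CPU socket lock are illustrative assumptions.

/* Userspace analogue of the icmp_xmit_lock() pattern: resolve the per-context
 * resource only inside the locked/pinned section, and return NULL instead of
 * blocking when it is already busy (the dst_link_failure() re-entry case).
 * Build with: gcc -pthread example.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct xmit_ctx {
	pthread_mutex_t lock;
	int counter;			/* stands in for the ICMP socket state */
};

static struct xmit_ctx ctx_table[4];	/* stands in for the per-CPU sockets */

/* Hypothetical helper: pick the context for the current "CPU". */
static struct xmit_ctx *ctx_lookup(unsigned int cpu)
{
	return &ctx_table[cpu % 4];
}

/* Return the locked context, or NULL when it is already held; the caller
 * then drops the reply, exactly as the fixed icmp_xmit_lock() does. */
static struct xmit_ctx *xmit_lock(unsigned int cpu)
{
	struct xmit_ctx *c = ctx_lookup(cpu);	/* lookup happens here, not earlier */

	if (pthread_mutex_trylock(&c->lock) != 0)
		return NULL;
	return c;
}

static void xmit_unlock(struct xmit_ctx *c)
{
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		pthread_mutex_init(&ctx_table[i].lock, NULL);

	struct xmit_ctx *c = xmit_lock(0);
	if (c == NULL)
		return 0;		/* contended: give up rather than deadlock */
	c->counter++;
	xmit_unlock(c);
	printf("sent %d\n", ctx_table[0].counter);
	return 0;
}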
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ef0efeca6352..e7fb9e0d21b4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -444,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -806,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			goto err_out3;
 		}
 	}
-	if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-		list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+	qdisc_list_add(sch);
 
 	return sch;
 }
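
The sch_api.c hunks above introduce qdisc_list_add()/qdisc_list_del() and make qdisc_lookup() walk the list under the same qdisc_list_lock, since some qdiscs still call qdisc_tree_decrease_qlen() without rtnl_lock(). A small userspace sketch of that serialization follows; it is an illustrative assumption (qd, qd_list, a pthread mutex), not the kernel API.

/* Userspace sketch: every reader and writer of the shared list takes the same
 * lock, so a lookup can never walk an entry that a concurrent delete is
 * unlinking. Build with: gcc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>

struct qd {				/* stands in for struct Qdisc */
	unsigned int handle;
	struct qd *next;
};

static struct qd *qd_list;		/* shared singly linked list */
static pthread_mutex_t qd_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void qd_list_add(struct qd *q)
{
	pthread_mutex_lock(&qd_list_lock);
	q->next = qd_list;
	qd_list = q;
	pthread_mutex_unlock(&qd_list_lock);
}

static void qd_list_del(struct qd *q)
{
	pthread_mutex_lock(&qd_list_lock);
	for (struct qd **pp = &qd_list; *pp; pp = &(*pp)->next) {
		if (*pp == q) {
			*pp = q->next;
			break;
		}
	}
	pthread_mutex_unlock(&qd_list_lock);
}

static struct qd *qd_lookup(unsigned int handle)
{
	struct qd *q;

	pthread_mutex_lock(&qd_list_lock);	/* lookup holds the lock too */
	for (q = qd_list; q; q = q->next)
		if (q->handle == handle)
			break;
	pthread_mutex_unlock(&qd_list_lock);
	return q;
}

int main(void)
{
	struct qd a = { .handle = 0x10000 };

	qd_list_add(&a);
	printf("found: %s\n", qd_lookup(0x10000) ? "yes" : "no");
	qd_list_del(&a);
	printf("found: %s\n", qd_lookup(0x10000) ? "yes" : "no");
	return 0;
}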
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 47ef492c4ff4..8fa90d68ec6d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(cl->qdisc)->state))
+		return;
+
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..5f0ade7806a7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e39a0cdef184..4c8d9f45ce09 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 		/* Initialize the CHUNKS parameter */
 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
 
 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		if (sctp_addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-			auth_chunks->param_hdr.length =
-					htons(sizeof(sctp_paramhdr_t) + 2);
+			auth_chunks->param_hdr.length += htons(2);
 		}
 	}
 
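
The endpointola.c fix always initialises the CHUNKS parameter length to cover the parameter header and grows it by one byte per chunk id actually stored, instead of leaving it unset when ADD-IP is disabled. The sketch below works through that bookkeeping in host byte order, a simplification: the kernel stores the length with htons(), and the struct layout here is a reduced stand-in (type values per the SCTP AUTH/ADD-IP RFCs).

/* Host-byte-order sketch of the CHUNKS parameter length bookkeeping. */
#include <stdint.h>
#include <stdio.h>

#define PARAM_HDR_LEN 4			/* sizeof(sctp_paramhdr_t): type + length */

struct chunks_param {			/* simplified sctp_chunks_param */
	uint16_t type;
	uint16_t length;		/* total TLV length, header included */
	uint8_t  chunks[2];
};

int main(void)
{
	struct chunks_param auth_chunks = { .type = 0x8003 /* Chunk List */ };
	int addip_enable = 1;

	auth_chunks.length = PARAM_HDR_LEN;	/* always initialised now */
	if (addip_enable) {
		auth_chunks.chunks[0] = 0xc1;	/* ASCONF */
		auth_chunks.chunks[1] = 0x80;	/* ASCONF-ACK */
		auth_chunks.length += 2;	/* two one-byte chunk ids */
	}

	/* 4 without ADD-IP, 6 with it — never an uninitialised value. */
	printf("CHUNKS parameter length = %u\n", (unsigned)auth_chunks.length);
	return 0;
}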
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dbb79adf8f3c..bb5c9ef13046 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
@@ -3160,6 +3169,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3197,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5212,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				      char __user *optval, int __user *optlen)
 {
+	struct sctp_hmacalgo __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 param_len;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!sctp_auth_enable)
+		return -EACCES;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	param_len = ntohs(hmacs->param_hdr.length);
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
 
-	if (len < param_len)
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
 		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, hmacs->hmac_ids, len))
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
 		return -EFAULT;
-
 	return 0;
 }
 
@@ -5219,6 +5244,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5261,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
 
+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -5243,13 +5277,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32 num_chunks;
+	u32 num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5258,20 +5295,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;
 
 	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;
 
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
-	if (put_user(len, optlen))
+	if (copy_to_user(to, ch->chunks, num_chunks))
 		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen)) return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -5282,13 +5320,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32 num_chunks;
+	u32 num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5301,17 +5342,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;
 
+	if (!ch)
+		goto num;
+
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
 
 	return 0;
 }
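
Taken together, the socket.c hunks converge on one getsockopt pattern: refuse with -EACCES when SCTP-AUTH is disabled, require the user buffer to hold the fixed option header plus the variable chunk/HMAC data, tolerate a missing parameter instead of dereferencing a NULL pointer, and report back the full header-plus-data length. Below is a userspace sketch of that pattern; the simplified authchunks struct and the get_auth_chunks() helper are illustrative assumptions, not the kernel implementation.

/* Userspace sketch of the fixed getsockopt bounds handling. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct authchunks {			/* simplified stand-in for sctp_authchunks */
	uint32_t number_of_chunks;
	uint8_t  chunks[];
};

/* Copy data_len chunk ids into buf of size *len, returning -errno on failure
 * and reporting the consumed length (header + data) back through *len. */
static int get_auth_chunks(void *buf, size_t *len,
			   const uint8_t *ids, size_t data_len, int auth_enable)
{
	struct authchunks *p = buf;

	if (!auth_enable)
		return -EACCES;			/* feature is off */
	if (*len < sizeof(*p))
		return -EINVAL;			/* no room for the header */
	if (*len < sizeof(*p) + data_len)
		return -EINVAL;			/* no room for the payload */

	p->number_of_chunks = (uint32_t)data_len;
	memcpy(p->chunks, ids, data_len);
	*len = sizeof(*p) + data_len;		/* report header + data */
	return 0;
}

int main(void)
{
	uint32_t buf[16];
	size_t len = sizeof(buf);
	const uint8_t ids[] = { 0xc1, 0x80 };	/* ASCONF, ASCONF-ACK */

	int err = get_auth_chunks(buf, &len, ids, sizeof(ids), 1);
	printf("err=%d reported len=%zu\n", err, len);
	return 0;
}

The key design point mirrored from the hunks is that the reported length always includes the fixed header, so a caller that passes exactly the returned length on a second call is guaranteed enough room.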