path: root/net/sctp
author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b  /net/sctp
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'net/sctp')
-rw-r--r--   net/sctp/associola.c       33
-rw-r--r--   net/sctp/auth.c             1
-rw-r--r--   net/sctp/bind_addr.c        2
-rw-r--r--   net/sctp/chunk.c           16
-rw-r--r--   net/sctp/endpointola.c      1
-rw-r--r--   net/sctp/input.c           65
-rw-r--r--   net/sctp/inqueue.c          1
-rw-r--r--   net/sctp/ipv6.c            23
-rw-r--r--   net/sctp/output.c          49
-rw-r--r--   net/sctp/outqueue.c        27
-rw-r--r--   net/sctp/primitive.c        1
-rw-r--r--   net/sctp/proc.c             4
-rw-r--r--   net/sctp/protocol.c        26
-rw-r--r--   net/sctp/sm_make_chunk.c  108
-rw-r--r--   net/sctp/sm_sideeffect.c   71
-rw-r--r--   net/sctp/sm_statefuns.c    37
-rw-r--r--   net/sctp/socket.c         390
-rw-r--r--   net/sctp/ssnmap.c           1
-rw-r--r--   net/sctp/sysctl.c          62
-rw-r--r--   net/sctp/transport.c       52
-rw-r--r--   net/sctp/tsnmap.c           1
-rw-r--r--   net/sctp/ulpevent.c         1
-rw-r--r--   net/sctp/ulpqueue.c         1
23 files changed, 435 insertions, 538 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 7eed77a39d0d..99c93ee98ad9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -63,6 +63,12 @@
 static void sctp_assoc_bh_rcv(struct work_struct *work);
 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
 
+/* Keep track of the new idr low so that we don't re-use association id
+ * numbers too fast.  It is protected by the idr spin lock and is in the
+ * range of 1 - INT_MAX.
+ */
+static u32 idr_low = 1;
+
 
 /* 1st Level Abstractions. */
 
@@ -167,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
     asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
     asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
     asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-        sp->autoclose * HZ;
+        (unsigned long)sp->autoclose * HZ;
 
     /* Initilizes the timers */
     for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -512,7 +518,13 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
      * to this destination address earlier. The sender MUST set
      * CYCLING_CHANGEOVER to indicate that this switch is a
      * double switch to the same destination address.
+     *
+     * Really, only bother if we have data queued or outstanding on
+     * the association.
      */
+    if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
+        return;
+
     if (transport->cacc.changeover_active)
         transport->cacc.cycling_changeover = changeover;
 
@@ -732,6 +744,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 
     peer->partial_bytes_acked = 0;
     peer->flight_size = 0;
+    peer->burst_limited = 0;
 
     /* Set the transport's RTO.initial value */
     peer->rto = asoc->rto_initial;
@@ -1181,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
     /* Remove any peer addresses not present in the new association. */
     list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
         trans = list_entry(pos, struct sctp_transport, transports);
-        if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
-            sctp_assoc_del_peer(asoc, &trans->ipaddr);
+        if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+            sctp_assoc_rm_peer(asoc, trans);
+            continue;
+        }
 
         if (asoc->state >= SCTP_STATE_ESTABLISHED)
             sctp_transport_reset(trans);
@@ -1377,8 +1392,9 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
     case SCTP_STATE_SHUTDOWN_RECEIVED:
     case SCTP_STATE_SHUTDOWN_SENT:
         if ((asoc->rwnd > asoc->a_rwnd) &&
-            ((asoc->rwnd - asoc->a_rwnd) >=
-             min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu)))
+            ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
+                 (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+                 asoc->pathmtu)))
             return 1;
         break;
     default:
@@ -1545,7 +1561,12 @@ retry:
 
     spin_lock_bh(&sctp_assocs_id_lock);
     error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
-                              1, &assoc_id);
+                              idr_low, &assoc_id);
+    if (!error) {
+        idr_low = assoc_id + 1;
+        if (idr_low == INT_MAX)
+            idr_low = 1;
+    }
     spin_unlock_bh(&sctp_assocs_id_lock);
     if (error == -EAGAIN)
         goto retry;
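
Editor's note: the assoc_id hunk above starts idr_get_new_above() at a moving low-water mark instead of always at 1, so recently freed association ids are not handed out again right away. A minimal standalone sketch of that wrap-around bookkeeping, assuming a hypothetical next_assoc_id() in place of the idr allocator:

    /* Userspace sketch (not kernel code) of the idr_low wrap-around logic. */
    #include <limits.h>
    #include <stdio.h>

    static unsigned int idr_low = 1;

    /* next_assoc_id() stands in for idr_get_new_above() plus the new
     * bookkeeping: advance the low-water mark after every successful
     * allocation and wrap back to 1 before reaching INT_MAX.
     */
    static unsigned int next_assoc_id(void)
    {
        unsigned int id = idr_low;  /* pretend the allocator returned idr_low */

        idr_low = id + 1;
        if (idr_low == INT_MAX)
            idr_low = 1;
        return id;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("assoc_id %u\n", next_assoc_id());
        return 0;
    }
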
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 56935bbc1496..86366390038a 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -34,6 +34,7 @@
  * be incorporated into the next SCTP release.
  */
 
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 13a6fba41077..faf71d179e46 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -43,6 +43,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/slab.h>
 #include <linux/in.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
@@ -186,7 +187,6 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
     addr->valid = 1;
 
     INIT_LIST_HEAD(&addr->list);
-    INIT_RCU_HEAD(&addr->rcu);
 
     /* We always hold a socket lock when calling this function,
      * and that acts as a writer synchronizing lock.
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index acf7c4d128f7..3eab6db59a37 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -42,6 +42,7 @@
 #include <linux/net.h>
 #include <linux/inet.h>
 #include <linux/skbuff.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
@@ -263,9 +264,18 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
         if (0 == i)
             frag |= SCTP_DATA_FIRST_FRAG;
 
-        if ((i == (whole - 1)) && !over)
+        if ((i == (whole - 1)) && !over) {
             frag |= SCTP_DATA_LAST_FRAG;
 
+            /* The application requests to set the I-bit of the
+             * last DATA chunk of a user message when providing
+             * the user message to the SCTP implementation.
+             */
+            if ((sinfo->sinfo_flags & SCTP_EOF) ||
+                (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
+                frag |= SCTP_DATA_SACK_IMM;
+        }
+
         chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
 
         if (!chunk)
@@ -297,6 +307,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
         else
             frag = SCTP_DATA_LAST_FRAG;
 
+        if ((sinfo->sinfo_flags & SCTP_EOF) ||
+            (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
+            frag |= SCTP_DATA_SACK_IMM;
+
         chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
 
         if (!chunk)
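
Editor's note: the chunk.c hunks set the I-bit (SCTP_DATA_SACK_IMM) on the last DATA fragment whenever the application passed SCTP_EOF or SCTP_SACK_IMMEDIATELY, asking the peer to SACK right away. A standalone sketch of that flag selection, using illustrative constants in place of the kernel's SCTP_* definitions:

    /* Userspace sketch of the I-bit selection; the *_FLAG values below are
     * stand-ins, not the real SCTP constants. */
    #include <stdio.h>

    #define EOF_FLAG        0x01  /* stands in for SCTP_EOF */
    #define SACK_IMM_FLAG   0x02  /* stands in for SCTP_SACK_IMMEDIATELY */
    #define DATA_LAST_FRAG  0x01  /* stands in for SCTP_DATA_LAST_FRAG */
    #define DATA_SACK_IMM   0x08  /* stands in for SCTP_DATA_SACK_IMM (I-bit) */

    static unsigned last_frag_flags(unsigned sinfo_flags)
    {
        unsigned frag = DATA_LAST_FRAG;

        /* Request an immediate SACK when the message ends the association
         * (EOF) or the sender explicitly asked for it. */
        if (sinfo_flags & (EOF_FLAG | SACK_IMM_FLAG))
            frag |= DATA_SACK_IMM;
        return frag;
    }

    int main(void)
    {
        printf("flags=%#x\n", last_frag_flags(SACK_IMM_FLAG));
        return 0;
    }
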
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 905fda582b92..7ec09ba03a1c 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
     /* Use SCTP specific send buffer space queues. */
     ep->sndbuf_policy = sctp_sndbuf_policy;
 
+    sk->sk_data_ready = sctp_data_ready;
     sk->sk_write_space = sctp_write_space;
     sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67add..ea2192444ce6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -53,6 +53,7 @@
 #include <linux/socket.h>
 #include <linux/ip.h>
 #include <linux/time.h> /* For struct timeval */
+#include <linux/slab.h>
 #include <net/ip.h>
 #include <net/icmp.h>
 #include <net/snmp.h>
@@ -75,7 +76,7 @@ static struct sctp_association *__sctp_lookup_association(
                     const union sctp_addr *peer,
                     struct sctp_transport **pt);
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +266,13 @@ int sctp_rcv(struct sk_buff *skb)
     }
 
     if (sock_owned_by_user(sk)) {
+        if (sctp_add_backlog(sk, skb)) {
+            sctp_bh_unlock_sock(sk);
+            sctp_chunk_free(chunk);
+            skb = NULL; /* sctp_chunk_free already freed the skb */
+            goto discard_release;
+        }
         SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-        sctp_add_backlog(sk, skb);
     } else {
         SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
         sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +342,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
         sctp_bh_lock_sock(sk);
 
         if (sock_owned_by_user(sk)) {
-            sk_add_backlog(sk, skb);
-            backloged = 1;
+            if (sk_add_backlog(sk, skb))
+                sctp_chunk_free(chunk);
+            else
+                backloged = 1;
         } else
             sctp_inq_push(inqueue, chunk);
 
@@ -362,22 +370,27 @@ done:
     return 0;
 }
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
     struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
     struct sctp_ep_common *rcvr = chunk->rcvr;
+    int ret;
 
-    /* Hold the assoc/ep while hanging on the backlog queue.
-     * This way, we know structures we need will not disappear from us
-     */
-    if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-        sctp_association_hold(sctp_assoc(rcvr));
-    else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-        sctp_endpoint_hold(sctp_ep(rcvr));
-    else
-        BUG();
+    ret = sk_add_backlog(sk, skb);
+    if (!ret) {
+        /* Hold the assoc/ep while hanging on the backlog queue.
+         * This way, we know structures we need will not disappear
+         * from us
+         */
+        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+            sctp_association_hold(sctp_assoc(rcvr));
+        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+            sctp_endpoint_hold(sctp_ep(rcvr));
+        else
+            BUG();
+    }
+    return ret;
 
-    sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -427,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 {
     SCTP_DEBUG_PRINTK("%s\n", __func__);
 
-    sctp_do_sm(SCTP_EVENT_T_OTHER,
-               SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
-               asoc->state, asoc->ep, asoc, t,
-               GFP_ATOMIC);
+    if (sock_owned_by_user(sk)) {
+        if (timer_pending(&t->proto_unreach_timer))
+            return;
+        else {
+            if (!mod_timer(&t->proto_unreach_timer,
+                           jiffies + (HZ/20)))
+                sctp_association_hold(asoc);
+        }
+
+    } else {
+        if (timer_pending(&t->proto_unreach_timer) &&
+            del_timer(&t->proto_unreach_timer))
+            sctp_association_put(asoc);
 
+        sctp_do_sm(SCTP_EVENT_T_OTHER,
+                   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+                   asoc->state, asoc->ep, asoc, t,
+                   GFP_ATOMIC);
+    }
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
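
Editor's note: the input.c hunks make backlog queueing fallible; the assoc/endpoint reference is only taken once sk_add_backlog() has actually accepted the skb, and failure is reported so the caller frees the chunk instead of leaking it. A standalone sketch of that pattern, with illustrative types and a hypothetical queue_backlog() in place of sk_add_backlog():

    /* Userspace sketch of "take the reference only on successful queueing". */
    #include <stdio.h>

    struct rcvr { int refcnt; };

    static int queue_backlog(void)
    {
        return -1;  /* pretend the backlog is full (sk_add_backlog failed) */
    }

    static int add_backlog(struct rcvr *r)
    {
        int ret = queue_backlog();

        if (!ret)
            r->refcnt++;  /* hold the receiver only when the skb was queued */
        return ret;
    }

    int main(void)
    {
        struct rcvr r = { .refcnt = 1 };

        if (add_backlog(&r))
            printf("backlog full: caller frees the chunk, refcnt=%d\n", r.refcnt);
        return 0;
    }
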
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index bbf5dd2a97c4..ccb6dc48d15b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -46,6 +46,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 /* Initialize an SCTP inqueue. */
 void sctp_inq_init(struct sctp_inq *queue)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bb280e60e00a..9fb5d37c37ad 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -58,6 +58,7 @@
 #include <linux/netdevice.h>
 #include <linux/init.h>
 #include <linux/ipsec.h>
+#include <linux/slab.h>
 
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
@@ -381,7 +382,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
             addr->a.v6.sin6_scope_id = dev->ifindex;
             addr->valid = 1;
             INIT_LIST_HEAD(&addr->list);
-            INIT_RCU_HEAD(&addr->rcu);
             list_add_tail(&addr->list, addrlist);
         }
     }
@@ -837,15 +837,16 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
     if (type & IPV6_ADDR_LINKLOCAL) {
         if (!addr->v6.sin6_scope_id)
             return 0;
-        dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
-        if (!dev)
-            return 0;
-        if (!ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
+        rcu_read_lock();
+        dev = dev_get_by_index_rcu(&init_net,
+                                   addr->v6.sin6_scope_id);
+        if (!dev ||
+            !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
                            dev, 0)) {
-            dev_put(dev);
+            rcu_read_unlock();
             return 0;
         }
-        dev_put(dev);
+        rcu_read_unlock();
     } else if (type == IPV6_ADDR_MAPPED) {
         if (!opt->v4mapped)
             return 0;
@@ -873,10 +874,12 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
     if (type & IPV6_ADDR_LINKLOCAL) {
         if (!addr->v6.sin6_scope_id)
             return 0;
-        dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
+        rcu_read_lock();
+        dev = dev_get_by_index_rcu(&init_net,
+                                   addr->v6.sin6_scope_id);
+        rcu_read_unlock();
         if (!dev)
             return 0;
-        dev_put(dev);
     }
     af = opt->pf->af;
     }
@@ -930,7 +933,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
     .protocol      = IPPROTO_SCTP,
     .prot          = &sctpv6_prot,
     .ops           = &inet6_seqpacket_ops,
-    .capability    = -1,
     .no_check      = 0,
     .flags         = SCTP_PROTOSW_FLAG
 };
@@ -939,7 +941,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
     .protocol      = IPPROTO_SCTP,
     .prot          = &sctpv6_prot,
     .ops           = &inet6_seqpacket_ops,
-    .capability    = -1,
     .no_check      = 0,
     .flags         = SCTP_PROTOSW_FLAG,
 };
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 5cbda8f1ddfd..fad261d41ec2 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -48,6 +48,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <net/inet_ecn.h>
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -429,23 +430,22 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         list_del_init(&chunk->list);
         if (sctp_chunk_is_data(chunk)) {
 
-            if (!chunk->has_tsn) {
-                sctp_chunk_assign_ssn(chunk);
-                sctp_chunk_assign_tsn(chunk);
+            if (!chunk->resent) {
 
             /* 6.3.1 C4) When data is in flight and when allowed
              * by rule C5, a new RTT measurement MUST be made each
              * round trip.  Furthermore, new RTT measurements
              * SHOULD be made no more than once per round-trip
              * for a given destination transport address.
              */
 
             if (!tp->rto_pending) {
                 chunk->rtt_in_progress = 1;
                 tp->rto_pending = 1;
             }
-            } else
-                chunk->resent = 1;
+            }
+
+            chunk->resent = 1;
 
             has_data = 1;
         }
@@ -557,8 +557,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         struct timer_list *timer;
         unsigned long timeout;
 
-        tp->last_time_used = jiffies;
-
         /* Restart the AUTOCLOSE timer when sending data. */
         if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
             timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
@@ -617,7 +615,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
     sctp_xmit_t retval = SCTP_XMIT_OK;
     size_t datasize, rwnd, inflight, flight_size;
     struct sctp_transport *transport = packet->transport;
-    __u32 max_burst_bytes;
     struct sctp_association *asoc = transport->asoc;
     struct sctp_outq *q = &asoc->outqueue;
 
@@ -650,28 +647,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
         }
     }
 
-    /* sctpimpguide-05 2.14.2
-     * D) When the time comes for the sender to
-     * transmit new DATA chunks, the protocol parameter Max.Burst MUST
-     * first be applied to limit how many new DATA chunks may be sent.
-     * The limit is applied by adjusting cwnd as follows:
-     *     if ((flightsize + Max.Burst * MTU) < cwnd)
-     *         cwnd = flightsize + Max.Burst * MTU
-     */
-    max_burst_bytes = asoc->max_burst * asoc->pathmtu;
-    if ((flight_size + max_burst_bytes) < transport->cwnd) {
-        transport->cwnd = flight_size + max_burst_bytes;
-        SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
-                          "transport: %p, cwnd: %d, "
-                          "ssthresh: %d, flight_size: %d, "
-                          "pba: %d\n",
-                          __func__, transport,
-                          transport->cwnd,
-                          transport->ssthresh,
-                          transport->flight_size,
-                          transport->partial_bytes_acked);
-    }
-
     /* RFC 2960 6.1 Transmission of DATA Chunks
      *
      * B) At any given time, the sender MUST NOT transmit new data
@@ -747,6 +722,8 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
     /* Has been accepted for transmission. */
     if (!asoc->peer.prsctp_capable)
         chunk->msg->can_abandon = 0;
+    sctp_chunk_assign_tsn(chunk);
+    sctp_chunk_assign_ssn(chunk);
 }
 
 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 23e5e97aa617..abfc0b8dee74 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -50,6 +50,7 @@
 #include <linux/list.h>   /* For struct list_head */
 #include <linux/socket.h>
 #include <linux/ip.h>
+#include <linux/slab.h>
 #include <net/sock.h>     /* For skb_set_owner_w */
 
 #include <net/sctp/sctp.h>
@@ -191,8 +192,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
                    __u32 tsn)
 {
     if (primary->cacc.changeover_active &&
-        (sctp_cacc_skip_3_1(primary, transport, count_of_newacks)
-         || sctp_cacc_skip_3_2(primary, tsn)))
+        (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
+         sctp_cacc_skip_3_2(primary, tsn)))
         return 1;
     return 0;
 }
@@ -921,6 +922,14 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
         goto sctp_flush_out;
     }
 
+    /* Apply Max.Burst limitation to the current transport in
+     * case it will be used for new data.  We are going to
+     * reset it before we return, but we want to apply the limit
+     * to the currently queued data.
+     */
+    if (transport)
+        sctp_transport_burst_limited(transport);
+
     /* Finally, transmit new packets. */
     while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
         /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
@@ -966,6 +975,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
             packet = &transport->packet;
             sctp_packet_config(packet, vtag,
                                asoc->peer.ecn_capable);
+            /* We've switched transports, so apply the
+             * Burst limit to the new transport.
+             */
+            sctp_transport_burst_limited(transport);
         }
 
         SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
@@ -1001,6 +1014,13 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
             break;
 
         case SCTP_XMIT_OK:
+            /* The sender is in the SHUTDOWN-PENDING state;
+             * the sender MAY set the I-bit in the DATA
+             * chunk header.
+             */
+            if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+                chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
+
             break;
 
         default:
@@ -1053,6 +1073,9 @@ sctp_flush_out:
         packet = &t->packet;
         if (!sctp_packet_empty(packet))
             error = sctp_packet_transmit(packet);
+
+        /* Clear the burst limited state, if any */
+        sctp_transport_burst_reset(t);
     }
 
     return error;
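
Editor's note: the output.c and outqueue.c hunks move the Max.Burst rule (cwnd clamped to flightsize + Max.Burst * MTU) from the per-chunk append path into sctp_outq_flush(), applying it per transport before new data is sent and undoing it once the flush completes. A standalone sketch of that clamp-and-restore behaviour; the struct fields and helper names below are illustrative, not the kernel API:

    /* Userspace sketch of "limit burst while flushing, then reset". */
    #include <stdio.h>

    struct transport {
        unsigned cwnd;
        unsigned old_cwnd;      /* stands in for the new burst_limited field */
        unsigned flight_size;
    };

    static void burst_limit(struct transport *t, unsigned max_burst, unsigned mtu)
    {
        unsigned limit = t->flight_size + max_burst * mtu;

        if (limit < t->cwnd) {
            t->old_cwnd = t->cwnd;  /* remember cwnd so the clamp can be undone */
            t->cwnd = limit;
        }
    }

    static void burst_reset(struct transport *t)
    {
        if (t->old_cwnd) {
            t->cwnd = t->old_cwnd;
            t->old_cwnd = 0;
        }
    }

    int main(void)
    {
        struct transport t = { .cwnd = 65536, .flight_size = 3000 };

        burst_limit(&t, 4, 1500);   /* clamp to 3000 + 4 * 1500 = 9000 */
        printf("cwnd while flushing: %u\n", t.cwnd);
        burst_reset(&t);
        printf("cwnd after flush:    %u\n", t.cwnd);
        return 0;
    }
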
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 8cb4f060bce6..534c7eae9d15 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -50,6 +50,7 @@
 #include <linux/socket.h>
 #include <linux/ip.h>
 #include <linux/time.h> /* For struct timeval */
+#include <linux/gfp.h>
 #include <net/sock.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac4..784bcc9a979d 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
 #include <net/sctp/sctp.h>
 #include <net/ip.h> /* for snmp_fold_field */
 
-static struct snmp_mib sctp_snmp_list[] = {
+static const struct snmp_mib sctp_snmp_list[] = {
     SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
     SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
     SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
@@ -83,7 +83,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 
     for (i = 0; sctp_snmp_list[i].name != NULL; i++)
         seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                   snmp_fold_field((void **)sctp_statistics,
+                   snmp_fold_field((void __percpu **)sctp_statistics,
                                    sctp_snmp_list[i].entry));
 
     return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 612dc878e05c..a56f98e82f92 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -54,6 +54,7 @@
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
+#include <linux/slab.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
@@ -188,7 +189,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
         addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
         addr->valid = 1;
         INIT_LIST_HEAD(&addr->list);
-        INIT_RCU_HEAD(&addr->rcu);
         list_add_tail(&addr->list, addrlist);
     }
     }
@@ -205,14 +205,14 @@ static void sctp_get_local_addr_list(void)
     struct list_head *pos;
     struct sctp_af *af;
 
-    read_lock(&dev_base_lock);
-    for_each_netdev(&init_net, dev) {
+    rcu_read_lock();
+    for_each_netdev_rcu(&init_net, dev) {
         __list_for_each(pos, &sctp_address_families) {
             af = list_entry(pos, struct sctp_af, list);
             af->copy_addrlist(&sctp_local_addr_list, dev);
         }
     }
-    read_unlock(&dev_base_lock);
+    rcu_read_unlock();
 }
 
 /* Free the existing local addresses. */
@@ -296,19 +296,19 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
 {
     addr->v4.sin_family = AF_INET;
     addr->v4.sin_port = 0;
-    addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr;
+    addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
 static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 {
-    inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
+    inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
 }
 
 /* Initialize sk->sk_daddr from sctp_addr. */
 static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
 {
-    inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
+    inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
 }
 
 /* Initialize a sctp_addr from an address parameter. */
@@ -598,7 +598,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 
     newinet = inet_sk(newsk);
 
-    newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
+    newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
 
     sk_refcnt_debug_inc(newsk);
 
@@ -909,7 +909,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
     .protocol   = IPPROTO_SCTP,
     .prot       = &sctp_prot,
     .ops        = &inet_seqpacket_ops,
-    .capability = -1,
     .no_check   = 0,
     .flags      = SCTP_PROTOSW_FLAG
 };
@@ -918,7 +917,6 @@ static struct inet_protosw sctp_stream_protosw = {
     .protocol   = IPPROTO_SCTP,
     .prot       = &sctp_prot,
     .ops        = &inet_seqpacket_ops,
-    .capability = -1,
     .no_check   = 0,
     .flags      = SCTP_PROTOSW_FLAG
 };
@@ -998,12 +996,13 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
 
 static inline int init_sctp_mibs(void)
 {
-    return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib));
+    return snmp_mib_init((void __percpu **)sctp_statistics,
+                         sizeof(struct sctp_mib));
 }
 
 static inline void cleanup_sctp_mibs(void)
 {
-    snmp_mib_free((void**)sctp_statistics);
+    snmp_mib_free((void __percpu **)sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
@@ -1260,6 +1259,9 @@ SCTP_STATIC __init int sctp_init(void)
     /* Set SCOPE policy to enabled */
     sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
 
+    /* Set the default rwnd update threshold */
+    sctp_rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
     sctp_sysctl_register();
 
     INIT_LIST_HEAD(&sctp_address_families);
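
Editor's note: sctp_init() above gives sctp_rwnd_upd_shift a default, and the associola.c hunk switches the window-update test from min_t to max_t: a new rwnd is only advertised once the gap between the real receive window and the last advertised one reaches max(sk_rcvbuf >> shift, pathmtu). A standalone sketch of that predicate, with illustrative names:

    /* Userspace sketch of the sctp_peer_needs_update() threshold test. */
    #include <stdio.h>

    static int peer_needs_update(unsigned rwnd, unsigned a_rwnd,
                                 unsigned rcvbuf, unsigned pathmtu,
                                 unsigned rwnd_upd_shift)
    {
        unsigned threshold = rcvbuf >> rwnd_upd_shift;

        if (threshold < pathmtu)
            threshold = pathmtu;    /* max(rcvbuf >> shift, pathmtu) */
        return rwnd > a_rwnd && (rwnd - a_rwnd) >= threshold;
    }

    int main(void)
    {
        /* 64 KiB rcvbuf with a shift of 4 -> 4096-byte threshold */
        printf("%d\n", peer_needs_update(20000, 17000, 65536, 1500, 4)); /* 0 */
        printf("%d\n", peer_needs_update(22000, 17000, 65536, 1500, 4)); /* 1 */
        return 0;
    }
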
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9d881a61ac02..30c1767186b8 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -58,6 +58,7 @@
 #include <linux/inet.h>
 #include <linux/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 
 #include <linux/skbuff.h>
@@ -107,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
     cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };
 
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
  * provided chunk, as most cause codes will be embedded inside an
  * abort chunk.
  */
@@ -124,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
     chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
 }
 
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk.  Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+                          size_t paylen)
+{
+    sctp_errhdr_t err;
+    __u16 len;
+
+    /* Cause code constants are now defined in network order. */
+    err.cause = cause_code;
+    len = sizeof(sctp_errhdr_t) + paylen;
+    err.length = htons(len);
+
+    if (skb_tailroom(chunk->skb) > len)
+        return -ENOSPC;
+    chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+                                                 sizeof(sctp_errhdr_t),
+                                                 &err);
+    return 0;
+}
 /* 3.3.2 Initiation (INIT) (1)
  *
  * This chunk is used to initiate a SCTP association between two
@@ -207,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
     sp = sctp_sk(asoc->base.sk);
     num_types = sp->pf->supported_addrs(sp, types);
 
-    chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+    chunksize = sizeof(init) + addrs_len;
+    chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
     chunksize += sizeof(ecap_param);
 
     if (sctp_prsctp_enable)
@@ -237,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
         /* Add HMACS parameter length if any were defined */
         auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
         if (auth_hmacs->length)
-            chunksize += ntohs(auth_hmacs->length);
+            chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
         else
             auth_hmacs = NULL;
 
         /* Add CHUNKS parameter length */
         auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
         if (auth_chunks->length)
-            chunksize += ntohs(auth_chunks->length);
+            chunksize += WORD_ROUND(ntohs(auth_chunks->length));
         else
             auth_chunks = NULL;
 
@@ -254,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 
     /* If we have any extensions to report, account for that */
     if (num_ext)
-        chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+        chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+                                num_ext);
 
     /* RFC 2960 3.3.2 Initiation (INIT) (1)
      *
@@ -396,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 
         auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
         if (auth_hmacs->length)
-            chunksize += ntohs(auth_hmacs->length);
+            chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
         else
             auth_hmacs = NULL;
 
         auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
         if (auth_chunks->length)
-            chunksize += ntohs(auth_chunks->length);
+            chunksize += WORD_ROUND(ntohs(auth_chunks->length));
         else
             auth_chunks = NULL;
 
@@ -411,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
     }
 
     if (num_ext)
-        chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+        chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+                                num_ext);
 
     /* Now allocate and fill out the chunk. */
     retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -987,7 +1014,10 @@ static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
 
     target = skb_put(chunk->skb, len);
 
-    memcpy(target, data, len);
+    if (data)
+        memcpy(target, data, len);
+    else
+        memset(target, 0, len);
 
     /* Adjust the chunk length field. */
     chunk->chunk_hdr->length = htons(chunklen + len);
@@ -1125,20 +1155,40 @@ nodata:
     return retval;
 }
 
+/* Create an Operation Error chunk of a fixed size,
+ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
+ * This is a helper function to allocate an error chunk for
+ * those invalid parameter codes in which we may not want
+ * to report all the errors, if the incoming chunk is large
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+    const struct sctp_association *asoc,
+    const struct sctp_chunk *chunk)
+{
+    size_t size = asoc ? asoc->pathmtu : 0;
+
+    if (!size)
+        size = SCTP_DEFAULT_MAXSEGMENT;
+
+    return sctp_make_op_error_space(asoc, chunk, size);
+}
+
 /* Create an Operation Error chunk. */
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       __be16 cause_code, const void *payload,
-                                      size_t paylen)
+                                      size_t paylen, size_t reserve_tail)
 {
     struct sctp_chunk *retval;
 
-    retval = sctp_make_op_error_space(asoc, chunk, paylen);
+    retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
     if (!retval)
         goto nodata;
 
-    sctp_init_cause(retval, cause_code, paylen);
+    sctp_init_cause(retval, cause_code, paylen + reserve_tail);
     sctp_addto_chunk(retval, paylen, payload);
+    if (reserve_tail)
+        sctp_addto_param(retval, reserve_tail, NULL);
 
 nodata:
     return retval;
@@ -1365,6 +1415,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
     return target;
 }
 
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+                             int len, const void *data)
+{
+    if (skb_tailroom(chunk->skb) > len)
+        return sctp_addto_chunk(chunk, len, data);
+    else
+        return NULL;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1968,13 +2030,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
          * returning multiple unknown parameters.
          */
         if (NULL == *errp)
-            *errp = sctp_make_op_error_space(asoc, chunk,
-                    ntohs(chunk->chunk_hdr->length));
+            *errp = sctp_make_op_error_fixed(asoc, chunk);
 
         if (*errp) {
-            sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+            sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
                     WORD_ROUND(ntohs(param.p->length)));
-            sctp_addto_chunk(*errp,
+            sctp_addto_chunk_fixed(*errp,
                     WORD_ROUND(ntohs(param.p->length)),
                     param.v);
         } else {
@@ -3309,21 +3370,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
     sctp_chunk_free(asconf);
     asoc->addip_last_asconf = NULL;
 
-    /* Send the next asconf chunk from the addip chunk queue. */
-    if (!list_empty(&asoc->addip_chunk_list)) {
-        struct list_head *entry = asoc->addip_chunk_list.next;
-        asconf = list_entry(entry, struct sctp_chunk, list);
-
-        list_del_init(entry);
-
-        /* Hold the chunk until an ASCONF_ACK is received. */
-        sctp_chunk_hold(asconf);
-        if (sctp_primitive_ASCONF(asoc, asconf))
-            sctp_chunk_free(asconf);
-        else
-            asoc->addip_last_asconf = asconf;
-    }
-
     return retval;
 }
 
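
Editor's note: the *_fixed helpers introduced above exist so that error reports for unknown parameters are capped at a fixed-size chunk and appending simply stops when that chunk is full, instead of growing with a large incoming packet. A standalone sketch of that bounded-append idea, under the stated assumptions; the err_buf type and append_fixed() are illustrative only:

    /* Userspace sketch of "append only while the fixed buffer has room". */
    #include <stdio.h>
    #include <string.h>

    struct err_buf {
        char data[64];      /* fixed capacity, like the pathmtu-sized chunk */
        size_t used;
    };

    static int append_fixed(struct err_buf *b, const void *p, size_t len)
    {
        if (sizeof(b->data) - b->used < len)
            return -1;      /* no room left: drop the remaining causes */
        memcpy(b->data + b->used, p, len);
        b->used += len;
        return 0;
    }

    int main(void)
    {
        struct err_buf b = { .used = 0 };
        char param[40] = { 0 };
        int i;

        for (i = 0; i < 3; i++)
            if (append_fixed(&b, param, sizeof(param)))
                printf("parameter %d dropped, buffer full\n", i);
        return 0;
    }
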
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index efa516b47e81..eb1f42f45fdd 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -51,6 +51,7 @@
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <linux/ip.h>
+#include <linux/gfp.h>
 #include <net/sock.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
@@ -217,8 +218,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
     } else {
-        if (asoc->a_rwnd > asoc->rwnd)
-            asoc->a_rwnd = asoc->rwnd;
+        asoc->a_rwnd = asoc->rwnd;
         sack = sctp_make_sack(asoc);
         if (!sack)
             goto nomem;
@@ -397,6 +397,41 @@ out_unlock:
     sctp_transport_put(transport);
 }
 
+/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
+ * the correct state machine transition that will close the association.
+ */
+void sctp_generate_proto_unreach_event(unsigned long data)
+{
+    struct sctp_transport *transport = (struct sctp_transport *) data;
+    struct sctp_association *asoc = transport->asoc;
+
+    sctp_bh_lock_sock(asoc->base.sk);
+    if (sock_owned_by_user(asoc->base.sk)) {
+        SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
+
+        /* Try again later. */
+        if (!mod_timer(&transport->proto_unreach_timer,
+                       jiffies + (HZ/20)))
+            sctp_association_hold(asoc);
+        goto out_unlock;
+    }
+
+    /* Is this structure just waiting around for us to actually
+     * get destroyed?
+     */
+    if (asoc->base.dead)
+        goto out_unlock;
+
+    sctp_do_sm(SCTP_EVENT_T_OTHER,
+               SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+               asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+
+out_unlock:
+    sctp_bh_unlock_sock(asoc->base.sk);
+    sctp_association_put(asoc);
+}
+
+
 /* Inject a SACK Timeout event into the state machine. */
 static void sctp_generate_sack_event(unsigned long data)
 {
@@ -476,7 +511,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
      * used to provide an upper bound to this doubling operation.
      *
      * Special Case:  the first HB doesn't trigger exponential backoff.
-     * The first unacknowleged HB triggers it.  We do this with a flag
+     * The first unacknowledged HB triggers it.  We do this with a flag
      * that indicates that we have an outstanding HB.
      */
     if (!is_hb || transport->hb_sent) {
@@ -718,7 +753,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
 
     if (sctp_style(sk, TCP)) {
         /* Change the sk->sk_state of a TCP-style socket that has
-         * sucessfully completed a connect() call.
+         * successfully completed a connect() call.
          */
         if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
             sk->sk_state = SCTP_SS_ESTABLISHED;
@@ -962,6 +997,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
 }
 
 
+/* Send the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+    /* Send the next asconf chunk from the addip chunk
+     * queue.
+     */
+    if (!list_empty(&asoc->addip_chunk_list)) {
+        struct list_head *entry = asoc->addip_chunk_list.next;
+        struct sctp_chunk *asconf = list_entry(entry,
+                                               struct sctp_chunk, list);
+        list_del_init(entry);
+
+        /* Hold the chunk until an ASCONF_ACK is received. */
+        sctp_chunk_hold(asconf);
+        if (sctp_primitive_ASCONF(asoc, asconf))
+            sctp_chunk_free(asconf);
+        else
+            asoc->addip_last_asconf = asconf;
+    }
+}
+
 
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1417,6 +1475,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
             asoc->init_last_sent_to = t;
             chunk->transport = t;
             t->init_sent_count++;
+            /* Set the new transport as primary */
+            sctp_assoc_set_primary(asoc, t);
             break;
 
         case SCTP_CMD_INIT_RESTART:
@@ -1615,6 +1675,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
             }
             error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
             break;
+        case SCTP_CMD_SEND_NEXT_ASCONF:
+            sctp_cmd_send_asconf(asoc);
+            break;
         default:
             printk(KERN_WARNING "Impossible command: %u, %p\n",
                    cmd->verb, cmd->obj.ptr);
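
Editor's note: the new sctp_generate_proto_unreach_event() above (together with the input.c change) defers ICMP protocol-unreachable handling when user context currently owns the socket: a short timer is armed, an association reference is taken for the timer, and the state machine runs on a later attempt. A standalone sketch of that deferral pattern, with illustrative structures and a hypothetical run_state_machine():

    /* Userspace sketch of "defer to a timer while the socket is user-owned". */
    #include <stdbool.h>
    #include <stdio.h>

    struct assoc {
        bool owned_by_user;   /* stands in for sock_owned_by_user() */
        bool timer_armed;     /* stands in for proto_unreach_timer */
        int refcnt;
    };

    static void run_state_machine(struct assoc *a)
    {
        printf("ICMP proto-unreachable: closing association\n");
    }

    static void proto_unreach_event(struct assoc *a)
    {
        if (a->owned_by_user) {
            if (!a->timer_armed) {
                a->timer_armed = true;  /* retry shortly (~HZ/20 in the patch) */
                a->refcnt++;            /* the pending timer holds a reference */
            }
            return;
        }
        run_state_machine(a);
    }

    int main(void)
    {
        struct assoc a = { .owned_by_user = true, .refcnt = 1 };

        proto_unreach_event(&a);    /* deferred: timer armed, reference taken */
        a.owned_by_user = false;
        a.timer_armed = false;
        proto_unreach_event(&a);    /* now runs the state machine */
        return 0;
    }
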
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d4df45022ffa..24b2cd555637 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -56,6 +56,7 @@
 #include <linux/ipv6.h>
 #include <linux/net.h>
 #include <linux/inet.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 #include <net/inet_ecn.h>
 #include <linux/skbuff.h>
@@ -996,14 +997,15 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
             sctp_sf_heartbeat(ep, asoc, type, arg,
                               commands))
             return SCTP_DISPOSITION_NOMEM;
+
         /* Set transport error counter and association error counter
          * when sending heartbeat.
          */
-        sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
-                        SCTP_TRANSPORT(transport));
         sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
                         SCTP_TRANSPORT(transport));
     }
+    sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+                    SCTP_TRANSPORT(transport));
     sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
                     SCTP_TRANSPORT(transport));
 
@@ -1720,7 +1722,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
 
         err = sctp_make_op_error(asoc, chunk,
                                  SCTP_ERROR_COOKIE_IN_SHUTDOWN,
-                                 NULL, 0);
+                                 NULL, 0, 0);
         if (err)
             sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                             SCTP_CHUNK(err));
@@ -2868,6 +2870,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                     sctp_cmd_seq_t *commands)
 {
     struct sctp_chunk *chunk = arg;
+    sctp_arg_t force = SCTP_NOFORCE();
     int error;
 
     if (!sctp_vtag_verify(chunk, asoc)) {
@@ -2901,6 +2904,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
         BUG();
     }
 
+    if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
+        force = SCTP_FORCE();
+
     if (asoc->autoclose) {
         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2929,7 +2935,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
      * more aggressive than the following algorithms allow.
      */
     if (chunk->end_of_packet)
-        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
 
     return SCTP_DISPOSITION_CONSUME;
 
@@ -2954,7 +2960,7 @@ discard_force:
 
 discard_noforce:
     if (chunk->end_of_packet)
-        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
 
     return SCTP_DISPOSITION_DISCARD;
 consume:
@@ -3572,7 +3578,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
     * To do this properly, we'll set the destination address of the chunk
     * and at the transmit time, will try look up the transport to use.
    * Since ASCONFs may be bundled, the correct transport may not be
-    * created untill we process the entire packet, thus this workaround.
+    * created until we process the entire packet, thus this workaround.
     */
    asconf_ack->dest = chunk->source;
    sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
@@ -3670,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                     SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
     if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-                                 asconf_ack))
+                                 asconf_ack)) {
+        /* Successfully processed ASCONF_ACK.  We can
+         * release the next asconf if we have one.
+         */
+        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+                        SCTP_NULL());
         return SCTP_DISPOSITION_CONSUME;
+    }
 
     abort = sctp_make_abort(asoc, asconf_ack,
                             sizeof(sctp_errhdr_t));
@@ -3973,7 +3985,7 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
         err_chunk = sctp_make_op_error(asoc, chunk,
                                        SCTP_ERROR_UNSUP_HMAC,
                                        &auth_hdr->hmac_id,
-                                       sizeof(__u16));
+                                       sizeof(__u16), 0);
         if (err_chunk) {
             sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                             SCTP_CHUNK(err_chunk));
@@ -4065,7 +4077,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
         hdr = unk_chunk->chunk_hdr;
         err_chunk = sctp_make_op_error(asoc, unk_chunk,
                                        SCTP_ERROR_UNKNOWN_CHUNK, hdr,
-                                       WORD_ROUND(ntohs(hdr->length)));
+                                       WORD_ROUND(ntohs(hdr->length)),
+                                       0);
         if (err_chunk) {
             sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                             SCTP_CHUNK(err_chunk));
@@ -4084,7 +4097,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
         hdr = unk_chunk->chunk_hdr;
         err_chunk = sctp_make_op_error(asoc, unk_chunk,
                                        SCTP_ERROR_UNKNOWN_CHUNK, hdr,
-                                       WORD_ROUND(ntohs(hdr->length)));
+                                       WORD_ROUND(ntohs(hdr->length)),
+                                       0);
         if (err_chunk) {
             sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                             SCTP_CHUNK(err_chunk));
@@ -6048,7 +6062,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 
         err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
                                  &data_hdr->stream,
-                                 sizeof(data_hdr->stream));
+                                 sizeof(data_hdr->stream),
+                                 sizeof(u16));
        if (err)
            sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                            SCTP_CHUNK(err));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3a95fcb17a9e..44a1ab03a3f0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
67#include <linux/poll.h> 67#include <linux/poll.h>
68#include <linux/init.h> 68#include <linux/init.h>
69#include <linux/crypto.h> 69#include <linux/crypto.h>
70#include <linux/slab.h>
70 71
71#include <net/ip.h> 72#include <net/ip.h>
72#include <net/icmp.h> 73#include <net/icmp.h>
@@ -394,7 +395,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
394 395
395 /* Refresh ephemeral port. */ 396 /* Refresh ephemeral port. */
396 if (!bp->port) 397 if (!bp->port)
397 bp->port = inet_sk(sk)->num; 398 bp->port = inet_sk(sk)->inet_num;
398 399
399 /* Add the address to the bind address list. 400 /* Add the address to the bind address list.
400 * Use GFP_ATOMIC since BHs will be disabled. 401 * Use GFP_ATOMIC since BHs will be disabled.
@@ -403,7 +404,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
403 404
404 /* Copy back into socket for getsockname() use. */ 405 /* Copy back into socket for getsockname() use. */
405 if (!ret) { 406 if (!ret) {
406 inet_sk(sk)->sport = htons(inet_sk(sk)->num); 407 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
407 af->to_sk_saddr(addr, sk); 408 af->to_sk_saddr(addr, sk);
408 } 409 }
409 410
@@ -1117,7 +1118,7 @@ static int __sctp_connect(struct sock* sk,
1117 } 1118 }
1118 1119
1119 /* Initialize sk's dport and daddr for getpeername() */ 1120 /* Initialize sk's dport and daddr for getpeername() */
1120 inet_sk(sk)->dport = htons(asoc->peer.port); 1121 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1121 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1122 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1122 af->to_sk_daddr(sa_addr, sk); 1123 af->to_sk_daddr(sa_addr, sk);
1123 sk->sk_err = 0; 1124 sk->sk_err = 0;
@@ -1968,7 +1969,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
1968 if (err) 1969 if (err)
1969 goto out_free; 1970 goto out_free;
1970 1971
1971 sock_recv_timestamp(msg, sk, skb); 1972 sock_recv_ts_and_drops(msg, sk, skb);
1972 if (sctp_ulpevent_is_notification(event)) { 1973 if (sctp_ulpevent_is_notification(event)) {
1973 msg->msg_flags |= MSG_NOTIFICATION; 1974 msg->msg_flags |= MSG_NOTIFICATION;
1974 sp->pf->event_msgname(event, msg->msg_name, addr_len); 1975 sp->pf->event_msgname(event, msg->msg_name, addr_len);
@@ -2086,6 +2087,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2086 return -EINVAL; 2087 return -EINVAL;
2087 if (copy_from_user(&sp->autoclose, optval, optlen)) 2088 if (copy_from_user(&sp->autoclose, optval, optlen))
2088 return -EFAULT; 2089 return -EFAULT;
2090 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2091 sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
2089 2092
2090 return 0; 2093 return 0;
2091} 2094}
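The new clamp keeps the later multiplication sp->autoclose * HZ (used for the AUTOCLOSE timeout, see the associola.c hunk above) from exceeding MAX_SCHEDULE_TIMEOUT. A minimal userspace sketch of the same arithmetic, assuming HZ is 1000 and using the fact that MAX_SCHEDULE_TIMEOUT is LONG_MAX:

#include <stdio.h>
#include <limits.h>

/* Illustrative only: HZ is configuration dependent; 1000 is assumed here. */
#define EX_HZ 1000L

int main(void)
{
	long autoclose = LONG_MAX / 100;	/* absurd user value, in seconds */
	long limit = LONG_MAX / EX_HZ;		/* largest value safe to scale   */

	if (autoclose > limit)
		autoclose = limit;		/* what min_t() does in the patch */

	printf("autoclose capped at %ld s (%ld jiffies)\n",
	       autoclose, autoclose * EX_HZ);
	return 0;
}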
@@ -2311,11 +2314,10 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2311 } 2314 }
2312 } 2315 }
2313 2316
2314 /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value 2317 /* Note that a value of zero indicates the current setting should be
2315 * of this field is ignored. Note also that a value of zero 2318 left unchanged.
2316 * indicates the current setting should be left unchanged.
2317 */ 2319 */
2318 if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) { 2320 if (params->spp_pathmaxrxt) {
2319 if (trans) { 2321 if (trans) {
2320 trans->pathmaxrxt = params->spp_pathmaxrxt; 2322 trans->pathmaxrxt = params->spp_pathmaxrxt;
2321 } else if (asoc) { 2323 } else if (asoc) {
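With the SPP_PMTUD_ENABLE coupling dropped, spp_pathmaxrxt now follows the plain rule that a value of zero means "leave the current setting alone". A hedged userspace example of applying only that field, assuming the <netinet/sctp.h> header shipped with lksctp-tools:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Set only pathmaxrxt for the endpoint/association; every other field is
 * left at 0, which the kernel treats as "keep the current value".
 */
int set_pathmaxrxt(int sd, int value)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));
	p.spp_pathmaxrxt = value;	/* non-zero: apply this value */
	/* spp_hbinterval, spp_pathmtu, spp_sackdelay stay 0: unchanged */
	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}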
@@ -2354,8 +2356,8 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2354 pmtud_change == SPP_PMTUD || 2356 pmtud_change == SPP_PMTUD ||
2355 sackdelay_change == SPP_SACKDELAY || 2357 sackdelay_change == SPP_SACKDELAY ||
2356 params.spp_sackdelay > 500 || 2358 params.spp_sackdelay > 500 ||
2357 (params.spp_pathmtu 2359 (params.spp_pathmtu &&
2358 && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2360 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2359 return -EINVAL; 2361 return -EINVAL;
2360 2362
2361 /* If an address other than INADDR_ANY is specified, and 2363 /* If an address other than INADDR_ANY is specified, and
@@ -3717,9 +3719,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3717 sp->hmac = NULL; 3719 sp->hmac = NULL;
3718 3720
3719 SCTP_DBG_OBJCNT_INC(sock); 3721 SCTP_DBG_OBJCNT_INC(sock);
3720 percpu_counter_inc(&sctp_sockets_allocated); 3722
3723 /* Set socket backlog limit. */
3724 sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3721 3725
3722 local_bh_disable(); 3726 local_bh_disable();
3727 percpu_counter_inc(&sctp_sockets_allocated);
3723 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3728 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3724 local_bh_enable(); 3729 local_bh_enable();
3725 3730
@@ -3736,8 +3741,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3736 /* Release our hold on the endpoint. */ 3741 /* Release our hold on the endpoint. */
3737 ep = sctp_sk(sk)->ep; 3742 ep = sctp_sk(sk)->ep;
3738 sctp_endpoint_free(ep); 3743 sctp_endpoint_free(ep);
3739 percpu_counter_dec(&sctp_sockets_allocated);
3740 local_bh_disable(); 3744 local_bh_disable();
3745 percpu_counter_dec(&sctp_sockets_allocated);
3741 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3746 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3742 local_bh_enable(); 3747 local_bh_enable();
3743} 3748}
@@ -4349,90 +4354,6 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval
4349 return 0; 4354 return 0;
4350} 4355}
4351 4356
4352static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
4353 char __user *optval,
4354 int __user *optlen)
4355{
4356 sctp_assoc_t id;
4357 struct sctp_association *asoc;
4358 struct list_head *pos;
4359 int cnt = 0;
4360
4361 if (len < sizeof(sctp_assoc_t))
4362 return -EINVAL;
4363
4364 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4365 return -EFAULT;
4366
4367 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD "
4368 "socket option deprecated\n");
4369 /* For UDP-style sockets, id specifies the association to query. */
4370 asoc = sctp_id2assoc(sk, id);
4371 if (!asoc)
4372 return -EINVAL;
4373
4374 list_for_each(pos, &asoc->peer.transport_addr_list) {
4375 cnt ++;
4376 }
4377
4378 return cnt;
4379}
4380
4381/*
4382 * Old API for getting list of peer addresses. Does not work for 32-bit
4383 * programs running on a 64-bit kernel
4384 */
4385static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4386 char __user *optval,
4387 int __user *optlen)
4388{
4389 struct sctp_association *asoc;
4390 int cnt = 0;
4391 struct sctp_getaddrs_old getaddrs;
4392 struct sctp_transport *from;
4393 void __user *to;
4394 union sctp_addr temp;
4395 struct sctp_sock *sp = sctp_sk(sk);
4396 int addrlen;
4397
4398 if (len < sizeof(struct sctp_getaddrs_old))
4399 return -EINVAL;
4400
4401 len = sizeof(struct sctp_getaddrs_old);
4402
4403 if (copy_from_user(&getaddrs, optval, len))
4404 return -EFAULT;
4405
4406 if (getaddrs.addr_num <= 0) return -EINVAL;
4407
4408 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD "
4409 "socket option deprecated\n");
4410
4411 /* For UDP-style sockets, id specifies the association to query. */
4412 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4413 if (!asoc)
4414 return -EINVAL;
4415
4416 to = (void __user *)getaddrs.addrs;
4417 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4418 transports) {
4419 memcpy(&temp, &from->ipaddr, sizeof(temp));
4420 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4421 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
4422 if (copy_to_user(to, &temp, addrlen))
4423 return -EFAULT;
4424 to += addrlen ;
4425 cnt ++;
4426 if (cnt >= getaddrs.addr_num) break;
4427 }
4428 getaddrs.addr_num = cnt;
4429 if (put_user(len, optlen))
4430 return -EFAULT;
4431 if (copy_to_user(optval, &getaddrs, len))
4432 return -EFAULT;
4433
4434 return 0;
4435}
4436 4357
4437static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4358static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4438 char __user *optval, int __user *optlen) 4359 char __user *optval, int __user *optlen)
@@ -4485,125 +4406,6 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4485 return 0; 4406 return 0;
4486} 4407}
4487 4408
4488static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4489 char __user *optval,
4490 int __user *optlen)
4491{
4492 sctp_assoc_t id;
4493 struct sctp_bind_addr *bp;
4494 struct sctp_association *asoc;
4495 struct sctp_sockaddr_entry *addr;
4496 int cnt = 0;
4497
4498 if (len < sizeof(sctp_assoc_t))
4499 return -EINVAL;
4500
4501 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4502 return -EFAULT;
4503
4504 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD "
4505 "socket option deprecated\n");
4506
4507 /*
4508 * For UDP-style sockets, id specifies the association to query.
4509 * If the id field is set to the value '0' then the locally bound
4510 * addresses are returned without regard to any particular
4511 * association.
4512 */
4513 if (0 == id) {
4514 bp = &sctp_sk(sk)->ep->base.bind_addr;
4515 } else {
4516 asoc = sctp_id2assoc(sk, id);
4517 if (!asoc)
4518 return -EINVAL;
4519 bp = &asoc->base.bind_addr;
4520 }
4521
4522 /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
4523 * addresses from the global local address list.
4524 */
4525 if (sctp_list_single_entry(&bp->address_list)) {
4526 addr = list_entry(bp->address_list.next,
4527 struct sctp_sockaddr_entry, list);
4528 if (sctp_is_any(sk, &addr->a)) {
4529 rcu_read_lock();
4530 list_for_each_entry_rcu(addr,
4531 &sctp_local_addr_list, list) {
4532 if (!addr->valid)
4533 continue;
4534
4535 if ((PF_INET == sk->sk_family) &&
4536 (AF_INET6 == addr->a.sa.sa_family))
4537 continue;
4538
4539 if ((PF_INET6 == sk->sk_family) &&
4540 inet_v6_ipv6only(sk) &&
4541 (AF_INET == addr->a.sa.sa_family))
4542 continue;
4543
4544 cnt++;
4545 }
4546 rcu_read_unlock();
4547 } else {
4548 cnt = 1;
4549 }
4550 goto done;
4551 }
4552
4553 /* Protection on the bound address list is not needed,
4554 * since in the socket option context we hold the socket lock,
4555 * so there is no way that the bound address list can change.
4556 */
4557 list_for_each_entry(addr, &bp->address_list, list) {
4558 cnt ++;
4559 }
4560done:
4561 return cnt;
4562}
4563
4564/* Helper function that copies local addresses to user and returns the number
4565 * of addresses copied.
4566 */
4567static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
4568 int max_addrs, void *to,
4569 int *bytes_copied)
4570{
4571 struct sctp_sockaddr_entry *addr;
4572 union sctp_addr temp;
4573 int cnt = 0;
4574 int addrlen;
4575
4576 rcu_read_lock();
4577 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
4578 if (!addr->valid)
4579 continue;
4580
4581 if ((PF_INET == sk->sk_family) &&
4582 (AF_INET6 == addr->a.sa.sa_family))
4583 continue;
4584 if ((PF_INET6 == sk->sk_family) &&
4585 inet_v6_ipv6only(sk) &&
4586 (AF_INET == addr->a.sa.sa_family))
4587 continue;
4588 memcpy(&temp, &addr->a, sizeof(temp));
4589 if (!temp.v4.sin_port)
4590 temp.v4.sin_port = htons(port);
4591
4592 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4593 &temp);
4594 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4595 memcpy(to, &temp, addrlen);
4596
4597 to += addrlen;
4598 *bytes_copied += addrlen;
4599 cnt ++;
4600 if (cnt >= max_addrs) break;
4601 }
4602 rcu_read_unlock();
4603
4604 return cnt;
4605}
4606
4607static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4409static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4608 size_t space_left, int *bytes_copied) 4410 size_t space_left, int *bytes_copied)
4609{ 4411{
@@ -4647,112 +4449,6 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4647 return cnt; 4449 return cnt;
4648} 4450}
4649 4451
4650/* Old API for getting list of local addresses. Does not work for 32-bit
4651 * programs running on a 64-bit kernel
4652 */
4653static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4654 char __user *optval, int __user *optlen)
4655{
4656 struct sctp_bind_addr *bp;
4657 struct sctp_association *asoc;
4658 int cnt = 0;
4659 struct sctp_getaddrs_old getaddrs;
4660 struct sctp_sockaddr_entry *addr;
4661 void __user *to;
4662 union sctp_addr temp;
4663 struct sctp_sock *sp = sctp_sk(sk);
4664 int addrlen;
4665 int err = 0;
4666 void *addrs;
4667 void *buf;
4668 int bytes_copied = 0;
4669
4670 if (len < sizeof(struct sctp_getaddrs_old))
4671 return -EINVAL;
4672
4673 len = sizeof(struct sctp_getaddrs_old);
4674 if (copy_from_user(&getaddrs, optval, len))
4675 return -EFAULT;
4676
4677 if (getaddrs.addr_num <= 0 ||
4678 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
4679 return -EINVAL;
4680
4681 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD "
4682 "socket option deprecated\n");
4683
4684 /*
4685 * For UDP-style sockets, id specifies the association to query.
4686 * If the id field is set to the value '0' then the locally bound
4687 * addresses are returned without regard to any particular
4688 * association.
4689 */
4690 if (0 == getaddrs.assoc_id) {
4691 bp = &sctp_sk(sk)->ep->base.bind_addr;
4692 } else {
4693 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4694 if (!asoc)
4695 return -EINVAL;
4696 bp = &asoc->base.bind_addr;
4697 }
4698
4699 to = getaddrs.addrs;
4700
4701 /* Allocate space for a local instance of packed array to hold all
4702 * the data. We store addresses here first and then write them
4703 * to the user in one shot.
4704 */
4705 addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
4706 GFP_KERNEL);
4707 if (!addrs)
4708 return -ENOMEM;
4709
4710 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4711 * addresses from the global local address list.
4712 */
4713 if (sctp_list_single_entry(&bp->address_list)) {
4714 addr = list_entry(bp->address_list.next,
4715 struct sctp_sockaddr_entry, list);
4716 if (sctp_is_any(sk, &addr->a)) {
4717 cnt = sctp_copy_laddrs_old(sk, bp->port,
4718 getaddrs.addr_num,
4719 addrs, &bytes_copied);
4720 goto copy_getaddrs;
4721 }
4722 }
4723
4724 buf = addrs;
4725 /* Protection on the bound address list is not needed since
4726 * in the socket option context we hold a socket lock and
4727 * thus the bound address list can't change.
4728 */
4729 list_for_each_entry(addr, &bp->address_list, list) {
4730 memcpy(&temp, &addr->a, sizeof(temp));
4731 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4732 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4733 memcpy(buf, &temp, addrlen);
4734 buf += addrlen;
4735 bytes_copied += addrlen;
4736 cnt ++;
4737 if (cnt >= getaddrs.addr_num) break;
4738 }
4739
4740copy_getaddrs:
4741 /* copy the entire address list into the user provided space */
4742 if (copy_to_user(to, addrs, bytes_copied)) {
4743 err = -EFAULT;
4744 goto error;
4745 }
4746
4747 /* copy the leading structure back to user */
4748 getaddrs.addr_num = cnt;
4749 if (copy_to_user(optval, &getaddrs, len))
4750 err = -EFAULT;
4751
4752error:
4753 kfree(addrs);
4754 return err;
4755}
4756 4452
4757static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4453static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4758 char __user *optval, int __user *optlen) 4454 char __user *optval, int __user *optlen)
@@ -5603,22 +5299,6 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5603 case SCTP_INITMSG: 5299 case SCTP_INITMSG:
5604 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5300 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
5605 break; 5301 break;
5606 case SCTP_GET_PEER_ADDRS_NUM_OLD:
5607 retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval,
5608 optlen);
5609 break;
5610 case SCTP_GET_LOCAL_ADDRS_NUM_OLD:
5611 retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval,
5612 optlen);
5613 break;
5614 case SCTP_GET_PEER_ADDRS_OLD:
5615 retval = sctp_getsockopt_peer_addrs_old(sk, len, optval,
5616 optlen);
5617 break;
5618 case SCTP_GET_LOCAL_ADDRS_OLD:
5619 retval = sctp_getsockopt_local_addrs_old(sk, len, optval,
5620 optlen);
5621 break;
5622 case SCTP_GET_PEER_ADDRS: 5302 case SCTP_GET_PEER_ADDRS:
5623 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5303 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
5624 optlen); 5304 optlen);
@@ -5861,7 +5541,7 @@ pp_not_found:
5861 */ 5541 */
5862success: 5542success:
5863 if (!sctp_sk(sk)->bind_hash) { 5543 if (!sctp_sk(sk)->bind_hash) {
5864 inet_sk(sk)->num = snum; 5544 inet_sk(sk)->inet_num = snum;
5865 sk_add_bind_node(sk, &pp->owner); 5545 sk_add_bind_node(sk, &pp->owner);
5866 sctp_sk(sk)->bind_hash = pp; 5546 sctp_sk(sk)->bind_hash = pp;
5867 } 5547 }
@@ -5933,7 +5613,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
5933 if (sctp_autobind(sk)) 5613 if (sctp_autobind(sk))
5934 return -EAGAIN; 5614 return -EAGAIN;
5935 } else { 5615 } else {
5936 if (sctp_get_port(sk, inet_sk(sk)->num)) { 5616 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
5937 sk->sk_state = SCTP_SS_CLOSED; 5617 sk->sk_state = SCTP_SS_CLOSED;
5938 return -EADDRINUSE; 5618 return -EADDRINUSE;
5939 } 5619 }
@@ -6104,14 +5784,14 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
6104static inline void __sctp_put_port(struct sock *sk) 5784static inline void __sctp_put_port(struct sock *sk)
6105{ 5785{
6106 struct sctp_bind_hashbucket *head = 5786 struct sctp_bind_hashbucket *head =
6107 &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->num)]; 5787 &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
6108 struct sctp_bind_bucket *pp; 5788 struct sctp_bind_bucket *pp;
6109 5789
6110 sctp_spin_lock(&head->lock); 5790 sctp_spin_lock(&head->lock);
6111 pp = sctp_sk(sk)->bind_hash; 5791 pp = sctp_sk(sk)->bind_hash;
6112 __sk_del_bind_node(sk); 5792 __sk_del_bind_node(sk);
6113 sctp_sk(sk)->bind_hash = NULL; 5793 sctp_sk(sk)->bind_hash = NULL;
6114 inet_sk(sk)->num = 0; 5794 inet_sk(sk)->inet_num = 0;
6115 sctp_bucket_destroy(pp); 5795 sctp_bucket_destroy(pp);
6116 sctp_spin_unlock(&head->lock); 5796 sctp_spin_unlock(&head->lock);
6117} 5797}
@@ -6138,7 +5818,7 @@ static int sctp_autobind(struct sock *sk)
6138 /* Initialize a local sockaddr structure to INADDR_ANY. */ 5818 /* Initialize a local sockaddr structure to INADDR_ANY. */
6139 af = sctp_sk(sk)->pf->af; 5819 af = sctp_sk(sk)->pf->af;
6140 5820
6141 port = htons(inet_sk(sk)->num); 5821 port = htons(inet_sk(sk)->inet_num);
6142 af->inaddr_any(&autoaddr, port); 5822 af->inaddr_any(&autoaddr, port);
6143 5823
6144 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 5824 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
@@ -6509,6 +6189,16 @@ do_nonblock:
6509 goto out; 6189 goto out;
6510} 6190}
6511 6191
6192void sctp_data_ready(struct sock *sk, int len)
6193{
6194 read_lock_bh(&sk->sk_callback_lock);
6195 if (sk_has_sleeper(sk))
6196 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
6197 POLLRDNORM | POLLRDBAND);
6198 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6199 read_unlock_bh(&sk->sk_callback_lock);
6200}
6201
6512/* If socket sndbuf has changed, wake up all per association waiters. */ 6202/* If socket sndbuf has changed, wake up all per association waiters. */
6513void sctp_write_space(struct sock *sk) 6203void sctp_write_space(struct sock *sk)
6514{ 6204{
@@ -6683,7 +6373,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6683 struct sctp_association *asoc) 6373 struct sctp_association *asoc)
6684{ 6374{
6685 struct inet_sock *inet = inet_sk(sk); 6375 struct inet_sock *inet = inet_sk(sk);
6686 struct inet_sock *newinet = inet_sk(newsk); 6376 struct inet_sock *newinet;
6687 6377
6688 newsk->sk_type = sk->sk_type; 6378 newsk->sk_type = sk->sk_type;
6689 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6379 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
@@ -6707,12 +6397,12 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6707 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6397 /* Initialize sk's sport, dport, rcv_saddr and daddr for
6708 * getsockname() and getpeername() 6398 * getsockname() and getpeername()
6709 */ 6399 */
6710 newinet->sport = inet->sport; 6400 newinet->inet_sport = inet->inet_sport;
6711 newinet->saddr = inet->saddr; 6401 newinet->inet_saddr = inet->inet_saddr;
6712 newinet->rcv_saddr = inet->rcv_saddr; 6402 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
6713 newinet->dport = htons(asoc->peer.port); 6403 newinet->inet_dport = htons(asoc->peer.port);
6714 newinet->pmtudisc = inet->pmtudisc; 6404 newinet->pmtudisc = inet->pmtudisc;
6715 newinet->id = asoc->next_tsn ^ jiffies; 6405 newinet->inet_id = asoc->next_tsn ^ jiffies;
6716 6406
6717 newinet->uc_ttl = inet->uc_ttl; 6407 newinet->uc_ttl = inet->uc_ttl;
6718 newinet->mc_loop = 1; 6408 newinet->mc_loop = 1;
@@ -6751,13 +6441,13 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6751 newsp->hmac = NULL; 6441 newsp->hmac = NULL;
6752 6442
6753 /* Hook this new socket in to the bind_hash list. */ 6443 /* Hook this new socket in to the bind_hash list. */
6754 head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->num)]; 6444 head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
6755 sctp_local_bh_disable(); 6445 sctp_local_bh_disable();
6756 sctp_spin_lock(&head->lock); 6446 sctp_spin_lock(&head->lock);
6757 pp = sctp_sk(oldsk)->bind_hash; 6447 pp = sctp_sk(oldsk)->bind_hash;
6758 sk_add_bind_node(newsk, &pp->owner); 6448 sk_add_bind_node(newsk, &pp->owner);
6759 sctp_sk(newsk)->bind_hash = pp; 6449 sctp_sk(newsk)->bind_hash = pp;
6760 inet_sk(newsk)->num = inet_sk(oldsk)->num; 6450 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
6761 sctp_spin_unlock(&head->lock); 6451 sctp_spin_unlock(&head->lock);
6762 sctp_local_bh_enable(); 6452 sctp_local_bh_enable();
6763 6453
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 737d330e5ffc..442ad4ed6315 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -37,6 +37,7 @@
37 */ 37 */
38 38
39#include <linux/types.h> 39#include <linux/types.h>
40#include <linux/slab.h>
40#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
41#include <net/sctp/sm.h> 42#include <net/sctp/sm.h>
42 43
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ab7151da120f..832590bbe0c0 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -52,6 +52,7 @@ static int int_max = INT_MAX;
52static int sack_timer_min = 1; 52static int sack_timer_min = 1;
53static int sack_timer_max = 500; 53static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16;
55 56
56extern int sysctl_sctp_mem[3]; 57extern int sysctl_sctp_mem[3];
57extern int sysctl_sctp_rmem[3]; 58extern int sysctl_sctp_rmem[3];
@@ -59,180 +60,145 @@ extern int sysctl_sctp_wmem[3];
59 60
60static ctl_table sctp_table[] = { 61static ctl_table sctp_table[] = {
61 { 62 {
62 .ctl_name = NET_SCTP_RTO_INITIAL,
63 .procname = "rto_initial", 63 .procname = "rto_initial",
64 .data = &sctp_rto_initial, 64 .data = &sctp_rto_initial,
65 .maxlen = sizeof(unsigned int), 65 .maxlen = sizeof(unsigned int),
66 .mode = 0644, 66 .mode = 0644,
67 .proc_handler = proc_dointvec_minmax, 67 .proc_handler = proc_dointvec_minmax,
68 .strategy = sysctl_intvec,
69 .extra1 = &one, 68 .extra1 = &one,
70 .extra2 = &timer_max 69 .extra2 = &timer_max
71 }, 70 },
72 { 71 {
73 .ctl_name = NET_SCTP_RTO_MIN,
74 .procname = "rto_min", 72 .procname = "rto_min",
75 .data = &sctp_rto_min, 73 .data = &sctp_rto_min,
76 .maxlen = sizeof(unsigned int), 74 .maxlen = sizeof(unsigned int),
77 .mode = 0644, 75 .mode = 0644,
78 .proc_handler = proc_dointvec_minmax, 76 .proc_handler = proc_dointvec_minmax,
79 .strategy = sysctl_intvec,
80 .extra1 = &one, 77 .extra1 = &one,
81 .extra2 = &timer_max 78 .extra2 = &timer_max
82 }, 79 },
83 { 80 {
84 .ctl_name = NET_SCTP_RTO_MAX,
85 .procname = "rto_max", 81 .procname = "rto_max",
86 .data = &sctp_rto_max, 82 .data = &sctp_rto_max,
87 .maxlen = sizeof(unsigned int), 83 .maxlen = sizeof(unsigned int),
88 .mode = 0644, 84 .mode = 0644,
89 .proc_handler = proc_dointvec_minmax, 85 .proc_handler = proc_dointvec_minmax,
90 .strategy = sysctl_intvec,
91 .extra1 = &one, 86 .extra1 = &one,
92 .extra2 = &timer_max 87 .extra2 = &timer_max
93 }, 88 },
94 { 89 {
95 .ctl_name = NET_SCTP_VALID_COOKIE_LIFE,
96 .procname = "valid_cookie_life", 90 .procname = "valid_cookie_life",
97 .data = &sctp_valid_cookie_life, 91 .data = &sctp_valid_cookie_life,
98 .maxlen = sizeof(unsigned int), 92 .maxlen = sizeof(unsigned int),
99 .mode = 0644, 93 .mode = 0644,
100 .proc_handler = proc_dointvec_minmax, 94 .proc_handler = proc_dointvec_minmax,
101 .strategy = sysctl_intvec,
102 .extra1 = &one, 95 .extra1 = &one,
103 .extra2 = &timer_max 96 .extra2 = &timer_max
104 }, 97 },
105 { 98 {
106 .ctl_name = NET_SCTP_MAX_BURST,
107 .procname = "max_burst", 99 .procname = "max_burst",
108 .data = &sctp_max_burst, 100 .data = &sctp_max_burst,
109 .maxlen = sizeof(int), 101 .maxlen = sizeof(int),
110 .mode = 0644, 102 .mode = 0644,
111 .proc_handler = proc_dointvec_minmax, 103 .proc_handler = proc_dointvec_minmax,
112 .strategy = sysctl_intvec,
113 .extra1 = &zero, 104 .extra1 = &zero,
114 .extra2 = &int_max 105 .extra2 = &int_max
115 }, 106 },
116 { 107 {
117 .ctl_name = NET_SCTP_ASSOCIATION_MAX_RETRANS,
118 .procname = "association_max_retrans", 108 .procname = "association_max_retrans",
119 .data = &sctp_max_retrans_association, 109 .data = &sctp_max_retrans_association,
120 .maxlen = sizeof(int), 110 .maxlen = sizeof(int),
121 .mode = 0644, 111 .mode = 0644,
122 .proc_handler = proc_dointvec_minmax, 112 .proc_handler = proc_dointvec_minmax,
123 .strategy = sysctl_intvec,
124 .extra1 = &one, 113 .extra1 = &one,
125 .extra2 = &int_max 114 .extra2 = &int_max
126 }, 115 },
127 { 116 {
128 .ctl_name = NET_SCTP_SNDBUF_POLICY,
129 .procname = "sndbuf_policy", 117 .procname = "sndbuf_policy",
130 .data = &sctp_sndbuf_policy, 118 .data = &sctp_sndbuf_policy,
131 .maxlen = sizeof(int), 119 .maxlen = sizeof(int),
132 .mode = 0644, 120 .mode = 0644,
133 .proc_handler = proc_dointvec, 121 .proc_handler = proc_dointvec,
134 .strategy = sysctl_intvec
135 }, 122 },
136 { 123 {
137 .ctl_name = NET_SCTP_RCVBUF_POLICY,
138 .procname = "rcvbuf_policy", 124 .procname = "rcvbuf_policy",
139 .data = &sctp_rcvbuf_policy, 125 .data = &sctp_rcvbuf_policy,
140 .maxlen = sizeof(int), 126 .maxlen = sizeof(int),
141 .mode = 0644, 127 .mode = 0644,
142 .proc_handler = proc_dointvec, 128 .proc_handler = proc_dointvec,
143 .strategy = sysctl_intvec
144 }, 129 },
145 { 130 {
146 .ctl_name = NET_SCTP_PATH_MAX_RETRANS,
147 .procname = "path_max_retrans", 131 .procname = "path_max_retrans",
148 .data = &sctp_max_retrans_path, 132 .data = &sctp_max_retrans_path,
149 .maxlen = sizeof(int), 133 .maxlen = sizeof(int),
150 .mode = 0644, 134 .mode = 0644,
151 .proc_handler = proc_dointvec_minmax, 135 .proc_handler = proc_dointvec_minmax,
152 .strategy = sysctl_intvec,
153 .extra1 = &one, 136 .extra1 = &one,
154 .extra2 = &int_max 137 .extra2 = &int_max
155 }, 138 },
156 { 139 {
157 .ctl_name = NET_SCTP_MAX_INIT_RETRANSMITS,
158 .procname = "max_init_retransmits", 140 .procname = "max_init_retransmits",
159 .data = &sctp_max_retrans_init, 141 .data = &sctp_max_retrans_init,
160 .maxlen = sizeof(int), 142 .maxlen = sizeof(int),
161 .mode = 0644, 143 .mode = 0644,
162 .proc_handler = proc_dointvec_minmax, 144 .proc_handler = proc_dointvec_minmax,
163 .strategy = sysctl_intvec,
164 .extra1 = &one, 145 .extra1 = &one,
165 .extra2 = &int_max 146 .extra2 = &int_max
166 }, 147 },
167 { 148 {
168 .ctl_name = NET_SCTP_HB_INTERVAL,
169 .procname = "hb_interval", 149 .procname = "hb_interval",
170 .data = &sctp_hb_interval, 150 .data = &sctp_hb_interval,
171 .maxlen = sizeof(unsigned int), 151 .maxlen = sizeof(unsigned int),
172 .mode = 0644, 152 .mode = 0644,
173 .proc_handler = proc_dointvec_minmax, 153 .proc_handler = proc_dointvec_minmax,
174 .strategy = sysctl_intvec,
175 .extra1 = &one, 154 .extra1 = &one,
176 .extra2 = &timer_max 155 .extra2 = &timer_max
177 }, 156 },
178 { 157 {
179 .ctl_name = NET_SCTP_PRESERVE_ENABLE,
180 .procname = "cookie_preserve_enable", 158 .procname = "cookie_preserve_enable",
181 .data = &sctp_cookie_preserve_enable, 159 .data = &sctp_cookie_preserve_enable,
182 .maxlen = sizeof(int), 160 .maxlen = sizeof(int),
183 .mode = 0644, 161 .mode = 0644,
184 .proc_handler = proc_dointvec, 162 .proc_handler = proc_dointvec,
185 .strategy = sysctl_intvec
186 }, 163 },
187 { 164 {
188 .ctl_name = NET_SCTP_RTO_ALPHA,
189 .procname = "rto_alpha_exp_divisor", 165 .procname = "rto_alpha_exp_divisor",
190 .data = &sctp_rto_alpha, 166 .data = &sctp_rto_alpha,
191 .maxlen = sizeof(int), 167 .maxlen = sizeof(int),
192 .mode = 0444, 168 .mode = 0444,
193 .proc_handler = proc_dointvec, 169 .proc_handler = proc_dointvec,
194 .strategy = sysctl_intvec
195 }, 170 },
196 { 171 {
197 .ctl_name = NET_SCTP_RTO_BETA,
198 .procname = "rto_beta_exp_divisor", 172 .procname = "rto_beta_exp_divisor",
199 .data = &sctp_rto_beta, 173 .data = &sctp_rto_beta,
200 .maxlen = sizeof(int), 174 .maxlen = sizeof(int),
201 .mode = 0444, 175 .mode = 0444,
202 .proc_handler = proc_dointvec, 176 .proc_handler = proc_dointvec,
203 .strategy = sysctl_intvec
204 }, 177 },
205 { 178 {
206 .ctl_name = NET_SCTP_ADDIP_ENABLE,
207 .procname = "addip_enable", 179 .procname = "addip_enable",
208 .data = &sctp_addip_enable, 180 .data = &sctp_addip_enable,
209 .maxlen = sizeof(int), 181 .maxlen = sizeof(int),
210 .mode = 0644, 182 .mode = 0644,
211 .proc_handler = proc_dointvec, 183 .proc_handler = proc_dointvec,
212 .strategy = sysctl_intvec
213 }, 184 },
214 { 185 {
215 .ctl_name = NET_SCTP_PRSCTP_ENABLE,
216 .procname = "prsctp_enable", 186 .procname = "prsctp_enable",
217 .data = &sctp_prsctp_enable, 187 .data = &sctp_prsctp_enable,
218 .maxlen = sizeof(int), 188 .maxlen = sizeof(int),
219 .mode = 0644, 189 .mode = 0644,
220 .proc_handler = proc_dointvec, 190 .proc_handler = proc_dointvec,
221 .strategy = sysctl_intvec
222 }, 191 },
223 { 192 {
224 .ctl_name = NET_SCTP_SACK_TIMEOUT,
225 .procname = "sack_timeout", 193 .procname = "sack_timeout",
226 .data = &sctp_sack_timeout, 194 .data = &sctp_sack_timeout,
227 .maxlen = sizeof(int), 195 .maxlen = sizeof(int),
228 .mode = 0644, 196 .mode = 0644,
229 .proc_handler = proc_dointvec_minmax, 197 .proc_handler = proc_dointvec_minmax,
230 .strategy = sysctl_intvec,
231 .extra1 = &sack_timer_min, 198 .extra1 = &sack_timer_min,
232 .extra2 = &sack_timer_max, 199 .extra2 = &sack_timer_max,
233 }, 200 },
234 { 201 {
235 .ctl_name = CTL_UNNUMBERED,
236 .procname = "sctp_mem", 202 .procname = "sctp_mem",
237 .data = &sysctl_sctp_mem, 203 .data = &sysctl_sctp_mem,
238 .maxlen = sizeof(sysctl_sctp_mem), 204 .maxlen = sizeof(sysctl_sctp_mem),
@@ -240,7 +206,6 @@ static ctl_table sctp_table[] = {
240 .proc_handler = proc_dointvec, 206 .proc_handler = proc_dointvec,
241 }, 207 },
242 { 208 {
243 .ctl_name = CTL_UNNUMBERED,
244 .procname = "sctp_rmem", 209 .procname = "sctp_rmem",
245 .data = &sysctl_sctp_rmem, 210 .data = &sysctl_sctp_rmem,
246 .maxlen = sizeof(sysctl_sctp_rmem), 211 .maxlen = sizeof(sysctl_sctp_rmem),
@@ -248,7 +213,6 @@ static ctl_table sctp_table[] = {
248 .proc_handler = proc_dointvec, 213 .proc_handler = proc_dointvec,
249 }, 214 },
250 { 215 {
251 .ctl_name = CTL_UNNUMBERED,
252 .procname = "sctp_wmem", 216 .procname = "sctp_wmem",
253 .data = &sysctl_sctp_wmem, 217 .data = &sysctl_sctp_wmem,
254 .maxlen = sizeof(sysctl_sctp_wmem), 218 .maxlen = sizeof(sysctl_sctp_wmem),
@@ -256,40 +220,44 @@ static ctl_table sctp_table[] = {
256 .proc_handler = proc_dointvec, 220 .proc_handler = proc_dointvec,
257 }, 221 },
258 { 222 {
259 .ctl_name = CTL_UNNUMBERED,
260 .procname = "auth_enable", 223 .procname = "auth_enable",
261 .data = &sctp_auth_enable, 224 .data = &sctp_auth_enable,
262 .maxlen = sizeof(int), 225 .maxlen = sizeof(int),
263 .mode = 0644, 226 .mode = 0644,
264 .proc_handler = proc_dointvec, 227 .proc_handler = proc_dointvec,
265 .strategy = sysctl_intvec
266 }, 228 },
267 { 229 {
268 .ctl_name = CTL_UNNUMBERED,
269 .procname = "addip_noauth_enable", 230 .procname = "addip_noauth_enable",
270 .data = &sctp_addip_noauth, 231 .data = &sctp_addip_noauth,
271 .maxlen = sizeof(int), 232 .maxlen = sizeof(int),
272 .mode = 0644, 233 .mode = 0644,
273 .proc_handler = proc_dointvec, 234 .proc_handler = proc_dointvec,
274 .strategy = sysctl_intvec
275 }, 235 },
276 { 236 {
277 .ctl_name = CTL_UNNUMBERED,
278 .procname = "addr_scope_policy", 237 .procname = "addr_scope_policy",
279 .data = &sctp_scope_policy, 238 .data = &sctp_scope_policy,
280 .maxlen = sizeof(int), 239 .maxlen = sizeof(int),
281 .mode = 0644, 240 .mode = 0644,
282 .proc_handler = &proc_dointvec_minmax, 241 .proc_handler = proc_dointvec_minmax,
283 .strategy = &sysctl_intvec,
284 .extra1 = &zero, 242 .extra1 = &zero,
285 .extra2 = &addr_scope_max, 243 .extra2 = &addr_scope_max,
286 }, 244 },
287 { .ctl_name = 0 } 245 {
246 .procname = "rwnd_update_shift",
247 .data = &sctp_rwnd_upd_shift,
248 .maxlen = sizeof(int),
249 .mode = 0644,
250 .proc_handler = &proc_dointvec_minmax,
251 .extra1 = &one,
252 .extra2 = &rwnd_scale_max,
253 },
254
255 { /* sentinel */ }
288}; 256};
289 257
290static struct ctl_path sctp_path[] = { 258static struct ctl_path sctp_path[] = {
291 { .procname = "net", .ctl_name = CTL_NET, }, 259 { .procname = "net", },
292 { .procname = "sctp", .ctl_name = NET_SCTP, }, 260 { .procname = "sctp", },
293 { } 261 { }
294}; 262};
295 263
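The new rwnd_update_shift tunable is registered under net.sctp with bounds 1 and rwnd_scale_max (16). A minimal sketch that reads it, assuming the usual /proc/sys/net/sctp path:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/sctp/rwnd_update_shift", "r");
	int shift;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &shift) == 1)
		printf("rwnd_update_shift = %d (valid range 1..16)\n", shift);
	fclose(f);
	return 0;
}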
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 37a1184d789f..165d54e07fcd 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,7 @@
48 * be incorporated into the next SCTP release. 48 * be incorporated into the next SCTP release.
49 */ 49 */
50 50
51#include <linux/slab.h>
51#include <linux/types.h> 52#include <linux/types.h>
52#include <linux/random.h> 53#include <linux/random.h>
53#include <net/sctp/sctp.h> 54#include <net/sctp/sctp.h>
@@ -83,7 +84,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
83 peer->fast_recovery = 0; 84 peer->fast_recovery = 0;
84 85
85 peer->last_time_heard = jiffies; 86 peer->last_time_heard = jiffies;
86 peer->last_time_used = jiffies;
87 peer->last_time_ecne_reduced = jiffies; 87 peer->last_time_ecne_reduced = jiffies;
88 88
89 peer->init_sent_count = 0; 89 peer->init_sent_count = 0;
@@ -108,6 +108,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
108 (unsigned long)peer); 108 (unsigned long)peer);
109 setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, 109 setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
110 (unsigned long)peer); 110 (unsigned long)peer);
111 setup_timer(&peer->proto_unreach_timer,
112 sctp_generate_proto_unreach_event, (unsigned long)peer);
111 113
112 /* Initialize the 64-bit random nonce sent with heartbeat. */ 114 /* Initialize the 64-bit random nonce sent with heartbeat. */
113 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 115 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
@@ -171,6 +173,10 @@ void sctp_transport_free(struct sctp_transport *transport)
171 del_timer(&transport->T3_rtx_timer)) 173 del_timer(&transport->T3_rtx_timer))
172 sctp_transport_put(transport); 174 sctp_transport_put(transport);
173 175
176 /* Delete the ICMP proto unreachable timer if it's active. */
177 if (timer_pending(&transport->proto_unreach_timer) &&
178 del_timer(&transport->proto_unreach_timer))
179 sctp_association_put(transport->asoc);
174 180
175 sctp_transport_put(transport); 181 sctp_transport_put(transport);
176} 182}
@@ -564,10 +570,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 564 * to be done every RTO interval, we do it every heartbeat 570
565 * interval. 571 * interval.
566 */ 572 */
567 if (time_after(jiffies, transport->last_time_used + 573 transport->cwnd = max(transport->cwnd/2,
568 transport->rto)) 574 4*transport->asoc->pathmtu);
569 transport->cwnd = max(transport->cwnd/2,
570 4*transport->asoc->pathmtu);
571 break; 575 break;
572 } 576 }
573 577
@@ -578,6 +582,43 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
578 transport->cwnd, transport->ssthresh); 582 transport->cwnd, transport->ssthresh);
579} 583}
580 584
585/* Apply Max.Burst limit to the congestion window:
586 * sctpimpguide-05 2.14.2
587 * D) When the time comes for the sender to
588 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
589 * first be applied to limit how many new DATA chunks may be sent.
590 * The limit is applied by adjusting cwnd as follows:
591 * if ((flightsize+ Max.Burst * MTU) < cwnd)
592 * cwnd = flightsize + Max.Burst * MTU
593 */
594
595void sctp_transport_burst_limited(struct sctp_transport *t)
596{
597 struct sctp_association *asoc = t->asoc;
598 u32 old_cwnd = t->cwnd;
599 u32 max_burst_bytes;
600
601 if (t->burst_limited)
602 return;
603
604 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
605 if (max_burst_bytes < old_cwnd) {
606 t->cwnd = max_burst_bytes;
607 t->burst_limited = old_cwnd;
608 }
609}
610
 611/* Restore the old cwnd congestion window, after the burst had its
 612 * desired effect.
613 */
614void sctp_transport_burst_reset(struct sctp_transport *t)
615{
616 if (t->burst_limited) {
617 t->cwnd = t->burst_limited;
618 t->burst_limited = 0;
619 }
620}
621
581/* What is the next timeout value for this transport? */ 622/* What is the next timeout value for this transport? */
582unsigned long sctp_transport_timeout(struct sctp_transport *t) 623unsigned long sctp_transport_timeout(struct sctp_transport *t)
583{ 624{
@@ -600,6 +641,7 @@ void sctp_transport_reset(struct sctp_transport *t)
600 * (see Section 6.2.1) 641 * (see Section 6.2.1)
601 */ 642 */
602 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 643 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
644 t->burst_limited = 0;
603 t->ssthresh = asoc->peer.i.a_rwnd; 645 t->ssthresh = asoc->peer.i.a_rwnd;
604 t->rto = asoc->rto_initial; 646 t->rto = asoc->rto_initial;
605 t->rtt = 0; 647 t->rtt = 0;
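The Max.Burst clamp added above follows the sctpimpguide formula quoted in the comment: if flightsize + Max.Burst * MTU is below cwnd, cwnd is temporarily lowered to that value and restored once the burst is done. A stand-alone model with a worked example; the ex_* names are illustrative, not the kernel structures.

#include <stdio.h>

struct ex_transport {
	unsigned int cwnd;
	unsigned int flight_size;
	unsigned int burst_limited;	/* saved cwnd, 0 = not limited */
};

static void ex_burst_limited(struct ex_transport *t,
			     unsigned int max_burst, unsigned int pathmtu)
{
	unsigned int limit = t->flight_size + max_burst * pathmtu;

	if (!t->burst_limited && limit < t->cwnd) {
		t->burst_limited = t->cwnd;	/* remember the old cwnd */
		t->cwnd = limit;
	}
}

static void ex_burst_reset(struct ex_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

int main(void)
{
	/* cwnd 20000, nothing in flight, Max.Burst = 4, MTU = 1500:
	 * only 4 * 1500 = 6000 bytes may go out in this burst.
	 */
	struct ex_transport t = { .cwnd = 20000, .flight_size = 0 };

	ex_burst_limited(&t, 4, 1500);
	printf("clamped cwnd  = %u\n", t.cwnd);	/* 6000  */
	ex_burst_reset(&t);
	printf("restored cwnd = %u\n", t.cwnd);	/* 20000 */
	return 0;
}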
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 9bd64565021a..747d5412c463 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -42,6 +42,7 @@
42 * be incorporated into the next SCTP release. 42 * be incorporated into the next SCTP release.
43 */ 43 */
44 44
45#include <linux/slab.h>
45#include <linux/types.h> 46#include <linux/types.h>
46#include <linux/bitmap.h> 47#include <linux/bitmap.h>
47#include <net/sctp/sctp.h> 48#include <net/sctp/sctp.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8b3560fd876d..aa72e89c3ee1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -43,6 +43,7 @@
43 * be incorporated into the next SCTP release. 43 * be incorporated into the next SCTP release.
44 */ 44 */
45 45
46#include <linux/slab.h>
46#include <linux/types.h> 47#include <linux/types.h>
47#include <linux/skbuff.h> 48#include <linux/skbuff.h>
48#include <net/sctp/structs.h> 49#include <net/sctp/structs.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7b23803343cc..3a448536f0b6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -41,6 +41,7 @@
41 * be incorporated into the next SCTP release. 41 * be incorporated into the next SCTP release.
42 */ 42 */
43 43
44#include <linux/slab.h>
44#include <linux/types.h> 45#include <linux/types.h>
45#include <linux/skbuff.h> 46#include <linux/skbuff.h>
46#include <net/sock.h> 47#include <net/sock.h>