author		David S. Miller <davem@davemloft.net>	2008-06-10 05:22:26 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-10 05:22:26 -0400
commit		65b53e4cc90e59936733b3b95b9451d2ca47528d (patch)
tree		29932718192962671c48c3fd1ea017a6112459e8 /net/sctp
parent		788c0a53164c05c5ccdb1472474372b72ba74644 (diff)
parent		2e761e0532a784816e7e822dbaaece8c5d4be14d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/tg3.c
drivers/net/wireless/rt2x00/rt2x00dev.c
net/mac80211/ieee80211_i.h
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/associola.c	|  21
-rw-r--r--	net/sctp/ipv6.c		|  11
-rw-r--r--	net/sctp/output.c	|   2
-rw-r--r--	net/sctp/outqueue.c	| 120
-rw-r--r--	net/sctp/protocol.c	|  11
-rw-r--r--	net/sctp/transport.c	|  50
6 files changed, 143 insertions, 72 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 7b79d1e781a8..d5cc731b6798 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1206,6 +1206,9 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 	struct list_head *head = &asoc->peer.transport_addr_list;
 	struct list_head *pos;
 
+	if (asoc->peer.transport_count == 1)
+		return;
+
 	/* Find the next transport in a round-robin fashion. */
 	t = asoc->peer.retran_path;
 	pos = &t->transports;
@@ -1220,6 +1223,15 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
 		t = list_entry(pos, struct sctp_transport, transports);
 
+		/* We have exhausted the list, but didn't find any
+		 * other active transports. If so, use the next
+		 * transport.
+		 */
+		if (t == asoc->peer.retran_path) {
+			t = next;
+			break;
+		}
+
 		/* Try to find an active transport. */
 
 		if ((t->state == SCTP_ACTIVE) ||
@@ -1232,15 +1244,6 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 			if (!next)
 				next = t;
 		}
-
-		/* We have exhausted the list, but didn't find any
-		 * other active transports. If so, use the next
-		 * transport.
-		 */
-		if (t == asoc->peer.retran_path) {
-			t = next;
-			break;
-		}
 	}
 
 	asoc->peer.retran_path = t;
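The associola.c hunks above change sctp_assoc_update_retran_path() so that a single-homed peer never rotates its retransmission path, and so that the "wrapped all the way around without finding an active transport" check runs before the active-state test. The standalone C program below is only a simplified userspace model of that selection logic (plain arrays and invented names, not the kernel's types):

/* Minimal sketch, assuming a simple array of peer transports: skip the
 * update when there is only one address, otherwise walk round-robin from
 * the current retransmission path and pick the next ACTIVE/UNKNOWN
 * transport, falling back to the first non-current transport seen if the
 * walk wraps back to the starting point.
 */
#include <stdio.h>

enum state { INACTIVE, ACTIVE, UNKNOWN };

struct transport {
	const char *name;
	enum state state;
};

static int update_retran_path(const struct transport *t, int count, int cur)
{
	int next = -1;
	int pos = cur;

	if (count == 1)			/* nothing to rotate to */
		return cur;

	for (;;) {
		pos = (pos + 1) % count;	/* round-robin walk */

		/* Wrapped around: settle for the first non-current
		 * transport that was seen along the way.
		 */
		if (pos == cur)
			return next;

		if (t[pos].state == ACTIVE || t[pos].state == UNKNOWN)
			return pos;

		if (next < 0)
			next = pos;
	}
}

int main(void)
{
	struct transport peers[] = {
		{ "10.0.0.1", INACTIVE },
		{ "10.0.0.2", INACTIVE },
		{ "10.0.0.3", ACTIVE },
	};
	int cur = 0;

	cur = update_retran_path(peers, 3, cur);
	printf("new retran path: %s\n", peers[cur].name);	/* 10.0.0.3 */
	return 0;
}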
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e45e44c60635..a2f4d4d51593 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -299,7 +299,8 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 /* Fills in the source address(saddr) based on the destination address(daddr)
  * and asoc's bind address list.
  */
-static void sctp_v6_get_saddr(struct sctp_association *asoc,
+static void sctp_v6_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -318,7 +319,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
 	if (!asoc) {
 		ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
 				   &daddr->v6.sin6_addr,
-				   inet6_sk(asoc->base.sk)->srcprefs,
+				   inet6_sk(&sk->inet.sk)->srcprefs,
 				   &saddr->v6.sin6_addr);
 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
 				  NIP6(saddr->v6.sin6_addr));
@@ -726,6 +727,11 @@ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
 }
 
+static void sctp_v6_ecn_capable(struct sock *sk)
+{
+	inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
 /* Initialize a PF_INET6 socket msg_name. */
 static void sctp_inet6_msgname(char *msgname, int *addr_len)
 {
@@ -996,6 +1002,7 @@ static struct sctp_af sctp_af_inet6 = {
 	.skb_iif = sctp_v6_skb_iif,
 	.is_ce = sctp_v6_is_ce,
 	.seq_dump_addr = sctp_v6_seq_dump_addr,
+	.ecn_capable = sctp_v6_ecn_capable,
 	.net_header_len = sizeof(struct ipv6hdr),
 	.sockaddr_len = sizeof(struct sockaddr_in6),
 #ifdef CONFIG_COMPAT
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cf4f9fb6819d..6d45bae93b46 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -548,7 +548,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * Note: The works for IPv6 layer checks this bit too later
 	 * in transmission. See IP6_ECN_flow_xmit().
 	 */
-	INET_ECN_xmit(nskb->sk);
+	(*tp->af_specific->ecn_capable)(nskb->sk);
 
 	/* Set up the IP options. */
 	/* BUG: not implemented
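Taken together, the output.c change above and the new ecn_capable operations in ipv6.c and protocol.c route the "mark this packet ECN-capable" step through an address-family specific hook instead of always calling the IPv4-only INET_ECN_xmit(). The following is a rough userspace model of that dispatch; the struct names, fields, and the ECN_ECT_0 constant are illustrative stand-ins, not the kernel's definitions:

/* Sketch of per-address-family ECN marking through a function pointer:
 * IPv4 sets ECT(0) in the TOS byte, IPv6 in the traffic class.
 */
#include <stdio.h>

#define ECN_ECT_0	0x02		/* ECT(0) codepoint */

struct fake_sock {
	unsigned char tos;		/* IPv4 TOS */
	unsigned char tclass;		/* IPv6 traffic class */
};

struct af_ops {
	void (*ecn_capable)(struct fake_sock *sk);
};

static void v4_ecn_capable(struct fake_sock *sk)
{
	sk->tos |= ECN_ECT_0;		/* mark outgoing IPv4 packets ECT(0) */
}

static void v6_ecn_capable(struct fake_sock *sk)
{
	sk->tclass |= ECN_ECT_0;	/* mark outgoing IPv6 packets ECT(0) */
}

static const struct af_ops af_inet  = { .ecn_capable = v4_ecn_capable };
static const struct af_ops af_inet6 = { .ecn_capable = v6_ecn_capable };

static void transmit(const struct af_ops *af, struct fake_sock *sk)
{
	/* ...build the packet...; then, if ECN is in use: */
	af->ecn_capable(sk);
}

int main(void)
{
	struct fake_sock s4 = { 0, 0 }, s6 = { 0, 0 };

	transmit(&af_inet, &s4);
	transmit(&af_inet6, &s6);
	printf("v4 tos=%#x v6 tclass=%#x\n",
	       (unsigned)s4.tos, (unsigned)s6.tclass);
	return 0;
}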
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 59edfd25a19c..ace6770e9048 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,6 +208,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
 
+	q->fast_rtx = 0;
 	q->outstanding_bytes = 0;
 	q->empty = 1;
 	q->cork = 0;
@@ -500,6 +501,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	case SCTP_RTXR_FAST_RTX:
 		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
+		q->fast_rtx = 1;
 		break;
 	case SCTP_RTXR_PMTUD:
 		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
@@ -518,9 +520,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
 	 * following the procedures outlined in C1 - C5.
 	 */
-	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+	if (reason == SCTP_RTXR_T3_RTX)
+		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
 
-	error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+	/* Flush the queues only on timeout, since fast_rtx is only
+	 * triggered during sack processing and the queue
+	 * will be flushed at the end.
+	 */
+	if (reason != SCTP_RTXR_FAST_RTX)
+		error = sctp_outq_flush(q, /* rtx_timeout */ 1);
 
 	if (error)
 		q->asoc->base.sk->sk_err = -error;
@@ -538,17 +546,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			       int rtx_timeout, int *start_timer)
 {
 	struct list_head *lqueue;
-	struct list_head *lchunk;
 	struct sctp_transport *transport = pkt->transport;
 	sctp_xmit_t status;
 	struct sctp_chunk *chunk, *chunk1;
 	struct sctp_association *asoc;
+	int fast_rtx;
 	int error = 0;
+	int timer = 0;
+	int done = 0;
 
 	asoc = q->asoc;
 	lqueue = &q->retransmit;
+	fast_rtx = q->fast_rtx;
 
-	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
+	/* This loop handles time-out retransmissions, fast retransmissions,
+	 * and retransmissions due to opening of whindow.
+	 *
+	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
 	 *
 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
 	 *     outstanding DATA chunks for the address for which the
@@ -563,12 +577,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	 * [Just to be painfully clear, if we are retransmitting
 	 * because a timeout just happened, we should send only ONE
 	 * packet of retransmitted data.]
+	 *
+	 * For fast retransmissions we also send only ONE packet. However,
+	 * if we are just flushing the queue due to open window, we'll
+	 * try to send as much as possible.
 	 */
-	lchunk = sctp_list_dequeue(lqueue);
-
-	while (lchunk) {
-		chunk = list_entry(lchunk, struct sctp_chunk,
-				   transmitted_list);
+	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
 
 		/* Make sure that Gap Acked TSNs are not retransmitted. A
 		 * simple approach is just to move such TSNs out of the
@@ -576,58 +590,60 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		 * next chunk.
 		 */
 		if (chunk->tsn_gap_acked) {
-			list_add_tail(lchunk, &transport->transmitted);
-			lchunk = sctp_list_dequeue(lqueue);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+					&transport->transmitted);
 			continue;
 		}
 
+		/* If we are doing fast retransmit, ignore non-fast_rtransmit
+		 * chunks
+		 */
+		if (fast_rtx && !chunk->fast_retransmit)
+			continue;
+
 		/* Attempt to append this chunk to the packet. */
 		status = sctp_packet_append_chunk(pkt, chunk);
 
 		switch (status) {
 		case SCTP_XMIT_PMTU_FULL:
 			/* Send this packet. */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* If we are retransmitting, we should only
 			 * send a single packet.
 			 */
-			if (rtx_timeout) {
-				list_add(lchunk, lqueue);
-				lchunk = NULL;
-			}
+			if (rtx_timeout || fast_rtx)
+				done = 1;
 
-			/* Bundle lchunk in the next round. */
+			/* Bundle next chunk in the next round. */
 			break;
 
 		case SCTP_XMIT_RWND_FULL:
 			/* Send this packet. */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* Stop sending DATA as there is no more room
 			 * at the receiver.
 			 */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		case SCTP_XMIT_NAGLE_DELAY:
 			/* Send this packet. */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* Stop sending DATA because of nagle delay. */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		default:
 			/* The append was successful, so add this chunk to
 			 * the transmitted list.
 			 */
-			list_add_tail(lchunk, &transport->transmitted);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+					&transport->transmitted);
 
 			/* Mark the chunk as ineligible for fast retransmit
 			 * after it is retransmitted.
@@ -635,27 +651,44 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			if (chunk->fast_retransmit > 0)
 				chunk->fast_retransmit = -1;
 
-			*start_timer = 1;
-			q->empty = 0;
+			/* Force start T3-rtx timer when fast retransmitting
+			 * the earliest outstanding TSN
+			 */
+			if (!timer && fast_rtx &&
+			    ntohl(chunk->subh.data_hdr->tsn) ==
+					     asoc->ctsn_ack_point + 1)
+				timer = 2;
 
-			/* Retrieve a new chunk to bundle. */
-			lchunk = sctp_list_dequeue(lqueue);
+			q->empty = 0;
 			break;
 		}
 
-		/* If we are here due to a retransmit timeout or a fast
-		 * retransmit and if there are any chunks left in the retransmit
-		 * queue that could not fit in the PMTU sized packet, they need
-		 * to be marked as ineligible for a subsequent fast retransmit.
-		 */
-		if (rtx_timeout && !lchunk) {
-			list_for_each_entry(chunk1, lqueue, transmitted_list) {
-				if (chunk1->fast_retransmit > 0)
-					chunk1->fast_retransmit = -1;
-			}
+		/* Set the timer if there were no errors */
+		if (!error && !timer)
+			timer = 1;
+
+		if (done)
+			break;
+	}
+
+	/* If we are here due to a retransmit timeout or a fast
+	 * retransmit and if there are any chunks left in the retransmit
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
+	 */
+	if (rtx_timeout || fast_rtx) {
+		list_for_each_entry(chunk1, lqueue, transmitted_list) {
+			if (chunk1->fast_retransmit > 0)
+				chunk1->fast_retransmit = -1;
 		}
 	}
 
+	*start_timer = timer;
+
+	/* Clear fast retransmit hint */
+	if (fast_rtx)
+		q->fast_rtx = 0;
+
 	return error;
 }
 
@@ -862,7 +895,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 						       rtx_timeout, &start_timer);
 
 			if (start_timer)
-				sctp_transport_reset_timers(transport);
+				sctp_transport_reset_timers(transport,
+							    start_timer-1);
 
 			/* This can happen on COOKIE-ECHO resend. Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -977,7 +1011,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 			list_add_tail(&chunk->transmitted_list,
 					&transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_timers(transport, start_timer-1);
 
 			q->empty = 0;
 
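The outqueue.c changes gate the retransmit flush on a new q->fast_rtx hint: a fast retransmit no longer triggers a full queue flush from sctp_retransmit(), only chunks already marked for fast retransmit are picked up, at most one packet's worth is sent, and start_timer is set to 2 (passed on as force = start_timer - 1) when the earliest outstanding TSN goes out. The program below is only a deliberately simplified userspace model of that control flow: it collapses packet building to one chunk per "packet", ignores transmit errors, and uses invented names rather than the kernel's.

/* Simplified model of the fast-retransmit gating in the retransmit flush. */
#include <stdio.h>

struct rchunk {
	unsigned int tsn;
	int fast_retransmit;	/* >0: marked for fast rtx, -1: already resent */
	int gap_acked;
};

static int flush_rtx(struct rchunk *q, int n, unsigned int ctsn_ack_point,
		     int fast_rtx, int *start_timer)
{
	int timer = 0, sent = 0;

	for (int i = 0; i < n; i++) {
		if (q[i].gap_acked)
			continue;		/* never resend gap-acked TSNs */
		if (fast_rtx && q[i].fast_retransmit <= 0)
			continue;		/* fast rtx: only marked chunks */

		sent++;
		q[i].fast_retransmit = -1;	/* ineligible from now on */

		/* Force a T3-rtx restart when the earliest outstanding TSN
		 * goes out as a fast retransmission.
		 */
		if (!timer && fast_rtx && q[i].tsn == ctsn_ack_point + 1)
			timer = 2;

		if (fast_rtx && sent == 1)
			break;			/* one packet's worth only */
	}

	if (!timer)
		timer = 1;			/* normal (non-forced) restart */
	*start_timer = timer;
	return sent;
}

int main(void)
{
	struct rchunk q[] = {
		{ 101, 1, 0 },	/* earliest outstanding, marked for fast rtx */
		{ 102, 0, 0 },	/* not marked: skipped during fast rtx */
		{ 103, 1, 1 },	/* gap-acked: always skipped */
	};
	int timer;
	int sent = flush_rtx(q, 3, 100, /* fast_rtx */ 1, &timer);

	printf("sent=%d start_timer=%d (2 => force restart)\n", sent, timer);
	return 0;
}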
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b8bd9e014498..d6af466091d2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -473,11 +473,11 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
 	/* Walk through the bind address list and look for a bind
 	 * address that matches the source address of the returned dst.
 	 */
+	sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
 		if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
 			continue;
-		sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
 		if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
 			goto out_unlock;
 	}
@@ -522,7 +522,8 @@ out:
 /* For v4, the source address is cached in the route entry(dst). So no need
  * to cache it separately and hence this is an empty routine.
  */
-static void sctp_v4_get_saddr(struct sctp_association *asoc,
+static void sctp_v4_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -619,6 +620,11 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
+static void sctp_v4_ecn_capable(struct sock *sk)
+{
+	INET_ECN_xmit(sk);
+}
+
 /* Event handler for inet address addition/deletion events.
 * The sctp_local_addr_list needs to be protocted by a spin lock since
 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -937,6 +943,7 @@ static struct sctp_af sctp_af_inet = {
 	.skb_iif = sctp_v4_skb_iif,
 	.is_ce = sctp_v4_is_ce,
 	.seq_dump_addr = sctp_v4_seq_dump_addr,
+	.ecn_capable = sctp_v4_ecn_capable,
 	.net_header_len = sizeof(struct iphdr),
 	.sockaddr_len = sizeof(struct sockaddr_in),
 #ifdef CONFIG_COMPAT
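Besides hoisting the loop-invariant sctp_v4_dst_saddr() call out of the RCU walk and adding the v4 ecn_capable hook, the protocol.c and ipv6.c hunks give get_saddr() an explicit sctp_sock argument: the IPv6 variant previously read srcprefs through asoc->base.sk inside an "if (!asoc)" branch, i.e. through a NULL association. The toy sketch below only illustrates that calling convention with made-up types; it is not the kernel's API.

/* Sketch: pass the socket separately so socket-level preferences remain
 * reachable even when no association exists yet (asoc == NULL).
 */
#include <stdio.h>
#include <stddef.h>

struct fake_sock  { int srcprefs; };
struct fake_assoc { struct fake_sock *sk; int primary_saddr; };

static int get_saddr(struct fake_sock *sk, struct fake_assoc *asoc)
{
	if (!asoc)			/* no association: socket-level prefs only */
		return sk->srcprefs;

	/* With an association we could consult its bind-address list;
	 * here we just return a cached primary source address.
	 */
	return asoc->primary_saddr;
}

int main(void)
{
	struct fake_sock sk = { .srcprefs = 1 };
	struct fake_assoc asoc = { .sk = &sk, .primary_saddr = 42 };

	printf("no asoc  : %d\n", get_saddr(&sk, NULL));
	printf("with asoc: %d\n", get_saddr(&sk, &asoc));
	return 0;
}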
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index f4938f6c5abe..3f34f61221ec 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -79,6 +79,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	peer->rttvar = 0;
 	peer->srtt = 0;
 	peer->rto_pending = 0;
+	peer->fast_recovery = 0;
 
 	peer->last_time_heard = jiffies;
 	peer->last_time_used = jiffies;
@@ -190,7 +191,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer. This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -200,7 +201,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
 	 * address.
 	 */
 
-	if (!timer_pending(&transport->T3_rtx_timer))
+	if (force || !timer_pending(&transport->T3_rtx_timer))
 		if (!mod_timer(&transport->T3_rtx_timer,
 			       jiffies + transport->rto))
 			sctp_transport_hold(transport);
@@ -291,7 +292,7 @@ void sctp_transport_route(struct sctp_transport *transport,
 	if (saddr)
 		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
 	else
-		af->get_saddr(asoc, dst, daddr, &transport->saddr);
+		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
 
 	transport->dst = dst;
 	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
@@ -403,11 +404,16 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
 	cwnd = transport->cwnd;
 	flight_size = transport->flight_size;
 
+	/* See if we need to exit Fast Recovery first */
+	if (transport->fast_recovery &&
+	    TSN_lte(transport->fast_recovery_exit, sack_ctsn))
+		transport->fast_recovery = 0;
+
 	/* The appropriate cwnd increase algorithm is performed if, and only
-	 * if the cumulative TSN has advanced and the congestion window is
+	 * if the cumulative TSN whould advanced and the congestion window is
 	 * being fully utilized.
 	 */
-	if ((transport->asoc->ctsn_ack_point >= sack_ctsn) ||
+	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
 	    (flight_size < cwnd))
 		return;
 
@@ -416,17 +422,23 @@
 	pmtu = transport->asoc->pathmtu;
 
 	if (cwnd <= ssthresh) {
-		/* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less
-		 * than or equal to ssthresh an SCTP endpoint MUST use the
-		 * slow start algorithm to increase cwnd only if the current
-		 * congestion window is being fully utilized and an incoming
-		 * SACK advances the Cumulative TSN Ack Point. Only when these
-		 * two conditions are met can the cwnd be increased otherwise
-		 * the cwnd MUST not be increased. If these conditions are met
-		 * then cwnd MUST be increased by at most the lesser of
-		 * 1) the total size of the previously outstanding DATA
-		 * chunk(s) acknowledged, and 2) the destination's path MTU.
+		/* RFC 4960 7.2.1
+		 * o When cwnd is less than or equal to ssthresh, an SCTP
+		 * endpoint MUST use the slow-start algorithm to increase
+		 * cwnd only if the current congestion window is being fully
+		 * utilized, an incoming SACK advances the Cumulative TSN
+		 * Ack Point, and the data sender is not in Fast Recovery.
+		 * Only when these three conditions are met can the cwnd be
+		 * increased; otherwise, the cwnd MUST not be increased.
+		 * If these conditions are met, then cwnd MUST be increased
+		 * by, at most, the lesser of 1) the total size of the
+		 * previously outstanding DATA chunk(s) acknowledged, and
+		 * 2) the destination's path MTU. This upper bound protects
+		 * against the ACK-Splitting attack outlined in [SAVAGE99].
 		 */
+		if (transport->fast_recovery)
+			return;
+
 		if (bytes_acked > pmtu)
 			cwnd += pmtu;
 		else
@@ -502,6 +514,13 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 		 * cwnd = ssthresh
 		 * partial_bytes_acked = 0
 		 */
+		if (transport->fast_recovery)
+			return;
+
+		/* Mark Fast recovery */
+		transport->fast_recovery = 1;
+		transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+
 		transport->ssthresh = max(transport->cwnd/2,
 					  4*transport->asoc->pathmtu);
 		transport->cwnd = transport->ssthresh;
@@ -586,6 +605,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 	t->flight_size = 0;
 	t->error_count = 0;
 	t->rto_pending = 0;
+	t->fast_recovery = 0;
 
 	/* Initialize the state information for SFR-CACC */
 	t->cacc.changeover_active = 0;
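The transport.c changes add per-transport Fast Recovery state: entering fast retransmit records fast_recovery_exit = next_tsn - 1 and lowers ssthresh/cwnd only once, slow start is suppressed while in recovery, and a SACK whose cumulative TSN reaches the exit point clears the flag (per RFC 4960 7.2.1/7.2.4). The program below is a compact userspace model of that state machine, not the kernel code: it uses plain integer comparison instead of the wrap-safe TSN_lte(), and it omits the flight-size and cumulative-TSN-advance preconditions on cwnd growth.

/* Simplified Fast Recovery bookkeeping for a single transport. */
#include <stdio.h>

struct tp {
	unsigned int cwnd, ssthresh, pmtu;
	unsigned int next_tsn;		/* next TSN the sender would assign */
	int fast_recovery;
	unsigned int fast_recovery_exit;
};

static void lower_cwnd_fast_rtx(struct tp *t)
{
	if (t->fast_recovery)
		return;				/* only one reduction per event */

	t->fast_recovery = 1;
	t->fast_recovery_exit = t->next_tsn - 1;
	t->ssthresh = t->cwnd / 2 > 4 * t->pmtu ? t->cwnd / 2 : 4 * t->pmtu;
	t->cwnd = t->ssthresh;
}

static void raise_cwnd(struct tp *t, unsigned int sack_ctsn,
		       unsigned int bytes_acked)
{
	if (t->fast_recovery && sack_ctsn >= t->fast_recovery_exit)
		t->fast_recovery = 0;		/* recovery point acked: exit */

	if (t->cwnd <= t->ssthresh) {		/* slow start */
		if (t->fast_recovery)
			return;			/* no growth during recovery */
		t->cwnd += bytes_acked > t->pmtu ? t->pmtu : bytes_acked;
	}
	/* (congestion avoidance branch omitted for brevity) */
}

int main(void)
{
	struct tp t = { .cwnd = 40000, .ssthresh = 60000, .pmtu = 1500,
			.next_tsn = 1001 };

	lower_cwnd_fast_rtx(&t);	/* enter recovery; cwnd = ssthresh = 20000 */
	lower_cwnd_fast_rtx(&t);	/* ignored: already in recovery */
	raise_cwnd(&t, 900, 3000);	/* still in recovery: cwnd unchanged */
	raise_cwnd(&t, 1000, 3000);	/* ctsn reaches exit point: cwnd grows */
	printf("cwnd=%u ssthresh=%u in_recovery=%d\n",
	       t.cwnd, t.ssthresh, t.fast_recovery);
	return 0;
}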