author		Vlad Yasevich <vladislav.yasevich@hp.com>	2008-06-04 15:39:36 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-04 15:39:36 -0400
commit		8b750ce54bd8ab5f75d519ee450e1b0c5226ebe9 (patch)
tree		f9097fce144eb4646f5cdb1ac66e3b7f0f4c5a55
parent		62aeaff5ccd96462b7077046357a6d7886175a57 (diff)
sctp: Flush the queue only once during fast retransmit.
When fast retransmit is triggered by a SACK, we should flush the queue
only once so that only one retransmit happens.  Also, since we could
potentially have non-fast-rtx chunks on the retransmit queue, we need to
make sure that any chunks eligible for fast retransmit are sent first
during fast retransmission.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Tested-by: Wei Yongjun <yjwei@cn.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/sctp/outqueue.c	82
1 file changed, 48 insertions(+), 34 deletions(-)
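The fix has two halves, both visible in the diff below: sctp_retransmit()
stops flushing the queue when the reason is fast retransmit (the SACK path
flushes once at the end), and the retransmit-queue walk learns to skip
chunks not marked for fast retransmit during a fast-rtx pass. As a hedged
userspace model of the first half (the enum values and helper name are
hypothetical stand-ins for the kernel's SCTP_RTXR_* reason codes):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's SCTP_RTXR_* reason codes. */
enum rtx_reason { RTXR_T3_RTX, RTXR_FAST_RTX, RTXR_PMTUD };

/* Model of the dispatch the patch introduces: fast retransmit defers
 * the flush to the end of SACK processing; other reasons flush now.
 */
static int flushes_immediately(enum rtx_reason reason)
{
	return reason != RTXR_FAST_RTX;
}

int main(void)
{
	printf("T3-rtx timeout flushes immediately: %d\n",
	       flushes_immediately(RTXR_T3_RTX));
	printf("fast retransmit flushes immediately: %d\n",
	       flushes_immediately(RTXR_FAST_RTX));
	return 0;
}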
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 5d3c441e84d3..ace6770e9048 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -520,9 +520,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
 	 * following the procedures outlined in C1 - C5.
 	 */
-	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+	if (reason == SCTP_RTXR_T3_RTX)
+		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
 
-	error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+	/* Flush the queues only on timeout, since fast_rtx is only
+	 * triggered during sack processing and the queue
+	 * will be flushed at the end.
+	 */
+	if (reason != SCTP_RTXR_FAST_RTX)
+		error = sctp_outq_flush(q, /* rtx_timeout */ 1);
 
 	if (error)
 		q->asoc->base.sk->sk_err = -error;
@@ -540,7 +546,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			       int rtx_timeout, int *start_timer)
 {
 	struct list_head *lqueue;
-	struct list_head *lchunk;
 	struct sctp_transport *transport = pkt->transport;
 	sctp_xmit_t status;
 	struct sctp_chunk *chunk, *chunk1;
@@ -548,12 +553,16 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	int fast_rtx;
 	int error = 0;
 	int timer = 0;
+	int done = 0;
 
 	asoc = q->asoc;
 	lqueue = &q->retransmit;
 	fast_rtx = q->fast_rtx;
 
-	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
+	/* This loop handles time-out retransmissions, fast retransmissions,
+	 * and retransmissions due to opening of window.
+	 *
+	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
 	 *
 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
 	 *     outstanding DATA chunks for the address for which the
@@ -568,12 +577,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	 * [Just to be painfully clear, if we are retransmitting
 	 * because a timeout just happened, we should send only ONE
 	 * packet of retransmitted data.]
+	 *
+	 * For fast retransmissions we also send only ONE packet.  However,
+	 * if we are just flushing the queue due to open window, we'll
+	 * try to send as much as possible.
 	 */
-	lchunk = sctp_list_dequeue(lqueue);
-
-	while (lchunk) {
-		chunk = list_entry(lchunk, struct sctp_chunk,
-				   transmitted_list);
+	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
 
 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
 		 * simple approach is just to move such TSNs out of the
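The hunk above replaces the manual dequeue/list_entry loop with
list_for_each_entry_safe(). The _safe variant matters because the loop
body moves entries onto transport->transmitted mid-walk; caching the next
pointer before the body runs keeps the iteration intact. A minimal
self-contained illustration of that idiom (plain userspace C with a
hand-rolled list, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Tiny reimplementation of the kernel list idiom, just to show why a
 * "safe" walk is required when the body moves entries to another list.
 */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item { int id; struct list_head node; };

int main(void)
{
	struct list_head src, dst, *pos, *tmp;
	struct item items[3] = { { 1 }, { 2 }, { 3 } };
	int i;

	list_init(&src);
	list_init(&dst);
	for (i = 0; i < 3; i++)
		list_add_tail(&items[i].node, &src);

	/* 'tmp' caches pos->next before the body runs, so moving 'pos'
	 * onto 'dst' (which rewrites its next pointer) cannot derail
	 * the walk -- the same reason the diff switches to
	 * list_for_each_entry_safe().
	 */
	for (pos = src.next, tmp = pos->next; pos != &src;
	     pos = tmp, tmp = pos->next) {
		list_del(pos);
		list_add_tail(pos, &dst);
	}

	for (pos = dst.next; pos != &dst; pos = pos->next)
		printf("moved item %d\n",
		       container_of(pos, struct item, node)->id);
	return 0;
}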
@@ -581,11 +590,18 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		 * next chunk.
 		 */
 		if (chunk->tsn_gap_acked) {
-			list_add_tail(lchunk, &transport->transmitted);
-			lchunk = sctp_list_dequeue(lqueue);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+				      &transport->transmitted);
 			continue;
 		}
 
+		/* If we are doing fast retransmit, ignore non-fast_retransmit
+		 * chunks.
+		 */
+		if (fast_rtx && !chunk->fast_retransmit)
+			continue;
+
 		/* Attempt to append this chunk to the packet. */
 		status = sctp_packet_append_chunk(pkt, chunk);
 
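The newly added skip is what guarantees fast-retransmit-eligible chunks go
out first: on a fast-rtx pass, every unmarked chunk is passed over and
simply stays queued. Distilled into a hedged userspace check (the helper
name is invented; the condition mirrors the hunk):

#include <stdio.h>

/* Minimal stand-in for struct sctp_chunk, keeping only the marker. */
struct chunk { int fast_retransmit; };

/* Mirrors the hunk's test: on a fast-rtx pass (fast_rtx != 0), only
 * chunks with a nonzero fast_retransmit mark are considered.
 */
static int pass_wants_chunk(int fast_rtx, const struct chunk *c)
{
	return !fast_rtx || c->fast_retransmit;
}

int main(void)
{
	struct chunk marked = { 1 }, unmarked = { 0 };

	printf("fast pass, marked chunk:      %d\n", pass_wants_chunk(1, &marked));
	printf("fast pass, unmarked chunk:    %d\n", pass_wants_chunk(1, &unmarked));
	printf("timeout pass, unmarked chunk: %d\n", pass_wants_chunk(0, &unmarked));
	return 0;
}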
@@ -597,12 +613,10 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			/* If we are retransmitting, we should only
 			 * send a single packet.
 			 */
-			if (rtx_timeout || fast_rtx) {
-				list_add(lchunk, lqueue);
-				lchunk = NULL;
-			}
+			if (rtx_timeout || fast_rtx)
+				done = 1;
 
-			/* Bundle lchunk in the next round.  */
+			/* Bundle next chunk in the next round.  */
 			break;
 
 		case SCTP_XMIT_RWND_FULL:
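With the cursor variable gone, the stop conditions in this and the next
two hunks can no longer re-queue the chunk and null the cursor to end the
walk; they set a done flag instead, which lets the per-iteration
bookkeeping at the bottom of the loop (timer selection, q->empty) still
run before the walk exits. The shape of that pattern, in a trivial
runnable form (illustrative only):

#include <stdio.h>

int main(void)
{
	int done = 0, i;

	for (i = 0; i < 10; i++) {
		if (i == 3)
			done = 1;       /* stop condition hit mid-iteration */

		/* Bookkeeping still runs for the final iteration, just as
		 * the timer selection does in sctp_outq_flush_rtx().
		 */
		printf("bookkeeping for step %d\n", i);

		if (done)
			break;
	}
	return 0;
}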
@@ -612,8 +626,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			/* Stop sending DATA as there is no more room
 			 * at the receiver.
 			 */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		case SCTP_XMIT_NAGLE_DELAY:
@@ -621,15 +634,16 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			error = sctp_packet_transmit(pkt);
 
 			/* Stop sending DATA because of nagle delay. */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		default:
 			/* The append was successful, so add this chunk to
 			 * the transmitted list.
 			 */
-			list_add_tail(lchunk, &transport->transmitted);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+				      &transport->transmitted);
 
 			/* Mark the chunk as ineligible for fast retransmit
 			 * after it is retransmitted.
@@ -646,9 +660,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 				timer = 2;
 
 			q->empty = 0;
-
-			/* Retrieve a new chunk to bundle. */
-			lchunk = sctp_list_dequeue(lqueue);
 			break;
 		}
 
@@ -656,16 +667,19 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		if (!error && !timer)
 			timer = 1;
 
-		/* If we are here due to a retransmit timeout or a fast
-		 * retransmit and if there are any chunks left in the retransmit
-		 * queue that could not fit in the PMTU sized packet, they need
-		 * to be marked as ineligible for a subsequent fast retransmit.
-		 */
-		if (rtx_timeout && fast_rtx) {
-			list_for_each_entry(chunk1, lqueue, transmitted_list) {
-				if (chunk1->fast_retransmit > 0)
-					chunk1->fast_retransmit = -1;
-			}
+		if (done)
+			break;
+	}
+
+	/* If we are here due to a retransmit timeout or a fast
+	 * retransmit and if there are any chunks left in the retransmit
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
+	 */
+	if (rtx_timeout || fast_rtx) {
+		list_for_each_entry(chunk1, lqueue, transmitted_list) {
+			if (chunk1->fast_retransmit > 0)
+				chunk1->fast_retransmit = -1;
 		}
 	}
 
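The last hunk also widens the final marking from rtx_timeout && fast_rtx
to rtx_timeout || fast_rtx, so leftover chunks are demoted after either
kind of single-packet pass. As the diff shows, fast_retransmit behaves as
a small three-state marker: positive means marked for fast retransmit,
zero unmarked, and -1 ineligible from then on. A compact userspace
illustration of that convention (the struct is a hypothetical stand-in):

#include <stdio.h>

/* Hypothetical stand-in for struct sctp_chunk's marker field. */
struct chunk { int fast_retransmit; };

int main(void)
{
	struct chunk c = { 0 };         /* freshly queued: unmarked */

	c.fast_retransmit = 1;          /* SACK processing marks it */

	/* A chunk left on the queue after a timeout or fast-rtx pass is
	 * demoted, exactly as the loop at the end of the diff does.
	 */
	if (c.fast_retransmit > 0)
		c.fast_retransmit = -1; /* ineligible for future fast rtx */

	printf("still eligible: %s\n", c.fast_retransmit > 0 ? "yes" : "no");
	return 0;
}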