author	Andrea Bittau <a.bittau@cs.ucl.ac.uk>	2006-09-19 16:14:43 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 18:19:41 -0400
commit	374bcf32c86e1b56eab832bbb6b21e636707eab6 (patch)
tree	c813985a0736740c683d841010b9317db0a2b1ee	/net/dccp/ccids/ccid2.c
parent	07978aabd52ce67f59971872c80f76d6e3ca18ae (diff)
[DCCP] CCID2: Halve cwnd once upon multiple losses in a single RTT
When multiple losses occur in one RTT, the window should be halved only
once [a single "congestion event"]. This is now implemented, although
not perfectly.

Slightly changed the interface for changing the cwnd: pass hctx instead
of dp. This is required in order to allow change_cwnd to be called
from _init().

Signed-off-by: Andrea Bittau <a.bittau@cs.ucl.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--	net/dccp/ccids/ccid2.c	49
1 file changed, 31 insertions(+), 18 deletions(-)
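The core of the change is the new ccid2_congestion_event() helper added below: a loss halves cwnd only if the lost packet was sent after the previous halving, so all losses inside one RTT collapse into a single congestion event. The following standalone userspace sketch illustrates that rule outside the kernel. The names tx_state and congestion_event, and the plain integer timestamps standing in for jiffies/time_before(), are hypothetical; this is an illustration of the idea, not the kernel implementation (which operates on struct ccid2_hc_tx_sock, as shown in the diff).

/*
 * Standalone sketch (not kernel code) of the idea in this patch: when
 * several losses are detected within one RTT, treat them as a single
 * congestion event and halve cwnd only once.
 */
#include <stdio.h>

struct tx_state {
	unsigned int cwnd;      /* congestion window, in packets */
	unsigned int ssthresh;  /* slow-start threshold */
	long last_cong;         /* time of the last congestion event */
};

/* React to a loss whose packet was sent at time `sent`, handled at `now`. */
static void congestion_event(struct tx_state *tx, long sent, long now)
{
	/* Packet was sent before the last cwnd halving: same RTT,
	 * same congestion event, nothing further to do. */
	if (sent < tx->last_cong)
		return;

	tx->last_cong = now;

	tx->cwnd >>= 1;             /* halve the window ...         */
	if (tx->cwnd == 0)
		tx->cwnd = 1;       /* ... but never let it hit 0   */
	tx->ssthresh = tx->cwnd;
	if (tx->ssthresh < 2)
		tx->ssthresh = 2;
}

int main(void)
{
	struct tx_state tx = { .cwnd = 16, .ssthresh = ~0U, .last_cong = 0 };

	/* Three losses for packets sent at t=5..7, all processed at t=10. */
	congestion_event(&tx, 5, 10);
	congestion_event(&tx, 6, 10);
	congestion_event(&tx, 7, 10);
	printf("cwnd=%u ssthresh=%u\n", tx.cwnd, tx.ssthresh);
	return 0;
}

Only the first call halves cwnd (16 -> 8); the later losses fall inside the same RTT and are ignored, which is exactly the behaviour the commit message describes.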
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 93a30ae8d07a..b88da035865f 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -187,10 +187,8 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, int val)
 	dp->dccps_l_ack_ratio = val;
 }
 
-static void ccid2_change_cwnd(struct sock *sk, int val)
+static void ccid2_change_cwnd(struct ccid2_hc_tx_sock *hctx, int val)
 {
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
 	if (val == 0)
 		val = 1;
 
@@ -234,7 +232,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd >> 1;
 	if (hctx->ccid2hctx_ssthresh < 2)
 		hctx->ccid2hctx_ssthresh = 2;
-	ccid2_change_cwnd(sk, 1);
+	ccid2_change_cwnd(hctx, 1);
 
 	/* clear state about stuff we sent */
 	hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
@@ -444,7 +442,7 @@ static inline void ccid2_new_ack(struct sock *sk,
 		/* increase every 2 acks */
 		hctx->ccid2hctx_ssacks++;
 		if (hctx->ccid2hctx_ssacks == 2) {
-			ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd + 1);
+			ccid2_change_cwnd(hctx, hctx->ccid2hctx_cwnd + 1);
 			hctx->ccid2hctx_ssacks = 0;
 			*maxincr = *maxincr - 1;
 		}
@@ -457,7 +455,7 @@ static inline void ccid2_new_ack(struct sock *sk,
 		hctx->ccid2hctx_acks++;
 
 		if (hctx->ccid2hctx_acks >= hctx->ccid2hctx_cwnd) {
-			ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd + 1);
+			ccid2_change_cwnd(hctx, hctx->ccid2hctx_cwnd + 1);
 			hctx->ccid2hctx_acks = 0;
 		}
 	}
@@ -532,6 +530,22 @@ static void ccid2_hc_tx_dec_pipe(struct sock *sk)
 	ccid2_hc_tx_kill_rto_timer(sk);
 }
 
+static void ccid2_congestion_event(struct ccid2_hc_tx_sock *hctx,
+				   struct ccid2_seq *seqp)
+{
+	if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) {
+		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
+		return;
+	}
+
+	hctx->ccid2hctx_last_cong = jiffies;
+
+	ccid2_change_cwnd(hctx, hctx->ccid2hctx_cwnd >> 1);
+	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd;
+	if (hctx->ccid2hctx_ssthresh < 2)
+		hctx->ccid2hctx_ssthresh = 2;
+}
+
 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
@@ -542,7 +556,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	unsigned char veclen;
 	int offset = 0;
 	int done = 0;
-	int loss = 0;
 	unsigned int maxincr = 0;
 
 	ccid2_hc_tx_check_sanity(hctx);
@@ -636,7 +649,8 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			    !seqp->ccid2s_acked) {
 				if (state ==
 				    DCCP_ACKVEC_STATE_ECN_MARKED) {
-					loss = 1;
+					ccid2_congestion_event(hctx,
+							       seqp);
 				} else
 					ccid2_new_ack(sk, seqp,
 						      &maxincr);
@@ -688,7 +702,13 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	/* check for lost packets */
 	while (1) {
 		if (!seqp->ccid2s_acked) {
-			loss = 1;
+			ccid2_pr_debug("Packet lost: %llu\n",
+				       seqp->ccid2s_seq);
+			/* XXX need to traverse from tail -> head in
+			 * order to detect multiple congestion events in
+			 * one ack vector.
+			 */
+			ccid2_congestion_event(hctx, seqp);
 			ccid2_hc_tx_dec_pipe(sk);
 		}
 		if (seqp == hctx->ccid2hctx_seqt)
@@ -707,14 +727,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next;
 	}
 
-	if (loss) {
-		/* XXX do bit shifts guarantee a 0 as the new bit? */
-		ccid2_change_cwnd(sk, hctx->ccid2hctx_cwnd >> 1);
-		hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd;
-		if (hctx->ccid2hctx_ssthresh < 2)
-			hctx->ccid2hctx_ssthresh = 2;
-	}
-
 	ccid2_hc_tx_check_sanity(hctx);
 }
 
@@ -722,7 +734,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
 	struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
 
-	hctx->ccid2hctx_cwnd = 1;
+	ccid2_change_cwnd(hctx, 1);
 	/* Initialize ssthresh to infinity. This means that we will exit the
 	 * initial slow-start after the first packet loss. This is what we
 	 * want.
@@ -741,6 +753,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	hctx->ccid2hctx_rttvar = -1;
 	hctx->ccid2hctx_lastrtt = 0;
 	hctx->ccid2hctx_rpdupack = -1;
+	hctx->ccid2hctx_last_cong = jiffies;
 
 	hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
 	hctx->ccid2hctx_rtotimer.data = (unsigned long)sk;