Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--	net/dccp/ccids/ccid2.c	84
1 file changed, 66 insertions, 18 deletions
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040fc818..67164bb6ae4d 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
 	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
 
 	/*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
 		val = max_ratio;
 	}
-	if (val > DCCPF_ACK_RATIO_MAX)
-		val = DCCPF_ACK_RATIO_MAX;
+	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
 
-	if (val == dp->dccps_l_ack_ratio)
-		return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 
-	ccid2_pr_debug("changing local ack ratio to %u\n", val);
-	dp->dccps_l_ack_ratio = val;
+	/*
+	 * After a loss, idle period, application limited period, or RTO we
+	 * need to check that the ack ratio is still less than the congestion
+	 * window. Otherwise, we will send an entire congestion window of
+	 * packets and got no response because we haven't sent ack ratio
+	 * packets yet.
+	 * If the ack ratio does need to be reduced, we reduce it to half of
+	 * the congestion window (or 1 if that's zero) instead of to the
+	 * congestion window. This prevents problems if one ack is lost.
+	 */
+	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+				   clamp_val(val, DCCPF_SEQ_WMIN,
+						  DCCPF_SEQ_WMAX));
+}
 }
 
 static void ccid2_hc_tx_rto_expire(unsigned long data)
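
For reference, a minimal user-space sketch of the clamping that ccid2_change_l_ack_ratio() and ccid2_check_l_ack_ratio() perform in the hunk above. The DCCPF_ACK_RATIO_MAX value and the clamp_ack_ratio() helper are illustrative assumptions, not the kernel definitions:

/*
 * Sketch only: mirrors the Ack Ratio clamping logic above in plain C.
 * DCCPF_ACK_RATIO_MAX here is an assumed stand-in for the kernel constant.
 */
#include <stdio.h>

#define DCCPF_ACK_RATIO_MAX	0xffff	/* assumed cap on the feature value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Clamp a requested Ack Ratio to ceil(cwnd/2) and the feature maximum */
static unsigned int clamp_ack_ratio(unsigned int val, unsigned int cwnd)
{
	unsigned int max_ratio = DIV_ROUND_UP(cwnd, 2);

	if (val == 0 || val > max_ratio)
		val = max_ratio;
	return val > DCCPF_ACK_RATIO_MAX ? DCCPF_ACK_RATIO_MAX : val;
}

int main(void)
{
	unsigned int cwnd = 3, ack_ratio = 8;

	/* After a loss or idle period, cwnd may have shrunk below the ratio */
	if (ack_ratio > cwnd)
		ack_ratio = clamp_ack_ratio(cwnd / 2 ? cwnd / 2 : 1, cwnd);

	printf("cwnd=%u -> Ack Ratio reduced to %u\n", cwnd, ack_ratio);
	return 0;
}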
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
 	}
 	hc->tx_cwnd_used = 0;
 	hc->tx_cwnd_stamp = now;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 /* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used = 0;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
 					unsigned int *maxincr)
 {
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
-	if (hc->tx_cwnd < hc->tx_ssthresh) {
-		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
+	struct dccp_sock *dp = dccp_sk(sk);
+	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
+
+	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+	    r_seq_used < dp->dccps_r_seq_win) {
+		if (hc->tx_cwnd < hc->tx_ssthresh) {
+			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+				hc->tx_cwnd += 1;
+				*maxincr -= 1;
+				hc->tx_packets_acked = 0;
+			}
+		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
 			hc->tx_cwnd += 1;
-			*maxincr -= 1;
 			hc->tx_packets_acked = 0;
 		}
-	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
-		hc->tx_cwnd += 1;
-		hc->tx_packets_acked = 0;
 	}
+
+	/*
+	 * Adjust the local sequence window and the ack ratio to allow about
+	 * 5 times the number of packets in the network (RFC 4340 7.5.2)
+	 */
+	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
 	/*
 	 * FIXME: RTT is sampled several times per acknowledgment (for each
 	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
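
The hunk above sizes both Sequence Windows to roughly five times the packets in flight (RFC 4340, 7.5.2). A user-space sketch of that decision logic, assuming CCID2_WIN_CHANGE_FACTOR is 5 and using a stand-in struct in place of the socket state:

/*
 * Sketch only: the window-scaling choices made in ccid2_new_ack() above,
 * with assumed values; not kernel code.
 */
#include <stdio.h>

#define CCID2_WIN_CHANGE_FACTOR	5	/* assumed value of the kernel constant */

struct demo_state {
	unsigned long cwnd;		/* congestion window, in packets     */
	unsigned long l_ack_ratio;	/* local Ack Ratio (packets per Ack) */
	unsigned long l_seq_win;	/* local Sequence Window feature     */
	unsigned long r_seq_win;	/* remote Sequence Window feature    */
};

static void adjust_windows(struct demo_state *s)
{
	/* Ack packets the peer sends per window: cwnd / Ack Ratio */
	unsigned long r_seq_used = s->cwnd / s->l_ack_ratio;

	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= s->r_seq_win)
		s->l_ack_ratio *= 2;		/* ask for fewer Acks */
	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < s->r_seq_win / 2)
		s->l_ack_ratio = s->l_ack_ratio / 2 ? s->l_ack_ratio / 2 : 1;

	if (s->cwnd * CCID2_WIN_CHANGE_FACTOR >= s->l_seq_win)
		s->l_seq_win *= 2;		/* grow the local window */
	else if (s->cwnd * CCID2_WIN_CHANGE_FACTOR < s->l_seq_win / 2)
		s->l_seq_win /= 2;		/* shrink it back again */
}

int main(void)
{
	struct demo_state s = { .cwnd = 40, .l_ack_ratio = 2,
				.l_seq_win = 100, .r_seq_win = 100 };

	adjust_windows(&s);
	printf("Ack Ratio=%lu, local Sequence Window=%lu\n",
	       s.l_ack_ratio, s.l_seq_win);
	return 0;
}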
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 	hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
 	hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
 
-	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
-	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
-		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			if (hc->tx_rpdupack >= NUMDUPACK) {
 				hc->tx_rpdupack = -1; /* XXX lame */
 				hc->tx_rpseq = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+				/*
+				 * FIXME: Ack Congestion Control is broken; in
+				 * the current state instabilities occurred with
+				 * Ack Ratios greater than 1; causing hang-ups
+				 * and long RTO timeouts. This needs to be fixed
+				 * before opening up dynamic changes. -- gerrit
+				 */
 				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
 			}
 		}
 	}
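
One way the hang-ups and long RTO timeouts mentioned in the FIXME above can arise is an Ack Ratio that outgrows the congestion window, the same situation ccid2_check_l_ack_ratio() guards against earlier in this patch. A toy user-space illustration, with arbitrary numbers:

/*
 * Sketch only: with Ack Ratio > cwnd, the receiver still owes no Ack
 * while the sender has exhausted its window, so nothing moves until
 * the RTO fires. Not kernel code.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 4;		/* sender may keep 4 packets in flight */
	unsigned int ack_ratio = 8;	/* receiver acks every 8th data packet */
	unsigned int in_flight = 0;

	/* Sender transmits until the congestion window is exhausted */
	while (in_flight < cwnd)
		in_flight++;

	if (in_flight < ack_ratio)
		printf("%u packets in flight, Ack due after %u: stalled until RTO\n",
		       in_flight, ack_ratio);
	else
		printf("Ack generated, window slides forward\n");
	return 0;
}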