author    Gerrit Renker <gerrit@erg.abdn.ac.uk>    2007-11-24 18:40:24 -0500
committer David S. Miller <davem@davemloft.net>    2008-01-28 17:54:53 -0500
commit    d50ad163e6db2dcc365b8d02b30350220f86df04 (patch)
tree      56998d89dcf4b748c09b4f5fe82bd2b7742ea3cb /net/dccp/ccids
parent    df054e1d00fdafa2e2920319df326ddb3f0d0413 (diff)
[CCID2]: Deadlock and spurious timeouts when Ack Ratio > cwnd
This patch removes a bug in the current code. I agree with Andrea's comment that there is a problem here, but the way it is treated does not fix it.

The problem is that whenever Ack Ratio > cwnd, starvation/deadlock occurs:
 * the receiver will not send an Ack until (Ack Ratio - cwnd) data packets have arrived;
 * the sender will not send any data packet before the receipt of an Ack advances the send window.

The only way the connection could then progress was via the RTO timeout. In one extreme case (bulk transfer), this was observed to happen for every single packet; i.e. hundreds of packets, each an RTO timeout of 1..3 seconds apart: a transfer which would normally take a fraction of a second thus grew to several minutes.

The solution taken by this approach is to observe the relation "Ack Ratio <= cwnd" by using constraint (1) from RFC 4341, 6.1.2; i.e. set Ack Ratio = ceil(cwnd / 2) and update it whenever either Ack Ratio or cwnd changes. This ensures that the deadlock problem cannot arise.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Acked-by: Ian McDonald <ian.mcdonald@jandi.co.nz>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
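For reference, a minimal userspace sketch (illustration only, not part of the patch; the clamp_ack_ratio helper and the sample cwnd values are made up for this example) of the ceil(cwnd/2) clamp that the patch applies to the local Ack Ratio:

/*
 * Illustration only: mirrors DIV_ROUND_UP(cwnd, 2) as used in the patch,
 * so that Ack Ratio never exceeds ceil(cwnd/2) and hence never exceeds cwnd.
 */
#include <stdio.h>

static unsigned int clamp_ack_ratio(unsigned int ack_ratio, unsigned int cwnd)
{
	unsigned int max_ratio = (cwnd + 1) / 2;	/* ceil(cwnd / 2) */

	/* Ack Ratio 0 (disabled) or anything above the ceiling is clamped */
	if (ack_ratio == 0 || ack_ratio > max_ratio)
		ack_ratio = max_ratio;
	return ack_ratio;
}

int main(void)
{
	unsigned int cwnd;

	/* With a fixed Ack Ratio of 2, cwnd = 1 would deadlock; the clamp avoids it */
	for (cwnd = 1; cwnd <= 6; cwnd++)
		printf("cwnd=%u -> Ack Ratio clamped to %u\n",
		       cwnd, clamp_ack_ratio(2, cwnd));
	return 0;
}

For every cwnd the clamped value satisfies Ack Ratio <= ceil(cwnd/2) <= cwnd, which is exactly the relation the patch enforces to rule out the starvation case described above.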
Diffstat (limited to 'net/dccp/ccids')
-rw-r--r--	net/dccp/ccids/ccid2.c	39
1 file changed, 21 insertions, 18 deletions
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f18235e8ce84..ef19fb834299 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -143,32 +143,30 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
+	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2);
+
 	/*
-	 * XXX I don't really agree with val != 2.  If cwnd is 1, ack ratio
-	 * should be 1... it shouldn't be allowed to become 2.
-	 * -sorbo.
+	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
+	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
+	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
+	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
 	 */
-	if (val != 2) {
-		const struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-		int max = hctx->ccid2hctx_cwnd / 2;
-
-		/* round up */
-		if (hctx->ccid2hctx_cwnd & 1)
-			max++;
-
-		if (val > max)
-			val = max;
-	}
+	if (val == 0 || val > max_ratio) {
+		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
+		val = max_ratio;
+	}
 	if (val > 0xFFFF)		/* RFC 4340, 11.3 */
 		val = 0xFFFF;
 
+	if (val == dp->dccps_l_ack_ratio)
+		return;
+
 	ccid2_pr_debug("changing local ack ratio to %u\n", val);
 	dp->dccps_l_ack_ratio = val;
 }
 
 static void ccid2_change_cwnd(struct ccid2_hc_tx_sock *hctx, u32 val)
 {
-	/* XXX do we need to change ack ratio? */
 	hctx->ccid2hctx_cwnd = val? : 1;
 	ccid2_pr_debug("changed cwnd to %u\n", hctx->ccid2hctx_cwnd);
 }
@@ -519,9 +517,10 @@ static void ccid2_hc_tx_dec_pipe(struct sock *sk)
 	ccid2_hc_tx_kill_rto_timer(sk);
 }
 
-static void ccid2_congestion_event(struct ccid2_hc_tx_sock *hctx,
-				   struct ccid2_seq *seqp)
+static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 {
+	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+
 	if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) {
 		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
 		return;
@@ -533,6 +532,10 @@ static void ccid2_congestion_event(struct ccid2_hc_tx_sock *hctx,
 	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd;
 	if (hctx->ccid2hctx_ssthresh < 2)
 		hctx->ccid2hctx_ssthresh = 2;
+
+	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
+	if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd)
+		ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd);
 }
 
 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -648,7 +651,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 				     !seqp->ccid2s_acked) {
 					if (state ==
 					    DCCP_ACKVEC_STATE_ECN_MARKED) {
-						ccid2_congestion_event(hctx,
+						ccid2_congestion_event(sk,
 								       seqp);
 					} else
 						ccid2_new_ack(sk, seqp,
@@ -713,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			 * order to detect multiple congestion events in
 			 * one ack vector.
 			 */
-			ccid2_congestion_event(hctx, seqp);
+			ccid2_congestion_event(sk, seqp);
 			ccid2_hc_tx_dec_pipe(sk);
 		}
 		if (seqp == hctx->ccid2hctx_seqt)