author    | Gerrit Renker <gerrit@erg.abdn.ac.uk> | 2008-09-09 07:27:22 -0400
committer | Gerrit Renker <gerrit@erg.abdn.ac.uk> | 2008-09-09 07:27:22 -0400
commit    | 410e27a49bb98bc7fa3ff5fc05cc313817b9f253 (patch)
tree      | 88bb1fcf84f9ebfa4299c9a8dcd9e6330b358446 /net/dccp/ccids/ccid2.c
parent    | 0a68a20cc3eafa73bb54097c28b921147d7d3685 (diff)
This reverts "Merge branch 'dccp' of git://eden-feed.erg.abdn.ac.uk/dccp_exp"
as it accidentally contained the wrong set of patches. These will be
submitted separately.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r-- | net/dccp/ccids/ccid2.c | 622 |
1 file changed, 372 insertions, 250 deletions
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index fa713227c66f..9a430734530c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,7 +25,7 @@ | |||
25 | /* | 25 | /* |
26 | * This implementation should follow RFC 4341 | 26 | * This implementation should follow RFC 4341 |
27 | */ | 27 | */ |
28 | #include "../feat.h" | 28 | |
29 | #include "../ccid.h" | 29 | #include "../ccid.h" |
30 | #include "../dccp.h" | 30 | #include "../dccp.h" |
31 | #include "ccid2.h" | 31 | #include "ccid2.h" |
@@ -34,8 +34,51 @@ | |||
34 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 34 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
35 | static int ccid2_debug; | 35 | static int ccid2_debug; |
36 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) | 36 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) |
37 | |||
38 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | ||
39 | { | ||
40 | int len = 0; | ||
41 | int pipe = 0; | ||
42 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqh; | ||
43 | |||
44 | /* there is data in the chain */ | ||
45 | if (seqp != hctx->ccid2hctx_seqt) { | ||
46 | seqp = seqp->ccid2s_prev; | ||
47 | len++; | ||
48 | if (!seqp->ccid2s_acked) | ||
49 | pipe++; | ||
50 | |||
51 | while (seqp != hctx->ccid2hctx_seqt) { | ||
52 | struct ccid2_seq *prev = seqp->ccid2s_prev; | ||
53 | |||
54 | len++; | ||
55 | if (!prev->ccid2s_acked) | ||
56 | pipe++; | ||
57 | |||
58 | /* packets are sent sequentially */ | ||
59 | BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq, | ||
60 | prev->ccid2s_seq ) >= 0); | ||
61 | BUG_ON(time_before(seqp->ccid2s_sent, | ||
62 | prev->ccid2s_sent)); | ||
63 | |||
64 | seqp = prev; | ||
65 | } | ||
66 | } | ||
67 | |||
68 | BUG_ON(pipe != hctx->ccid2hctx_pipe); | ||
69 | ccid2_pr_debug("len of chain=%d\n", len); | ||
70 | |||
71 | do { | ||
72 | seqp = seqp->ccid2s_prev; | ||
73 | len++; | ||
74 | } while (seqp != hctx->ccid2hctx_seqh); | ||
75 | |||
76 | ccid2_pr_debug("total len=%d\n", len); | ||
77 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); | ||
78 | } | ||
37 | #else | 79 | #else |
38 | #define ccid2_pr_debug(format, a...) | 80 | #define ccid2_pr_debug(format, a...) |
81 | #define ccid2_hc_tx_check_sanity(hctx) | ||
39 | #endif | 82 | #endif |
40 | 83 | ||
41 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | 84 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) |
@@ -44,7 +87,8 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | |||
44 | int i; | 87 | int i; |
45 | 88 | ||
46 | /* check if we have space to preserve the pointer to the buffer */ | 89 | /* check if we have space to preserve the pointer to the buffer */ |
47 | if (hctx->seqbufc >= sizeof(hctx->seqbuf) / sizeof(struct ccid2_seq *)) | 90 | if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) / |
91 | sizeof(struct ccid2_seq*))) | ||
48 | return -ENOMEM; | 92 | return -ENOMEM; |
49 | 93 | ||
50 | /* allocate buffer and initialize linked list */ | 94 | /* allocate buffer and initialize linked list */ |
@@ -60,35 +104,38 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | |||
60 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 104 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
61 | 105 | ||
62 | /* This is the first allocation. Initiate the head and tail. */ | 106 | /* This is the first allocation. Initiate the head and tail. */ |
63 | if (hctx->seqbufc == 0) | 107 | if (hctx->ccid2hctx_seqbufc == 0) |
64 | hctx->seqh = hctx->seqt = seqp; | 108 | hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp; |
65 | else { | 109 | else { |
66 | /* link the existing list with the one we just created */ | 110 | /* link the existing list with the one we just created */ |
67 | hctx->seqh->ccid2s_next = seqp; | 111 | hctx->ccid2hctx_seqh->ccid2s_next = seqp; |
68 | seqp->ccid2s_prev = hctx->seqh; | 112 | seqp->ccid2s_prev = hctx->ccid2hctx_seqh; |
69 | 113 | ||
70 | hctx->seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 114 | hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
71 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->seqt; | 115 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt; |
72 | } | 116 | } |
73 | 117 | ||
74 | /* store the original pointer to the buffer so we can free it */ | 118 | /* store the original pointer to the buffer so we can free it */ |
75 | hctx->seqbuf[hctx->seqbufc] = seqp; | 119 | hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp; |
76 | hctx->seqbufc++; | 120 | hctx->ccid2hctx_seqbufc++; |
77 | 121 | ||
78 | return 0; | 122 | return 0; |
79 | } | 123 | } |
80 | 124 | ||
81 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 125 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
82 | { | 126 | { |
83 | if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) | 127 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
84 | return CCID_PACKET_WILL_DEQUEUE_LATER; | 128 | |
85 | return CCID_PACKET_SEND_AT_ONCE; | 129 | if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd) |
130 | return 0; | ||
131 | |||
132 | return 1; /* XXX CCID should dequeue when ready instead of polling */ | ||
86 | } | 133 | } |
87 | 134 | ||
88 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | 135 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) |
89 | { | 136 | { |
90 | struct dccp_sock *dp = dccp_sk(sk); | 137 | struct dccp_sock *dp = dccp_sk(sk); |
91 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->cwnd, 2); | 138 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2); |
92 | 139 | ||
93 | /* | 140 | /* |
94 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from | 141 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from |
@@ -100,8 +147,8 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | |||
100 | DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); | 147 | DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); |
101 | val = max_ratio; | 148 | val = max_ratio; |
102 | } | 149 | } |
103 | if (val > DCCPF_ACK_RATIO_MAX) | 150 | if (val > 0xFFFF) /* RFC 4340, 11.3 */ |
104 | val = DCCPF_ACK_RATIO_MAX; | 151 | val = 0xFFFF; |
105 | 152 | ||
106 | if (val == dp->dccps_l_ack_ratio) | 153 | if (val == dp->dccps_l_ack_ratio) |
107 | return; | 154 | return; |
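For readers skimming the last two hunks: the restored helper bounds the sender's proposed Ack Ratio both by ceil(cwnd/2) (RFC 4341) and by the 16-bit feature maximum of RFC 4340, 11.3. A minimal standalone sketch of that clamping, with hypothetical names and no relation to the actual kernel structs:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only: clamp a proposed Ack Ratio the way the hunks
 * above do -- at most ceil(cwnd/2), and never above the 16-bit feature
 * maximum of RFC 4340, 11.3. */
uint32_t clamp_ack_ratio(uint32_t val, uint32_t cwnd)
{
	uint32_t max_ratio = (cwnd + 1) / 2;	/* DIV_ROUND_UP(cwnd, 2) */

	if (val == 0)
		val = 1;
	else if (val > max_ratio)
		val = max_ratio;
	if (val > 0xFFFF)			/* RFC 4340, 11.3 */
		val = 0xFFFF;
	return val;
}

int main(void)
{
	printf("%u\n", clamp_ack_ratio(7, 10));	/* ceil(10/2) = 5 */
	return 0;
}
```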
@@ -110,77 +157,99 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | |||
110 | dp->dccps_l_ack_ratio = val; | 157 | dp->dccps_l_ack_ratio = val; |
111 | } | 158 | } |
112 | 159 | ||
160 | static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val) | ||
161 | { | ||
162 | ccid2_pr_debug("change SRTT to %ld\n", val); | ||
163 | hctx->ccid2hctx_srtt = val; | ||
164 | } | ||
165 | |||
166 | static void ccid2_start_rto_timer(struct sock *sk); | ||
167 | |||
113 | static void ccid2_hc_tx_rto_expire(unsigned long data) | 168 | static void ccid2_hc_tx_rto_expire(unsigned long data) |
114 | { | 169 | { |
115 | struct sock *sk = (struct sock *)data; | 170 | struct sock *sk = (struct sock *)data; |
116 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 171 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
117 | const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx); | 172 | long s; |
118 | 173 | ||
119 | bh_lock_sock(sk); | 174 | bh_lock_sock(sk); |
120 | if (sock_owned_by_user(sk)) { | 175 | if (sock_owned_by_user(sk)) { |
121 | sk_reset_timer(sk, &hctx->rtotimer, jiffies + HZ / 5); | 176 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, |
177 | jiffies + HZ / 5); | ||
122 | goto out; | 178 | goto out; |
123 | } | 179 | } |
124 | 180 | ||
125 | ccid2_pr_debug("RTO_EXPIRE\n"); | 181 | ccid2_pr_debug("RTO_EXPIRE\n"); |
126 | 182 | ||
183 | ccid2_hc_tx_check_sanity(hctx); | ||
184 | |||
127 | /* back-off timer */ | 185 | /* back-off timer */ |
128 | hctx->rto <<= 1; | 186 | hctx->ccid2hctx_rto <<= 1; |
129 | if (hctx->rto > DCCP_RTO_MAX) | 187 | |
130 | hctx->rto = DCCP_RTO_MAX; | 188 | s = hctx->ccid2hctx_rto / HZ; |
189 | if (s > 60) | ||
190 | hctx->ccid2hctx_rto = 60 * HZ; | ||
191 | |||
192 | ccid2_start_rto_timer(sk); | ||
131 | 193 | ||
132 | /* adjust pipe, cwnd etc */ | 194 | /* adjust pipe, cwnd etc */ |
133 | hctx->ssthresh = hctx->cwnd / 2; | 195 | hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd / 2; |
134 | if (hctx->ssthresh < 2) | 196 | if (hctx->ccid2hctx_ssthresh < 2) |
135 | hctx->ssthresh = 2; | 197 | hctx->ccid2hctx_ssthresh = 2; |
136 | hctx->cwnd = 1; | 198 | hctx->ccid2hctx_cwnd = 1; |
137 | hctx->pipe = 0; | 199 | hctx->ccid2hctx_pipe = 0; |
138 | 200 | ||
139 | /* clear state about stuff we sent */ | 201 | /* clear state about stuff we sent */ |
140 | hctx->seqt = hctx->seqh; | 202 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh; |
141 | hctx->packets_acked = 0; | 203 | hctx->ccid2hctx_packets_acked = 0; |
142 | 204 | ||
143 | /* clear ack ratio state. */ | 205 | /* clear ack ratio state. */ |
144 | hctx->rpseq = 0; | 206 | hctx->ccid2hctx_rpseq = 0; |
145 | hctx->rpdupack = -1; | 207 | hctx->ccid2hctx_rpdupack = -1; |
146 | ccid2_change_l_ack_ratio(sk, 1); | 208 | ccid2_change_l_ack_ratio(sk, 1); |
147 | 209 | ccid2_hc_tx_check_sanity(hctx); | |
148 | /* if we were blocked before, we may now send cwnd=1 packet */ | ||
149 | if (sender_was_blocked) | ||
150 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | ||
151 | /* restart backed-off timer */ | ||
152 | sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto); | ||
153 | out: | 210 | out: |
154 | bh_unlock_sock(sk); | 211 | bh_unlock_sock(sk); |
155 | sock_put(sk); | 212 | sock_put(sk); |
156 | } | 213 | } |
157 | 214 | ||
158 | static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) | 215 | static void ccid2_start_rto_timer(struct sock *sk) |
216 | { | ||
217 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | ||
218 | |||
219 | ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto); | ||
220 | |||
221 | BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer)); | ||
222 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, | ||
223 | jiffies + hctx->ccid2hctx_rto); | ||
224 | } | ||
225 | |||
226 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | ||
159 | { | 227 | { |
160 | struct dccp_sock *dp = dccp_sk(sk); | 228 | struct dccp_sock *dp = dccp_sk(sk); |
161 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 229 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
162 | struct ccid2_seq *next; | 230 | struct ccid2_seq *next; |
163 | 231 | ||
164 | hctx->pipe++; | 232 | hctx->ccid2hctx_pipe++; |
165 | 233 | ||
166 | hctx->seqh->ccid2s_seq = dp->dccps_gss; | 234 | hctx->ccid2hctx_seqh->ccid2s_seq = dp->dccps_gss; |
167 | hctx->seqh->ccid2s_acked = 0; | 235 | hctx->ccid2hctx_seqh->ccid2s_acked = 0; |
168 | hctx->seqh->ccid2s_sent = jiffies; | 236 | hctx->ccid2hctx_seqh->ccid2s_sent = jiffies; |
169 | 237 | ||
170 | next = hctx->seqh->ccid2s_next; | 238 | next = hctx->ccid2hctx_seqh->ccid2s_next; |
171 | /* check if we need to alloc more space */ | 239 | /* check if we need to alloc more space */ |
172 | if (next == hctx->seqt) { | 240 | if (next == hctx->ccid2hctx_seqt) { |
173 | if (ccid2_hc_tx_alloc_seq(hctx)) { | 241 | if (ccid2_hc_tx_alloc_seq(hctx)) { |
174 | DCCP_CRIT("packet history - out of memory!"); | 242 | DCCP_CRIT("packet history - out of memory!"); |
175 | /* FIXME: find a more graceful way to bail out */ | 243 | /* FIXME: find a more graceful way to bail out */ |
176 | return; | 244 | return; |
177 | } | 245 | } |
178 | next = hctx->seqh->ccid2s_next; | 246 | next = hctx->ccid2hctx_seqh->ccid2s_next; |
179 | BUG_ON(next == hctx->seqt); | 247 | BUG_ON(next == hctx->ccid2hctx_seqt); |
180 | } | 248 | } |
181 | hctx->seqh = next; | 249 | hctx->ccid2hctx_seqh = next; |
182 | 250 | ||
183 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->cwnd, hctx->pipe); | 251 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd, |
252 | hctx->ccid2hctx_pipe); | ||
184 | 253 | ||
185 | /* | 254 | /* |
186 | * FIXME: The code below is broken and the variables have been removed | 255 | * FIXME: The code below is broken and the variables have been removed |
@@ -203,12 +272,12 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) | |||
203 | */ | 272 | */ |
204 | #if 0 | 273 | #if 0 |
205 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ | 274 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ |
206 | hctx->arsent++; | 275 | hctx->ccid2hctx_arsent++; |
207 | /* We had an ack loss in this window... */ | 276 | /* We had an ack loss in this window... */ |
208 | if (hctx->ackloss) { | 277 | if (hctx->ccid2hctx_ackloss) { |
209 | if (hctx->arsent >= hctx->cwnd) { | 278 | if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) { |
210 | hctx->arsent = 0; | 279 | hctx->ccid2hctx_arsent = 0; |
211 | hctx->ackloss = 0; | 280 | hctx->ccid2hctx_ackloss = 0; |
212 | } | 281 | } |
213 | } else { | 282 | } else { |
214 | /* No acks lost up to now... */ | 283 | /* No acks lost up to now... */ |
@@ -218,28 +287,28 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) | |||
218 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - | 287 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - |
219 | dp->dccps_l_ack_ratio; | 288 | dp->dccps_l_ack_ratio; |
220 | 289 | ||
221 | denom = hctx->cwnd * hctx->cwnd / denom; | 290 | denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom; |
222 | 291 | ||
223 | if (hctx->arsent >= denom) { | 292 | if (hctx->ccid2hctx_arsent >= denom) { |
224 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); | 293 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); |
225 | hctx->arsent = 0; | 294 | hctx->ccid2hctx_arsent = 0; |
226 | } | 295 | } |
227 | } else { | 296 | } else { |
228 | /* we can't increase ack ratio further [1] */ | 297 | /* we can't increase ack ratio further [1] */ |
229 | hctx->arsent = 0; /* or maybe set it to cwnd*/ | 298 | hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/ |
230 | } | 299 | } |
231 | } | 300 | } |
232 | #endif | 301 | #endif |
233 | 302 | ||
234 | /* setup RTO timer */ | 303 | /* setup RTO timer */ |
235 | if (!timer_pending(&hctx->rtotimer)) | 304 | if (!timer_pending(&hctx->ccid2hctx_rtotimer)) |
236 | sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto); | 305 | ccid2_start_rto_timer(sk); |
237 | 306 | ||
238 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 307 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
239 | do { | 308 | do { |
240 | struct ccid2_seq *seqp = hctx->seqt; | 309 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqt; |
241 | 310 | ||
242 | while (seqp != hctx->seqh) { | 311 | while (seqp != hctx->ccid2hctx_seqh) { |
243 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", | 312 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", |
244 | (unsigned long long)seqp->ccid2s_seq, | 313 | (unsigned long long)seqp->ccid2s_seq, |
245 | seqp->ccid2s_acked, seqp->ccid2s_sent); | 314 | seqp->ccid2s_acked, seqp->ccid2s_sent); |
@@ -247,158 +316,205 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) | |||
247 | } | 316 | } |
248 | } while (0); | 317 | } while (0); |
249 | ccid2_pr_debug("=========\n"); | 318 | ccid2_pr_debug("=========\n"); |
319 | ccid2_hc_tx_check_sanity(hctx); | ||
250 | #endif | 320 | #endif |
251 | } | 321 | } |
252 | 322 | ||
253 | /** | 323 | /* XXX Lame code duplication! |
254 | * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm | 324 | * returns -1 if none was found. |
255 | * This code is almost identical with TCP's tcp_rtt_estimator(), since | 325 | * else returns the next offset to use in the function call. |
256 | * - it has a higher sampling frequency (recommended by RFC 1323), | ||
257 | * - the RTO does not collapse into RTT due to RTTVAR going towards zero, | ||
258 | * - it is simple (cf. more complex proposals such as Eifel timer or research | ||
259 | * which suggests that the gain should be set according to window size), | ||
260 | * - in tests it was found to work well with CCID2 [gerrit]. | ||
261 | */ | 326 | */ |
262 | static void ccid2_rtt_estimator(struct sock *sk, const long mrtt) | 327 | static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset, |
328 | unsigned char **vec, unsigned char *veclen) | ||
263 | { | 329 | { |
264 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 330 | const struct dccp_hdr *dh = dccp_hdr(skb); |
265 | long m = mrtt ? : 1; | 331 | unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); |
266 | 332 | unsigned char *opt_ptr; | |
267 | if (hctx->srtt == 0) { | 333 | const unsigned char *opt_end = (unsigned char *)dh + |
268 | /* First measurement m */ | 334 | (dh->dccph_doff * 4); |
269 | hctx->srtt = m << 3; | 335 | unsigned char opt, len; |
270 | hctx->mdev = m << 1; | 336 | unsigned char *value; |
271 | 337 | ||
272 | hctx->mdev_max = max(TCP_RTO_MIN, hctx->mdev); | 338 | BUG_ON(offset < 0); |
273 | hctx->rttvar = hctx->mdev_max; | 339 | options += offset; |
274 | hctx->rtt_seq = dccp_sk(sk)->dccps_gss; | 340 | opt_ptr = options; |
275 | } else { | 341 | if (opt_ptr >= opt_end) |
276 | /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */ | 342 | return -1; |
277 | m -= (hctx->srtt >> 3); | 343 | |
278 | hctx->srtt += m; | 344 | while (opt_ptr != opt_end) { |
279 | 345 | opt = *opt_ptr++; | |
280 | /* Similarly, update scaled mdev with regard to |m| */ | 346 | len = 0; |
281 | if (m < 0) { | 347 | value = NULL; |
282 | m = -m; | 348 | |
283 | m -= (hctx->mdev >> 2); | 349 | /* Check if this isn't a single byte option */ |
350 | if (opt > DCCPO_MAX_RESERVED) { | ||
351 | if (opt_ptr == opt_end) | ||
352 | goto out_invalid_option; | ||
353 | |||
354 | len = *opt_ptr++; | ||
355 | if (len < 3) | ||
356 | goto out_invalid_option; | ||
284 | /* | 357 | /* |
285 | * This neutralises RTO increase when RTT < SRTT - mdev | 358 | * Remove the type and len fields, leaving |
286 | * (see P. Sarolahti, A. Kuznetsov,"Congestion Control | 359 | * just the value size |
287 | * in Linux TCP", USENIX 2002, pp. 49-62). | ||
288 | */ | 360 | */ |
289 | if (m > 0) | 361 | len -= 2; |
290 | m >>= 3; | 362 | value = opt_ptr; |
291 | } else { | 363 | opt_ptr += len; |
292 | m -= (hctx->mdev >> 2); | ||
293 | } | ||
294 | hctx->mdev += m; | ||
295 | 364 | ||
296 | if (hctx->mdev > hctx->mdev_max) { | 365 | if (opt_ptr > opt_end) |
297 | hctx->mdev_max = hctx->mdev; | 366 | goto out_invalid_option; |
298 | if (hctx->mdev_max > hctx->rttvar) | ||
299 | hctx->rttvar = hctx->mdev_max; | ||
300 | } | 367 | } |
301 | 368 | ||
302 | /* | 369 | switch (opt) { |
303 | * Decay RTTVAR at most once per flight, exploiting that | 370 | case DCCPO_ACK_VECTOR_0: |
304 | * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2) | 371 | case DCCPO_ACK_VECTOR_1: |
305 | * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1) | 372 | *vec = value; |
306 | * GAR is a useful bound for FlightSize = pipe, AWL is probably | 373 | *veclen = len; |
307 | * too low as it over-estimates pipe. | 374 | return offset + (opt_ptr - options); |
308 | */ | ||
309 | if (after48(dccp_sk(sk)->dccps_gar, hctx->rtt_seq)) { | ||
310 | if (hctx->mdev_max < hctx->rttvar) | ||
311 | hctx->rttvar -= (hctx->rttvar - | ||
312 | hctx->mdev_max) >> 2; | ||
313 | hctx->rtt_seq = dccp_sk(sk)->dccps_gss; | ||
314 | hctx->mdev_max = TCP_RTO_MIN; | ||
315 | } | 375 | } |
316 | } | 376 | } |
317 | 377 | ||
318 | /* | 378 | return -1; |
319 | * Set RTO from SRTT and RTTVAR | ||
320 | * Clock granularity is ignored since the minimum error for RTTVAR is | ||
321 | * clamped to 50msec (corresponding to HZ=20). This leads to a minimum | ||
322 | * RTO of 200msec. This agrees with TCP and RFC 4341, 5.: "Because DCCP | ||
323 | * does not retransmit data, DCCP does not require TCP's recommended | ||
324 | * minimum timeout of one second". | ||
325 | */ | ||
326 | hctx->rto = (hctx->srtt >> 3) + hctx->rttvar; | ||
327 | 379 | ||
328 | if (hctx->rto > DCCP_RTO_MAX) | 380 | out_invalid_option: |
329 | hctx->rto = DCCP_RTO_MAX; | 381 | DCCP_BUG("Invalid option - this should not happen (previous parsing)!"); |
382 | return -1; | ||
330 | } | 383 | } |
331 | 384 | ||
332 | static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp, | 385 | static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) |
333 | unsigned int *maxincr) | ||
334 | { | 386 | { |
335 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 387 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
336 | 388 | ||
337 | if (hctx->cwnd < hctx->ssthresh) { | 389 | sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer); |
338 | if (*maxincr > 0 && ++hctx->packets_acked == 2) { | 390 | ccid2_pr_debug("deleted RTO timer\n"); |
339 | hctx->cwnd += 1; | ||
340 | *maxincr -= 1; | ||
341 | hctx->packets_acked = 0; | ||
342 | } | ||
343 | } else if (++hctx->packets_acked >= hctx->cwnd) { | ||
344 | hctx->cwnd += 1; | ||
345 | hctx->packets_acked = 0; | ||
346 | } | ||
347 | /* | ||
348 | * FIXME: RTT is sampled several times per acknowledgment (for each | ||
349 | * entry in the Ack Vector), instead of once per Ack (as in TCP SACK). | ||
350 | * This causes the RTT to be over-estimated, since the older entries | ||
351 | * in the Ack Vector have earlier sending times. | ||
352 | * The cleanest solution is to not use the ccid2s_sent field at all | ||
353 | * and instead use DCCP timestamps - need to be resolved at some time. | ||
354 | */ | ||
355 | ccid2_rtt_estimator(sk, jiffies - seqp->ccid2s_sent); | ||
356 | } | 391 | } |
357 | 392 | ||
358 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) | 393 | static inline void ccid2_new_ack(struct sock *sk, |
394 | struct ccid2_seq *seqp, | ||
395 | unsigned int *maxincr) | ||
359 | { | 396 | { |
360 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 397 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
361 | 398 | ||
362 | if (time_before(seqp->ccid2s_sent, hctx->last_cong)) { | 399 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) { |
363 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); | 400 | if (*maxincr > 0 && ++hctx->ccid2hctx_packets_acked == 2) { |
364 | return; | 401 | hctx->ccid2hctx_cwnd += 1; |
402 | *maxincr -= 1; | ||
403 | hctx->ccid2hctx_packets_acked = 0; | ||
404 | } | ||
405 | } else if (++hctx->ccid2hctx_packets_acked >= hctx->ccid2hctx_cwnd) { | ||
406 | hctx->ccid2hctx_cwnd += 1; | ||
407 | hctx->ccid2hctx_packets_acked = 0; | ||
365 | } | 408 | } |
366 | 409 | ||
367 | hctx->last_cong = jiffies; | 410 | /* update RTO */ |
411 | if (hctx->ccid2hctx_srtt == -1 || | ||
412 | time_after(jiffies, hctx->ccid2hctx_lastrtt + hctx->ccid2hctx_srtt)) { | ||
413 | unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; | ||
414 | int s; | ||
415 | |||
416 | /* first measurement */ | ||
417 | if (hctx->ccid2hctx_srtt == -1) { | ||
418 | ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", | ||
419 | r, jiffies, | ||
420 | (unsigned long long)seqp->ccid2s_seq); | ||
421 | ccid2_change_srtt(hctx, r); | ||
422 | hctx->ccid2hctx_rttvar = r >> 1; | ||
423 | } else { | ||
424 | /* RTTVAR */ | ||
425 | long tmp = hctx->ccid2hctx_srtt - r; | ||
426 | long srtt; | ||
427 | |||
428 | if (tmp < 0) | ||
429 | tmp *= -1; | ||
430 | |||
431 | tmp >>= 2; | ||
432 | hctx->ccid2hctx_rttvar *= 3; | ||
433 | hctx->ccid2hctx_rttvar >>= 2; | ||
434 | hctx->ccid2hctx_rttvar += tmp; | ||
435 | |||
436 | /* SRTT */ | ||
437 | srtt = hctx->ccid2hctx_srtt; | ||
438 | srtt *= 7; | ||
439 | srtt >>= 3; | ||
440 | tmp = r >> 3; | ||
441 | srtt += tmp; | ||
442 | ccid2_change_srtt(hctx, srtt); | ||
443 | } | ||
444 | s = hctx->ccid2hctx_rttvar << 2; | ||
445 | /* clock granularity is 1 when based on jiffies */ | ||
446 | if (!s) | ||
447 | s = 1; | ||
448 | hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s; | ||
449 | |||
450 | /* must be at least a second */ | ||
451 | s = hctx->ccid2hctx_rto / HZ; | ||
452 | /* DCCP doesn't require this [but I like it cuz my code sux] */ | ||
453 | #if 1 | ||
454 | if (s < 1) | ||
455 | hctx->ccid2hctx_rto = HZ; | ||
456 | #endif | ||
457 | /* max 60 seconds */ | ||
458 | if (s > 60) | ||
459 | hctx->ccid2hctx_rto = HZ * 60; | ||
368 | 460 | ||
369 | hctx->cwnd = hctx->cwnd / 2 ? : 1U; | 461 | hctx->ccid2hctx_lastrtt = jiffies; |
370 | hctx->ssthresh = max(hctx->cwnd, 2U); | ||
371 | 462 | ||
372 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ | 463 | ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", |
373 | if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->cwnd) | 464 | hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, |
374 | ccid2_change_l_ack_ratio(sk, hctx->cwnd); | 465 | hctx->ccid2hctx_rto, HZ, r); |
466 | } | ||
467 | |||
468 | /* we got a new ack, so re-start RTO timer */ | ||
469 | ccid2_hc_tx_kill_rto_timer(sk); | ||
470 | ccid2_start_rto_timer(sk); | ||
375 | } | 471 | } |
376 | 472 | ||
377 | static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, | 473 | static void ccid2_hc_tx_dec_pipe(struct sock *sk) |
378 | u8 option, u8 *optval, u8 optlen) | ||
379 | { | 474 | { |
380 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 475 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
381 | 476 | ||
382 | switch (option) { | 477 | if (hctx->ccid2hctx_pipe == 0) |
383 | case DCCPO_ACK_VECTOR_0: | 478 | DCCP_BUG("pipe == 0"); |
384 | case DCCPO_ACK_VECTOR_1: | 479 | else |
385 | return dccp_ackvec_parsed_add(&hctx->av_chunks, optval, optlen, | 480 | hctx->ccid2hctx_pipe--; |
386 | option - DCCPO_ACK_VECTOR_0); | 481 | |
482 | if (hctx->ccid2hctx_pipe == 0) | ||
483 | ccid2_hc_tx_kill_rto_timer(sk); | ||
484 | } | ||
485 | |||
486 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) | ||
487 | { | ||
488 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | ||
489 | |||
490 | if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) { | ||
491 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); | ||
492 | return; | ||
387 | } | 493 | } |
388 | return 0; | 494 | |
495 | hctx->ccid2hctx_last_cong = jiffies; | ||
496 | |||
497 | hctx->ccid2hctx_cwnd = hctx->ccid2hctx_cwnd / 2 ? : 1U; | ||
498 | hctx->ccid2hctx_ssthresh = max(hctx->ccid2hctx_cwnd, 2U); | ||
499 | |||
500 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ | ||
501 | if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd) | ||
502 | ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd); | ||
389 | } | 503 | } |
390 | 504 | ||
391 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 505 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
392 | { | 506 | { |
393 | struct dccp_sock *dp = dccp_sk(sk); | 507 | struct dccp_sock *dp = dccp_sk(sk); |
394 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 508 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
395 | const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx); | ||
396 | struct dccp_ackvec_parsed *avp; | ||
397 | u64 ackno, seqno; | 509 | u64 ackno, seqno; |
398 | struct ccid2_seq *seqp; | 510 | struct ccid2_seq *seqp; |
511 | unsigned char *vector; | ||
512 | unsigned char veclen; | ||
513 | int offset = 0; | ||
399 | int done = 0; | 514 | int done = 0; |
400 | unsigned int maxincr = 0; | 515 | unsigned int maxincr = 0; |
401 | 516 | ||
517 | ccid2_hc_tx_check_sanity(hctx); | ||
402 | /* check reverse path congestion */ | 518 | /* check reverse path congestion */ |
403 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; | 519 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; |
404 | 520 | ||
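Both columns of the large hunk above derive the retransmission timeout from a smoothed RTT and its variation: the reverted left-hand side used a TCP-style RFC 2988 estimator, while the restored right-hand side recomputes SRTT/RTTVAR at most once per RTT inside ccid2_new_ack(). A rough standalone sketch of the common RFC 2988 update rule, working in plain milliseconds with hypothetical names and none of the jiffies handling:

```c
/* Illustrative sketch of an RFC 2988-style RTT/RTO estimator; not the
 * kernel code, and it deliberately ignores clock granularity. */
struct rtt_est {
	long srtt;	/* smoothed RTT in ms, 0 = no sample yet */
	long rttvar;	/* RTT variation in ms */
	long rto;	/* retransmission timeout in ms */
};

void rtt_sample(struct rtt_est *e, long r)	/* r = measured RTT in ms */
{
	if (e->srtt == 0) {			/* first measurement */
		e->srtt   = r;
		e->rttvar = r / 2;
	} else {
		long err = e->srtt - r;

		if (err < 0)
			err = -err;
		/* RTTVAR <- 3/4 RTTVAR + 1/4 |SRTT - R|, SRTT <- 7/8 SRTT + 1/8 R */
		e->rttvar += (err - e->rttvar) / 4;
		e->srtt   += (r - e->srtt) / 8;
	}
	e->rto = e->srtt + 4 * e->rttvar;
	if (e->rto > 60 * 1000)		/* the restored code caps the RTO at 60 s */
		e->rto = 60 * 1000;
}
```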
@@ -407,21 +523,21 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
407 | * -sorbo. | 523 | * -sorbo. |
408 | */ | 524 | */ |
409 | /* need to bootstrap */ | 525 | /* need to bootstrap */ |
410 | if (hctx->rpdupack == -1) { | 526 | if (hctx->ccid2hctx_rpdupack == -1) { |
411 | hctx->rpdupack = 0; | 527 | hctx->ccid2hctx_rpdupack = 0; |
412 | hctx->rpseq = seqno; | 528 | hctx->ccid2hctx_rpseq = seqno; |
413 | } else { | 529 | } else { |
414 | /* check if packet is consecutive */ | 530 | /* check if packet is consecutive */ |
415 | if (dccp_delta_seqno(hctx->rpseq, seqno) == 1) | 531 | if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1) |
416 | hctx->rpseq = seqno; | 532 | hctx->ccid2hctx_rpseq = seqno; |
417 | /* it's a later packet */ | 533 | /* it's a later packet */ |
418 | else if (after48(seqno, hctx->rpseq)) { | 534 | else if (after48(seqno, hctx->ccid2hctx_rpseq)) { |
419 | hctx->rpdupack++; | 535 | hctx->ccid2hctx_rpdupack++; |
420 | 536 | ||
421 | /* check if we got enough dupacks */ | 537 | /* check if we got enough dupacks */ |
422 | if (hctx->rpdupack >= NUMDUPACK) { | 538 | if (hctx->ccid2hctx_rpdupack >= NUMDUPACK) { |
423 | hctx->rpdupack = -1; /* XXX lame */ | 539 | hctx->ccid2hctx_rpdupack = -1; /* XXX lame */ |
424 | hctx->rpseq = 0; | 540 | hctx->ccid2hctx_rpseq = 0; |
425 | 541 | ||
426 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); | 542 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); |
427 | } | 543 | } |
@@ -429,22 +545,27 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
429 | } | 545 | } |
430 | 546 | ||
431 | /* check forward path congestion */ | 547 | /* check forward path congestion */ |
432 | if (dccp_packet_without_ack(skb)) | 548 | /* still didn't send out new data packets */ |
549 | if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) | ||
433 | return; | 550 | return; |
434 | 551 | ||
435 | /* still didn't send out new data packets */ | 552 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
436 | if (hctx->seqh == hctx->seqt) | 553 | case DCCP_PKT_ACK: |
437 | goto done; | 554 | case DCCP_PKT_DATAACK: |
555 | break; | ||
556 | default: | ||
557 | return; | ||
558 | } | ||
438 | 559 | ||
439 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 560 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
440 | if (after48(ackno, hctx->high_ack)) | 561 | if (after48(ackno, hctx->ccid2hctx_high_ack)) |
441 | hctx->high_ack = ackno; | 562 | hctx->ccid2hctx_high_ack = ackno; |
442 | 563 | ||
443 | seqp = hctx->seqt; | 564 | seqp = hctx->ccid2hctx_seqt; |
444 | while (before48(seqp->ccid2s_seq, ackno)) { | 565 | while (before48(seqp->ccid2s_seq, ackno)) { |
445 | seqp = seqp->ccid2s_next; | 566 | seqp = seqp->ccid2s_next; |
446 | if (seqp == hctx->seqh) { | 567 | if (seqp == hctx->ccid2hctx_seqh) { |
447 | seqp = hctx->seqh->ccid2s_prev; | 568 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; |
448 | break; | 569 | break; |
449 | } | 570 | } |
450 | } | 571 | } |
@@ -454,26 +575,26 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
454 | * packets per acknowledgement. Rounding up avoids that cwnd is not | 575 | * packets per acknowledgement. Rounding up avoids that cwnd is not |
455 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. | 576 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. |
456 | */ | 577 | */ |
457 | if (hctx->cwnd < hctx->ssthresh) | 578 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) |
458 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); | 579 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); |
459 | 580 | ||
460 | /* go through all ack vectors */ | 581 | /* go through all ack vectors */ |
461 | list_for_each_entry(avp, &hctx->av_chunks, node) { | 582 | while ((offset = ccid2_ackvector(sk, skb, offset, |
583 | &vector, &veclen)) != -1) { | ||
462 | /* go through this ack vector */ | 584 | /* go through this ack vector */ |
463 | for (; avp->len--; avp->vec++) { | 585 | while (veclen--) { |
464 | u64 ackno_end_rl = SUB48(ackno, | 586 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; |
465 | dccp_ackvec_runlen(avp->vec)); | 587 | u64 ackno_end_rl = SUB48(ackno, rl); |
466 | 588 | ||
467 | ccid2_pr_debug("ackvec %llu |%u,%u|\n", | 589 | ccid2_pr_debug("ackvec start:%llu end:%llu\n", |
468 | (unsigned long long)ackno, | 590 | (unsigned long long)ackno, |
469 | dccp_ackvec_state(avp->vec) >> 6, | 591 | (unsigned long long)ackno_end_rl); |
470 | dccp_ackvec_runlen(avp->vec)); | ||
471 | /* if the seqno we are analyzing is larger than the | 592 | /* if the seqno we are analyzing is larger than the |
472 | * current ackno, then move towards the tail of our | 593 | * current ackno, then move towards the tail of our |
473 | * seqnos. | 594 | * seqnos. |
474 | */ | 595 | */ |
475 | while (after48(seqp->ccid2s_seq, ackno)) { | 596 | while (after48(seqp->ccid2s_seq, ackno)) { |
476 | if (seqp == hctx->seqt) { | 597 | if (seqp == hctx->ccid2hctx_seqt) { |
477 | done = 1; | 598 | done = 1; |
478 | break; | 599 | break; |
479 | } | 600 | } |
@@ -486,24 +607,26 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
486 | * run length | 607 | * run length |
487 | */ | 608 | */ |
488 | while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { | 609 | while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { |
489 | const u8 state = dccp_ackvec_state(avp->vec); | 610 | const u8 state = *vector & |
611 | DCCP_ACKVEC_STATE_MASK; | ||
490 | 612 | ||
491 | /* new packet received or marked */ | 613 | /* new packet received or marked */ |
492 | if (state != DCCPAV_NOT_RECEIVED && | 614 | if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && |
493 | !seqp->ccid2s_acked) { | 615 | !seqp->ccid2s_acked) { |
494 | if (state == DCCPAV_ECN_MARKED) | 616 | if (state == |
617 | DCCP_ACKVEC_STATE_ECN_MARKED) { | ||
495 | ccid2_congestion_event(sk, | 618 | ccid2_congestion_event(sk, |
496 | seqp); | 619 | seqp); |
497 | else | 620 | } else |
498 | ccid2_new_ack(sk, seqp, | 621 | ccid2_new_ack(sk, seqp, |
499 | &maxincr); | 622 | &maxincr); |
500 | 623 | ||
501 | seqp->ccid2s_acked = 1; | 624 | seqp->ccid2s_acked = 1; |
502 | ccid2_pr_debug("Got ack for %llu\n", | 625 | ccid2_pr_debug("Got ack for %llu\n", |
503 | (unsigned long long)seqp->ccid2s_seq); | 626 | (unsigned long long)seqp->ccid2s_seq); |
504 | hctx->pipe--; | 627 | ccid2_hc_tx_dec_pipe(sk); |
505 | } | 628 | } |
506 | if (seqp == hctx->seqt) { | 629 | if (seqp == hctx->ccid2hctx_seqt) { |
507 | done = 1; | 630 | done = 1; |
508 | break; | 631 | break; |
509 | } | 632 | } |
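The hunks above walk raw DCCP Ack Vector options: each vector byte carries a 2-bit state in its top bits and a 6-bit run length in its low bits, and covers run-length + 1 consecutive sequence numbers counting backwards from the Acknowledgement Number (RFC 4340, 11.4). A rough userspace decoder sketch with hypothetical names, ignoring 48-bit sequence-number wraparound:

```c
#include <stdint.h>
#include <stdio.h>

#define AV_STATE(b)	(((b) >> 6) & 0x3)	/* 0 = received, 1 = ECN marked, 3 = not received */
#define AV_RUNLEN(b)	((b) & 0x3f)

/* Print the state reported for every sequence number covered by one vector. */
void walk_ackvec(uint64_t ackno, const uint8_t *vec, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		unsigned int rl = AV_RUNLEN(vec[i]);
		unsigned int st = AV_STATE(vec[i]);
		unsigned int k;
		uint64_t s = ackno;

		for (k = 0; k <= rl; k++, s--)	/* run length + 1 seqnos per byte */
			printf("seq %llu state %u\n", (unsigned long long)s, st);

		ackno -= rl + 1;	/* the next byte continues just below this run */
	}
}
```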
@@ -513,6 +636,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
513 | break; | 636 | break; |
514 | 637 | ||
515 | ackno = SUB48(ackno_end_rl, 1); | 638 | ackno = SUB48(ackno_end_rl, 1); |
639 | vector++; | ||
516 | } | 640 | } |
517 | if (done) | 641 | if (done) |
518 | break; | 642 | break; |
@@ -521,11 +645,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
521 | /* The state about what is acked should be correct now | 645 | /* The state about what is acked should be correct now |
522 | * Check for NUMDUPACK | 646 | * Check for NUMDUPACK |
523 | */ | 647 | */ |
524 | seqp = hctx->seqt; | 648 | seqp = hctx->ccid2hctx_seqt; |
525 | while (before48(seqp->ccid2s_seq, hctx->high_ack)) { | 649 | while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { |
526 | seqp = seqp->ccid2s_next; | 650 | seqp = seqp->ccid2s_next; |
527 | if (seqp == hctx->seqh) { | 651 | if (seqp == hctx->ccid2hctx_seqh) { |
528 | seqp = hctx->seqh->ccid2s_prev; | 652 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; |
529 | break; | 653 | break; |
530 | } | 654 | } |
531 | } | 655 | } |
@@ -536,7 +660,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
536 | if (done == NUMDUPACK) | 660 | if (done == NUMDUPACK) |
537 | break; | 661 | break; |
538 | } | 662 | } |
539 | if (seqp == hctx->seqt) | 663 | if (seqp == hctx->ccid2hctx_seqt) |
540 | break; | 664 | break; |
541 | seqp = seqp->ccid2s_prev; | 665 | seqp = seqp->ccid2s_prev; |
542 | } | 666 | } |
@@ -557,34 +681,25 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
557 | * one ack vector. | 681 | * one ack vector. |
558 | */ | 682 | */ |
559 | ccid2_congestion_event(sk, seqp); | 683 | ccid2_congestion_event(sk, seqp); |
560 | hctx->pipe--; | 684 | ccid2_hc_tx_dec_pipe(sk); |
561 | } | 685 | } |
562 | if (seqp == hctx->seqt) | 686 | if (seqp == hctx->ccid2hctx_seqt) |
563 | break; | 687 | break; |
564 | seqp = seqp->ccid2s_prev; | 688 | seqp = seqp->ccid2s_prev; |
565 | } | 689 | } |
566 | 690 | ||
567 | hctx->seqt = last_acked; | 691 | hctx->ccid2hctx_seqt = last_acked; |
568 | } | 692 | } |
569 | 693 | ||
570 | /* trim acked packets in tail */ | 694 | /* trim acked packets in tail */ |
571 | while (hctx->seqt != hctx->seqh) { | 695 | while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) { |
572 | if (!hctx->seqt->ccid2s_acked) | 696 | if (!hctx->ccid2hctx_seqt->ccid2s_acked) |
573 | break; | 697 | break; |
574 | 698 | ||
575 | hctx->seqt = hctx->seqt->ccid2s_next; | 699 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next; |
576 | } | 700 | } |
577 | 701 | ||
578 | /* restart RTO timer if not all outstanding data has been acked */ | 702 | ccid2_hc_tx_check_sanity(hctx); |
579 | if (hctx->pipe == 0) | ||
580 | sk_stop_timer(sk, &hctx->rtotimer); | ||
581 | else | ||
582 | sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto); | ||
583 | done: | ||
584 | /* check if incoming Acks allow pending packets to be sent */ | ||
585 | if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx)) | ||
586 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | ||
587 | dccp_ackvec_parsed_cleanup(&hctx->av_chunks); | ||
588 | } | 703 | } |
589 | 704 | ||
590 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | 705 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) |
@@ -594,13 +709,17 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
594 | u32 max_ratio; | 709 | u32 max_ratio; |
595 | 710 | ||
596 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ | 711 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ |
597 | hctx->ssthresh = ~0U; | 712 | hctx->ccid2hctx_ssthresh = ~0U; |
598 | 713 | ||
599 | /* Use larger initial windows (RFC 3390, rfc2581bis) */ | 714 | /* |
600 | hctx->cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache); | 715 | * RFC 4341, 5: "The cwnd parameter is initialized to at most four |
716 | * packets for new connections, following the rules from [RFC3390]". | ||
717 | * We need to convert the bytes of RFC3390 into the packets of RFC 4341. | ||
718 | */ | ||
719 | hctx->ccid2hctx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); | ||
601 | 720 | ||
602 | /* Make sure that Ack Ratio is enabled and within bounds. */ | 721 | /* Make sure that Ack Ratio is enabled and within bounds. */ |
603 | max_ratio = DIV_ROUND_UP(hctx->cwnd, 2); | 722 | max_ratio = DIV_ROUND_UP(hctx->ccid2hctx_cwnd, 2); |
604 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) | 723 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) |
605 | dp->dccps_l_ack_ratio = max_ratio; | 724 | dp->dccps_l_ack_ratio = max_ratio; |
606 | 725 | ||
@@ -608,11 +727,15 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
608 | if (ccid2_hc_tx_alloc_seq(hctx)) | 727 | if (ccid2_hc_tx_alloc_seq(hctx)) |
609 | return -ENOMEM; | 728 | return -ENOMEM; |
610 | 729 | ||
611 | hctx->rto = DCCP_TIMEOUT_INIT; | 730 | hctx->ccid2hctx_rto = 3 * HZ; |
612 | hctx->rpdupack = -1; | 731 | ccid2_change_srtt(hctx, -1); |
613 | hctx->last_cong = jiffies; | 732 | hctx->ccid2hctx_rttvar = -1; |
614 | setup_timer(&hctx->rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk); | 733 | hctx->ccid2hctx_rpdupack = -1; |
615 | INIT_LIST_HEAD(&hctx->av_chunks); | 734 | hctx->ccid2hctx_last_cong = jiffies; |
735 | setup_timer(&hctx->ccid2hctx_rtotimer, ccid2_hc_tx_rto_expire, | ||
736 | (unsigned long)sk); | ||
737 | |||
738 | ccid2_hc_tx_check_sanity(hctx); | ||
616 | return 0; | 739 | return 0; |
617 | } | 740 | } |
618 | 741 | ||
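The init hunks above also restore the old initial window: RFC 4341, 5 allows at most four packets, following RFC 3390's byte formula min(4*MSS, max(2*MSS, 4380 bytes)), which the restored code expresses as clamp(4380/MSS, 2, 4) packets. A tiny hypothetical helper showing the same arithmetic (assumes mss > 0):

```c
/* Illustrative only: initial cwnd in packets per RFC 3390 as applied by
 * RFC 4341, 5 -- between 2 and 4 packets depending on the MSS. */
unsigned int initial_cwnd_packets(unsigned int mss)
{
	unsigned int iw = 4380 / mss;	/* 4380-byte budget expressed in packets */

	if (iw < 2)
		iw = 2;
	else if (iw > 4)
		iw = 4;
	return iw;
}
```

For a typical 1460-byte MSS this gives 3 packets; only an MSS of 1095 bytes or less reaches the 4-packet maximum.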
@@ -621,11 +744,11 @@ static void ccid2_hc_tx_exit(struct sock *sk) | |||
621 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 744 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
622 | int i; | 745 | int i; |
623 | 746 | ||
624 | sk_stop_timer(sk, &hctx->rtotimer); | 747 | ccid2_hc_tx_kill_rto_timer(sk); |
625 | 748 | ||
626 | for (i = 0; i < hctx->seqbufc; i++) | 749 | for (i = 0; i < hctx->ccid2hctx_seqbufc; i++) |
627 | kfree(hctx->seqbuf[i]); | 750 | kfree(hctx->ccid2hctx_seqbuf[i]); |
628 | hctx->seqbufc = 0; | 751 | hctx->ccid2hctx_seqbufc = 0; |
629 | } | 752 | } |
630 | 753 | ||
631 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | 754 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
@@ -636,28 +759,27 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
636 | switch (DCCP_SKB_CB(skb)->dccpd_type) { | 759 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
637 | case DCCP_PKT_DATA: | 760 | case DCCP_PKT_DATA: |
638 | case DCCP_PKT_DATAACK: | 761 | case DCCP_PKT_DATAACK: |
639 | hcrx->data++; | 762 | hcrx->ccid2hcrx_data++; |
640 | if (hcrx->data >= dp->dccps_r_ack_ratio) { | 763 | if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) { |
641 | dccp_send_ack(sk); | 764 | dccp_send_ack(sk); |
642 | hcrx->data = 0; | 765 | hcrx->ccid2hcrx_data = 0; |
643 | } | 766 | } |
644 | break; | 767 | break; |
645 | } | 768 | } |
646 | } | 769 | } |
647 | 770 | ||
648 | static struct ccid_operations ccid2 = { | 771 | static struct ccid_operations ccid2 = { |
649 | .ccid_id = DCCPC_CCID2, | 772 | .ccid_id = DCCPC_CCID2, |
650 | .ccid_name = "TCP-like", | 773 | .ccid_name = "TCP-like", |
651 | .ccid_owner = THIS_MODULE, | 774 | .ccid_owner = THIS_MODULE, |
652 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), | 775 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), |
653 | .ccid_hc_tx_init = ccid2_hc_tx_init, | 776 | .ccid_hc_tx_init = ccid2_hc_tx_init, |
654 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, | 777 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, |
655 | .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet, | 778 | .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet, |
656 | .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent, | 779 | .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent, |
657 | .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options, | 780 | .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv, |
658 | .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv, | 781 | .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock), |
659 | .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock), | 782 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, |
660 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, | ||
661 | }; | 783 | }; |
662 | 784 | ||
663 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |