Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--	net/dccp/ccids/ccid2.c | 622
1 file changed, 250 insertions(+), 372 deletions(-)
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 9a430734530c..fa713227c66f 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,7 +25,7 @@
 /*
  * This implementation should follow RFC 4341
  */
-
+#include "../feat.h"
 #include "../ccid.h"
 #include "../dccp.h"
 #include "ccid2.h"
@@ -34,51 +34,8 @@
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 static int ccid2_debug;
 #define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
-
-static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
-{
-	int len = 0;
-	int pipe = 0;
-	struct ccid2_seq *seqp = hctx->ccid2hctx_seqh;
-
-	/* there is data in the chain */
-	if (seqp != hctx->ccid2hctx_seqt) {
-		seqp = seqp->ccid2s_prev;
-		len++;
-		if (!seqp->ccid2s_acked)
-			pipe++;
-
-		while (seqp != hctx->ccid2hctx_seqt) {
-			struct ccid2_seq *prev = seqp->ccid2s_prev;
-
-			len++;
-			if (!prev->ccid2s_acked)
-				pipe++;
-
-			/* packets are sent sequentially */
-			BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
-						prev->ccid2s_seq ) >= 0);
-			BUG_ON(time_before(seqp->ccid2s_sent,
-					   prev->ccid2s_sent));
-
-			seqp = prev;
-		}
-	}
-
-	BUG_ON(pipe != hctx->ccid2hctx_pipe);
-	ccid2_pr_debug("len of chain=%d\n", len);
-
-	do {
-		seqp = seqp->ccid2s_prev;
-		len++;
-	} while (seqp != hctx->ccid2hctx_seqh);
-
-	ccid2_pr_debug("total len=%d\n", len);
-	BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN);
-}
 #else
 #define ccid2_pr_debug(format, a...)
-#define ccid2_hc_tx_check_sanity(hctx)
 #endif
 
 static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
@@ -87,8 +44,7 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
 	int i;
 
 	/* check if we have space to preserve the pointer to the buffer */
-	if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) /
-					sizeof(struct ccid2_seq*)))
+	if (hctx->seqbufc >= sizeof(hctx->seqbuf) / sizeof(struct ccid2_seq *))
 		return -ENOMEM;
 
 	/* allocate buffer and initialize linked list */
@@ -104,38 +60,35 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
 	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
 
 	/* This is the first allocation. Initiate the head and tail. */
-	if (hctx->ccid2hctx_seqbufc == 0)
-		hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp;
+	if (hctx->seqbufc == 0)
+		hctx->seqh = hctx->seqt = seqp;
 	else {
 		/* link the existing list with the one we just created */
-		hctx->ccid2hctx_seqh->ccid2s_next = seqp;
-		seqp->ccid2s_prev = hctx->ccid2hctx_seqh;
+		hctx->seqh->ccid2s_next = seqp;
+		seqp->ccid2s_prev = hctx->seqh;
 
-		hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
-		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt;
+		hctx->seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
+		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->seqt;
 	}
 
 	/* store the original pointer to the buffer so we can free it */
-	hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp;
-	hctx->ccid2hctx_seqbufc++;
+	hctx->seqbuf[hctx->seqbufc] = seqp;
+	hctx->seqbufc++;
 
 	return 0;
 }
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
-	if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
-	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2);
+	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->cwnd, 2);
 
 	/*
 	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
@@ -147,8 +100,8 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
 		val = max_ratio;
 	}
-	if (val > 0xFFFF)		/* RFC 4340, 11.3 */
-		val = 0xFFFF;
+	if (val > DCCPF_ACK_RATIO_MAX)
+		val = DCCPF_ACK_RATIO_MAX;
 
 	if (val == dp->dccps_l_ack_ratio)
 		return;
@@ -157,99 +110,77 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 	dp->dccps_l_ack_ratio = val;
 }
 
-static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val)
-{
-	ccid2_pr_debug("change SRTT to %ld\n", val);
-	hctx->ccid2hctx_srtt = val;
-}
-
-static void ccid2_start_rto_timer(struct sock *sk);
-
 static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-	long s;
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
-		sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
-			       jiffies + HZ / 5);
+		sk_reset_timer(sk, &hctx->rtotimer, jiffies + HZ / 5);
 		goto out;
 	}
 
 	ccid2_pr_debug("RTO_EXPIRE\n");
 
-	ccid2_hc_tx_check_sanity(hctx);
-
 	/* back-off timer */
-	hctx->ccid2hctx_rto <<= 1;
-
-	s = hctx->ccid2hctx_rto / HZ;
-	if (s > 60)
-		hctx->ccid2hctx_rto = 60 * HZ;
-
-	ccid2_start_rto_timer(sk);
+	hctx->rto <<= 1;
+	if (hctx->rto > DCCP_RTO_MAX)
+		hctx->rto = DCCP_RTO_MAX;
 
 	/* adjust pipe, cwnd etc */
-	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd / 2;
-	if (hctx->ccid2hctx_ssthresh < 2)
-		hctx->ccid2hctx_ssthresh = 2;
-	hctx->ccid2hctx_cwnd = 1;
-	hctx->ccid2hctx_pipe = 0;
+	hctx->ssthresh = hctx->cwnd / 2;
+	if (hctx->ssthresh < 2)
+		hctx->ssthresh = 2;
+	hctx->cwnd = 1;
+	hctx->pipe = 0;
 
 	/* clear state about stuff we sent */
-	hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh;
-	hctx->ccid2hctx_packets_acked = 0;
+	hctx->seqt = hctx->seqh;
+	hctx->packets_acked = 0;
 
 	/* clear ack ratio state. */
-	hctx->ccid2hctx_rpseq = 0;
-	hctx->ccid2hctx_rpdupack = -1;
+	hctx->rpseq = 0;
+	hctx->rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
-	ccid2_hc_tx_check_sanity(hctx);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	/* restart backed-off timer */
+	sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
 
-static void ccid2_start_rto_timer(struct sock *sk)
-{
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
-	ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto);
-
-	BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer));
-	sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
-		       jiffies + hctx->ccid2hctx_rto);
-}
-
-static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
+static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
 	struct ccid2_seq *next;
 
-	hctx->ccid2hctx_pipe++;
+	hctx->pipe++;
 
-	hctx->ccid2hctx_seqh->ccid2s_seq   = dp->dccps_gss;
-	hctx->ccid2hctx_seqh->ccid2s_acked = 0;
-	hctx->ccid2hctx_seqh->ccid2s_sent  = jiffies;
+	hctx->seqh->ccid2s_seq   = dp->dccps_gss;
+	hctx->seqh->ccid2s_acked = 0;
+	hctx->seqh->ccid2s_sent  = jiffies;
 
-	next = hctx->ccid2hctx_seqh->ccid2s_next;
+	next = hctx->seqh->ccid2s_next;
 	/* check if we need to alloc more space */
-	if (next == hctx->ccid2hctx_seqt) {
+	if (next == hctx->seqt) {
 		if (ccid2_hc_tx_alloc_seq(hctx)) {
 			DCCP_CRIT("packet history - out of memory!");
 			/* FIXME: find a more graceful way to bail out */
 			return;
 		}
-		next = hctx->ccid2hctx_seqh->ccid2s_next;
-		BUG_ON(next == hctx->ccid2hctx_seqt);
+		next = hctx->seqh->ccid2s_next;
+		BUG_ON(next == hctx->seqt);
 	}
-	hctx->ccid2hctx_seqh = next;
+	hctx->seqh = next;
 
-	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd,
-		       hctx->ccid2hctx_pipe);
+	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->cwnd, hctx->pipe);
 
 	/*
 	 * FIXME: The code below is broken and the variables have been removed
@@ -272,12 +203,12 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 	 */
 #if 0
 	/* Ack Ratio. Need to maintain a concept of how many windows we sent */
-	hctx->ccid2hctx_arsent++;
+	hctx->arsent++;
 	/* We had an ack loss in this window... */
-	if (hctx->ccid2hctx_ackloss) {
-		if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) {
-			hctx->ccid2hctx_arsent	= 0;
-			hctx->ccid2hctx_ackloss	= 0;
+	if (hctx->ackloss) {
+		if (hctx->arsent >= hctx->cwnd) {
+			hctx->arsent	= 0;
+			hctx->ackloss	= 0;
 		}
 	} else {
 		/* No acks lost up to now... */
@@ -287,28 +218,28 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 		int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
 			    dp->dccps_l_ack_ratio;
 
-		denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom;
+		denom = hctx->cwnd * hctx->cwnd / denom;
 
-		if (hctx->ccid2hctx_arsent >= denom) {
+		if (hctx->arsent >= denom) {
 			ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
-			hctx->ccid2hctx_arsent = 0;
+			hctx->arsent = 0;
 		}
 	} else {
 		/* we can't increase ack ratio further [1] */
-		hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/
+		hctx->arsent = 0; /* or maybe set it to cwnd*/
 	}
 }
 #endif
 
 	/* setup RTO timer */
-	if (!timer_pending(&hctx->ccid2hctx_rtotimer))
-		ccid2_start_rto_timer(sk);
+	if (!timer_pending(&hctx->rtotimer))
+		sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto);
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 	do {
-		struct ccid2_seq *seqp = hctx->ccid2hctx_seqt;
+		struct ccid2_seq *seqp = hctx->seqt;
 
-		while (seqp != hctx->ccid2hctx_seqh) {
+		while (seqp != hctx->seqh) {
 			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
 				       (unsigned long long)seqp->ccid2s_seq,
 				       seqp->ccid2s_acked, seqp->ccid2s_sent);
@@ -316,205 +247,158 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 		}
 	} while (0);
 	ccid2_pr_debug("=========\n");
-	ccid2_hc_tx_check_sanity(hctx);
 #endif
 }
 
-/* XXX Lame code duplication!
- * returns -1 if none was found.
- * else returns the next offset to use in the function call.
+/**
+ * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
+ * This code is almost identical with TCP's tcp_rtt_estimator(), since
+ * - it has a higher sampling frequency (recommended by RFC 1323),
+ * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
+ * - it is simple (cf. more complex proposals such as Eifel timer or research
+ *   which suggests that the gain should be set according to window size),
+ * - in tests it was found to work well with CCID2 [gerrit].
  */
-static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
-			   unsigned char **vec, unsigned char *veclen)
+static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
 {
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
-	unsigned char *opt_ptr;
-	const unsigned char *opt_end = (unsigned char *)dh +
-					(dh->dccph_doff * 4);
-	unsigned char opt, len;
-	unsigned char *value;
-
-	BUG_ON(offset < 0);
-	options += offset;
-	opt_ptr = options;
-	if (opt_ptr >= opt_end)
-		return -1;
-
-	while (opt_ptr != opt_end) {
-		opt   = *opt_ptr++;
-		len   = 0;
-		value = NULL;
-
-		/* Check if this isn't a single byte option */
-		if (opt > DCCPO_MAX_RESERVED) {
-			if (opt_ptr == opt_end)
-				goto out_invalid_option;
-
-			len = *opt_ptr++;
-			if (len < 3)
-				goto out_invalid_option;
+	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	long m = mrtt ? : 1;
+
+	if (hctx->srtt == 0) {
+		/* First measurement m */
+		hctx->srtt = m << 3;
+		hctx->mdev = m << 1;
+
+		hctx->mdev_max = max(TCP_RTO_MIN, hctx->mdev);
+		hctx->rttvar   = hctx->mdev_max;
+		hctx->rtt_seq  = dccp_sk(sk)->dccps_gss;
+	} else {
+		/* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
+		m -= (hctx->srtt >> 3);
+		hctx->srtt += m;
+
+		/* Similarly, update scaled mdev with regard to |m| */
+		if (m < 0) {
+			m = -m;
+			m -= (hctx->mdev >> 2);
 			/*
-			 * Remove the type and len fields, leaving
-			 * just the value size
+			 * This neutralises RTO increase when RTT < SRTT - mdev
+			 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
+			 * in Linux TCP", USENIX 2002, pp. 49-62).
 			 */
-			len	-= 2;
-			value	 = opt_ptr;
-			opt_ptr += len;
+			if (m > 0)
+				m >>= 3;
+		} else {
+			m -= (hctx->mdev >> 2);
+		}
+		hctx->mdev += m;
 
-			if (opt_ptr > opt_end)
-				goto out_invalid_option;
+		if (hctx->mdev > hctx->mdev_max) {
+			hctx->mdev_max = hctx->mdev;
+			if (hctx->mdev_max > hctx->rttvar)
+				hctx->rttvar = hctx->mdev_max;
 		}
 
-		switch (opt) {
-		case DCCPO_ACK_VECTOR_0:
-		case DCCPO_ACK_VECTOR_1:
-			*vec	= value;
-			*veclen = len;
-			return offset + (opt_ptr - options);
+		/*
+		 * Decay RTTVAR at most once per flight, exploiting that
+		 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
+		 *  2) AWL = GSS-W+1 <= GAR <= GSS           (RFC 4340, 7.5.1)
+		 * GAR is a useful bound for FlightSize = pipe, AWL is probably
+		 * too low as it over-estimates pipe.
+		 */
+		if (after48(dccp_sk(sk)->dccps_gar, hctx->rtt_seq)) {
+			if (hctx->mdev_max < hctx->rttvar)
+				hctx->rttvar -= (hctx->rttvar -
+						 hctx->mdev_max) >> 2;
+			hctx->rtt_seq = dccp_sk(sk)->dccps_gss;
+			hctx->mdev_max = TCP_RTO_MIN;
 		}
 	}
 
-	return -1;
-
-out_invalid_option:
-	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
-	return -1;
-}
-
-static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
-{
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	/*
+	 * Set RTO from SRTT and RTTVAR
+	 * Clock granularity is ignored since the minimum error for RTTVAR is
+	 * clamped to 50msec (corresponding to HZ=20). This leads to a minimum
+	 * RTO of 200msec. This agrees with TCP and RFC 4341, 5.: "Because DCCP
+	 * does not retransmit data, DCCP does not require TCP's recommended
+	 * minimum timeout of one second".
+	 */
+	hctx->rto = (hctx->srtt >> 3) + hctx->rttvar;
 
-	sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer);
-	ccid2_pr_debug("deleted RTO timer\n");
+	if (hctx->rto > DCCP_RTO_MAX)
+		hctx->rto = DCCP_RTO_MAX;
 }
 
-static inline void ccid2_new_ack(struct sock *sk,
-				 struct ccid2_seq *seqp,
-				 unsigned int *maxincr)
+static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
+			  unsigned int *maxincr)
 {
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
 
-	if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) {
-		if (*maxincr > 0 && ++hctx->ccid2hctx_packets_acked == 2) {
-			hctx->ccid2hctx_cwnd += 1;
-			*maxincr	     -= 1;
-			hctx->ccid2hctx_packets_acked = 0;
+	if (hctx->cwnd < hctx->ssthresh) {
+		if (*maxincr > 0 && ++hctx->packets_acked == 2) {
+			hctx->cwnd += 1;
+			*maxincr  -= 1;
+			hctx->packets_acked = 0;
 		}
-	} else if (++hctx->ccid2hctx_packets_acked >= hctx->ccid2hctx_cwnd) {
-			hctx->ccid2hctx_cwnd += 1;
-			hctx->ccid2hctx_packets_acked = 0;
+	} else if (++hctx->packets_acked >= hctx->cwnd) {
+			hctx->cwnd += 1;
+			hctx->packets_acked = 0;
 	}
-
-	/* update RTO */
-	if (hctx->ccid2hctx_srtt == -1 ||
-	    time_after(jiffies, hctx->ccid2hctx_lastrtt + hctx->ccid2hctx_srtt)) {
-		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
-		int s;
-
-		/* first measurement */
-		if (hctx->ccid2hctx_srtt == -1) {
-			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
-				       r, jiffies,
-				       (unsigned long long)seqp->ccid2s_seq);
-			ccid2_change_srtt(hctx, r);
-			hctx->ccid2hctx_rttvar = r >> 1;
-		} else {
-			/* RTTVAR */
-			long tmp = hctx->ccid2hctx_srtt - r;
-			long srtt;
-
-			if (tmp < 0)
-				tmp *= -1;
-
-			tmp >>= 2;
-			hctx->ccid2hctx_rttvar *= 3;
-			hctx->ccid2hctx_rttvar >>= 2;
-			hctx->ccid2hctx_rttvar += tmp;
-
-			/* SRTT */
-			srtt = hctx->ccid2hctx_srtt;
-			srtt *= 7;
-			srtt >>= 3;
-			tmp = r >> 3;
-			srtt += tmp;
-			ccid2_change_srtt(hctx, srtt);
-		}
-		s = hctx->ccid2hctx_rttvar << 2;
-		/* clock granularity is 1 when based on jiffies */
-		if (!s)
-			s = 1;
-		hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s;
-
-		/* must be at least a second */
-		s = hctx->ccid2hctx_rto / HZ;
-		/* DCCP doesn't require this [but I like it cuz my code sux] */
-#if 1
-		if (s < 1)
-			hctx->ccid2hctx_rto = HZ;
-#endif
-		/* max 60 seconds */
-		if (s > 60)
-			hctx->ccid2hctx_rto = HZ * 60;
-
-		hctx->ccid2hctx_lastrtt = jiffies;
-
-		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
-			       hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
-			       hctx->ccid2hctx_rto, HZ, r);
-	}
-
-	/* we got a new ack, so re-start RTO timer */
-	ccid2_hc_tx_kill_rto_timer(sk);
-	ccid2_start_rto_timer(sk);
-}
-
-static void ccid2_hc_tx_dec_pipe(struct sock *sk)
-{
-	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
-
-	if (hctx->ccid2hctx_pipe == 0)
-		DCCP_BUG("pipe == 0");
-	else
-		hctx->ccid2hctx_pipe--;
-
-	if (hctx->ccid2hctx_pipe == 0)
-		ccid2_hc_tx_kill_rto_timer(sk);
+	/*
+	 * FIXME: RTT is sampled several times per acknowledgment (for each
+	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
+	 * This causes the RTT to be over-estimated, since the older entries
+	 * in the Ack Vector have earlier sending times.
+	 * The cleanest solution is to not use the ccid2s_sent field at all
+	 * and instead use DCCP timestamps - need to be resolved at some time.
+	 */
+	ccid2_rtt_estimator(sk, jiffies - seqp->ccid2s_sent);
 }
 
 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 {
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
 
-	if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) {
+	if (time_before(seqp->ccid2s_sent, hctx->last_cong)) {
 		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
 		return;
 	}
 
-	hctx->ccid2hctx_last_cong = jiffies;
+	hctx->last_cong = jiffies;
 
-	hctx->ccid2hctx_cwnd     = hctx->ccid2hctx_cwnd / 2 ? : 1U;
-	hctx->ccid2hctx_ssthresh = max(hctx->ccid2hctx_cwnd, 2U);
+	hctx->cwnd     = hctx->cwnd / 2 ? : 1U;
+	hctx->ssthresh = max(hctx->cwnd, 2U);
 
 	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
-	if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd)
-		ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd);
+	if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->cwnd)
+		ccid2_change_l_ack_ratio(sk, hctx->cwnd);
+}
+
+static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+				     u8 option, u8 *optval, u8 optlen)
+{
+	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+
+	switch (option) {
+	case DCCPO_ACK_VECTOR_0:
+	case DCCPO_ACK_VECTOR_1:
+		return dccp_ackvec_parsed_add(&hctx->av_chunks, optval, optlen,
+					      option - DCCPO_ACK_VECTOR_0);
+	}
+	return 0;
 }
 
 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
+	struct dccp_ackvec_parsed *avp;
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
-	unsigned char *vector;
-	unsigned char veclen;
-	int offset = 0;
 	int done = 0;
 	unsigned int maxincr = 0;
 
-	ccid2_hc_tx_check_sanity(hctx);
 	/* check reverse path congestion */
 	seqno = DCCP_SKB_CB(skb)->dccpd_seq;
 
@@ -523,21 +407,21 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	 * -sorbo.
 	 */
 	/* need to bootstrap */
-	if (hctx->ccid2hctx_rpdupack == -1) {
-		hctx->ccid2hctx_rpdupack = 0;
-		hctx->ccid2hctx_rpseq = seqno;
+	if (hctx->rpdupack == -1) {
+		hctx->rpdupack = 0;
+		hctx->rpseq = seqno;
 	} else {
 		/* check if packet is consecutive */
-		if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1)
-			hctx->ccid2hctx_rpseq = seqno;
+		if (dccp_delta_seqno(hctx->rpseq, seqno) == 1)
+			hctx->rpseq = seqno;
 		/* it's a later packet */
-		else if (after48(seqno, hctx->ccid2hctx_rpseq)) {
-			hctx->ccid2hctx_rpdupack++;
+		else if (after48(seqno, hctx->rpseq)) {
+			hctx->rpdupack++;
 
 			/* check if we got enough dupacks */
-			if (hctx->ccid2hctx_rpdupack >= NUMDUPACK) {
-				hctx->ccid2hctx_rpdupack = -1; /* XXX lame */
-				hctx->ccid2hctx_rpseq = 0;
+			if (hctx->rpdupack >= NUMDUPACK) {
+				hctx->rpdupack = -1; /* XXX lame */
+				hctx->rpseq = 0;
 
 				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
 			}
@@ -545,27 +429,22 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	/* check forward path congestion */
-	/* still didn't send out new data packets */
-	if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt)
+	if (dccp_packet_without_ack(skb))
 		return;
 
-	switch (DCCP_SKB_CB(skb)->dccpd_type) {
-	case DCCP_PKT_ACK:
-	case DCCP_PKT_DATAACK:
-		break;
-	default:
-		return;
-	}
+	/* still didn't send out new data packets */
+	if (hctx->seqh == hctx->seqt)
+		goto done;
 
 	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
-	if (after48(ackno, hctx->ccid2hctx_high_ack))
-		hctx->ccid2hctx_high_ack = ackno;
+	if (after48(ackno, hctx->high_ack))
+		hctx->high_ack = ackno;
 
-	seqp = hctx->ccid2hctx_seqt;
+	seqp = hctx->seqt;
 	while (before48(seqp->ccid2s_seq, ackno)) {
 		seqp = seqp->ccid2s_next;
-		if (seqp == hctx->ccid2hctx_seqh) {
-			seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+		if (seqp == hctx->seqh) {
+			seqp = hctx->seqh->ccid2s_prev;
 			break;
 		}
 	}
@@ -575,26 +454,26 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	 * packets per acknowledgement. Rounding up avoids that cwnd is not
 	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
 	 */
-	if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh)
+	if (hctx->cwnd < hctx->ssthresh)
 		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
 
 	/* go through all ack vectors */
-	while ((offset = ccid2_ackvector(sk, skb, offset,
-					 &vector, &veclen)) != -1) {
+	list_for_each_entry(avp, &hctx->av_chunks, node) {
 		/* go through this ack vector */
-		while (veclen--) {
-			const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-			u64 ackno_end_rl = SUB48(ackno, rl);
+		for (; avp->len--; avp->vec++) {
+			u64 ackno_end_rl = SUB48(ackno,
+						 dccp_ackvec_runlen(avp->vec));
 
-			ccid2_pr_debug("ackvec start:%llu end:%llu\n",
+			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
 				       (unsigned long long)ackno,
-				       (unsigned long long)ackno_end_rl);
+				       dccp_ackvec_state(avp->vec) >> 6,
+				       dccp_ackvec_runlen(avp->vec));
 			/* if the seqno we are analyzing is larger than the
 			 * current ackno, then move towards the tail of our
 			 * seqnos.
 			 */
 			while (after48(seqp->ccid2s_seq, ackno)) {
-				if (seqp == hctx->ccid2hctx_seqt) {
+				if (seqp == hctx->seqt) {
 					done = 1;
 					break;
 				}
@@ -607,26 +486,24 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			 * run length
 			 */
 			while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
-				const u8 state = *vector &
-						 DCCP_ACKVEC_STATE_MASK;
+				const u8 state = dccp_ackvec_state(avp->vec);
 
 				/* new packet received or marked */
-				if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+				if (state != DCCPAV_NOT_RECEIVED &&
 				    !seqp->ccid2s_acked) {
-					if (state ==
-					    DCCP_ACKVEC_STATE_ECN_MARKED) {
+					if (state == DCCPAV_ECN_MARKED)
 						ccid2_congestion_event(sk,
 								       seqp);
-					} else
+					else
 						ccid2_new_ack(sk, seqp,
 							      &maxincr);
 
 					seqp->ccid2s_acked = 1;
 					ccid2_pr_debug("Got ack for %llu\n",
 						       (unsigned long long)seqp->ccid2s_seq);
-					ccid2_hc_tx_dec_pipe(sk);
+					hctx->pipe--;
 				}
-				if (seqp == hctx->ccid2hctx_seqt) {
+				if (seqp == hctx->seqt) {
 					done = 1;
 					break;
 				}
@@ -636,7 +513,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 				break;
 
 			ackno = SUB48(ackno_end_rl, 1);
-			vector++;
 		}
 		if (done)
 			break;
@@ -645,11 +521,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	/* The state about what is acked should be correct now
 	 * Check for NUMDUPACK
 	 */
-	seqp = hctx->ccid2hctx_seqt;
-	while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) {
+	seqp = hctx->seqt;
+	while (before48(seqp->ccid2s_seq, hctx->high_ack)) {
 		seqp = seqp->ccid2s_next;
-		if (seqp == hctx->ccid2hctx_seqh) {
-			seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+		if (seqp == hctx->seqh) {
+			seqp = hctx->seqh->ccid2s_prev;
 			break;
 		}
 	}
@@ -660,7 +536,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			if (done == NUMDUPACK)
 				break;
 		}
-		if (seqp == hctx->ccid2hctx_seqt)
+		if (seqp == hctx->seqt)
 			break;
 		seqp = seqp->ccid2s_prev;
 	}
@@ -681,25 +557,34 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 				 * one ack vector.
 				 */
 				ccid2_congestion_event(sk, seqp);
-				ccid2_hc_tx_dec_pipe(sk);
+				hctx->pipe--;
 			}
-			if (seqp == hctx->ccid2hctx_seqt)
+			if (seqp == hctx->seqt)
 				break;
 			seqp = seqp->ccid2s_prev;
 		}
 
-		hctx->ccid2hctx_seqt = last_acked;
+		hctx->seqt = last_acked;
 	}
 
 	/* trim acked packets in tail */
-	while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) {
-		if (!hctx->ccid2hctx_seqt->ccid2s_acked)
+	while (hctx->seqt != hctx->seqh) {
+		if (!hctx->seqt->ccid2s_acked)
 			break;
 
-		hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next;
+		hctx->seqt = hctx->seqt->ccid2s_next;
 	}
 
-	ccid2_hc_tx_check_sanity(hctx);
+	/* restart RTO timer if not all outstanding data has been acked */
+	if (hctx->pipe == 0)
+		sk_stop_timer(sk, &hctx->rtotimer);
+	else
+		sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto);
+done:
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -709,17 +594,13 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	u32 max_ratio;
 
 	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
-	hctx->ccid2hctx_ssthresh = ~0U;
+	hctx->ssthresh = ~0U;
 
-	/*
-	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
-	 * packets for new connections, following the rules from [RFC3390]".
-	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
-	 */
-	hctx->ccid2hctx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
+	/* Use larger initial windows (RFC 3390, rfc2581bis) */
+	hctx->cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
 
 	/* Make sure that Ack Ratio is enabled and within bounds. */
-	max_ratio = DIV_ROUND_UP(hctx->ccid2hctx_cwnd, 2);
+	max_ratio = DIV_ROUND_UP(hctx->cwnd, 2);
 	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
 		dp->dccps_l_ack_ratio = max_ratio;
 
@@ -727,15 +608,11 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	if (ccid2_hc_tx_alloc_seq(hctx))
 		return -ENOMEM;
 
-	hctx->ccid2hctx_rto = 3 * HZ;
-	ccid2_change_srtt(hctx, -1);
-	hctx->ccid2hctx_rttvar = -1;
-	hctx->ccid2hctx_rpdupack = -1;
-	hctx->ccid2hctx_last_cong = jiffies;
-	setup_timer(&hctx->ccid2hctx_rtotimer, ccid2_hc_tx_rto_expire,
-			(unsigned long)sk);
-
-	ccid2_hc_tx_check_sanity(hctx);
+	hctx->rto = DCCP_TIMEOUT_INIT;
+	hctx->rpdupack = -1;
+	hctx->last_cong = jiffies;
+	setup_timer(&hctx->rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk);
+	INIT_LIST_HEAD(&hctx->av_chunks);
 	return 0;
 }
 
@@ -744,11 +621,11 @@ static void ccid2_hc_tx_exit(struct sock *sk)
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
 	int i;
 
-	ccid2_hc_tx_kill_rto_timer(sk);
+	sk_stop_timer(sk, &hctx->rtotimer);
 
-	for (i = 0; i < hctx->ccid2hctx_seqbufc; i++)
-		kfree(hctx->ccid2hctx_seqbuf[i]);
-	hctx->ccid2hctx_seqbufc = 0;
+	for (i = 0; i < hctx->seqbufc; i++)
+		kfree(hctx->seqbuf[i]);
+	hctx->seqbufc = 0;
 }
 
 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -759,27 +636,28 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	switch (DCCP_SKB_CB(skb)->dccpd_type) {
 	case DCCP_PKT_DATA:
 	case DCCP_PKT_DATAACK:
-		hcrx->ccid2hcrx_data++;
-		if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) {
+		hcrx->data++;
+		if (hcrx->data >= dp->dccps_r_ack_ratio) {
 			dccp_send_ack(sk);
-			hcrx->ccid2hcrx_data = 0;
+			hcrx->data = 0;
 		}
 		break;
 	}
 }
 
 static struct ccid_operations ccid2 = {
 	.ccid_id		= DCCPC_CCID2,
 	.ccid_name		= "TCP-like",
 	.ccid_owner		= THIS_MODULE,
 	.ccid_hc_tx_obj_size	= sizeof(struct ccid2_hc_tx_sock),
 	.ccid_hc_tx_init	= ccid2_hc_tx_init,
 	.ccid_hc_tx_exit	= ccid2_hc_tx_exit,
 	.ccid_hc_tx_send_packet	= ccid2_hc_tx_send_packet,
 	.ccid_hc_tx_packet_sent	= ccid2_hc_tx_packet_sent,
-	.ccid_hc_tx_packet_recv	= ccid2_hc_tx_packet_recv,
-	.ccid_hc_rx_obj_size	= sizeof(struct ccid2_hc_rx_sock),
-	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
+	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
+	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
+	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
+	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
 };
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG