path: root/net/dccp/ccids/ccid3.c
Diffstat (limited to 'net/dccp/ccids/ccid3.c')
-rw-r--r--  net/dccp/ccids/ccid3.c | 84
1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8d33a09608e9..35123c19a08f 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -133,12 +133,23 @@ static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
 
 {
         struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+        __u64 min_rate = 2 * hctx->ccid3hctx_x_recv;
         const __u64 old_x = hctx->ccid3hctx_x;
 
+        /*
+         * Handle IDLE periods: do not reduce below RFC3390 initial sending rate
+         * when idling [RFC 4342, 5.1]. See also draft-ietf-dccp-rfc3448bis.
+         * For consistency with X and X_recv, min_rate is also scaled by 2^6.
+         */
+        if (unlikely(hctx->ccid3hctx_idle)) {
+                min_rate = rfc3390_initial_rate(sk);
+                min_rate = max(min_rate, 2 * hctx->ccid3hctx_x_recv);
+        }
+
         if (hctx->ccid3hctx_p > 0) {
 
                 hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
-                                        hctx->ccid3hctx_x_recv * 2);
+                                        min_rate);
                 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
                                         (((__u64)hctx->ccid3hctx_s) << 6) /
                                         TFRC_T_MBI);
@@ -147,7 +158,7 @@ static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
                    (suseconds_t)hctx->ccid3hctx_rtt >= 0) {
 
                 hctx->ccid3hctx_x =
-                        max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
+                        max(min(2 * hctx->ccid3hctx_x, min_rate),
                             scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
                                        hctx->ccid3hctx_rtt));
                 hctx->ccid3hctx_t_ld = *now;
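
The two hunks above change how ccid3_hc_tx_update_x() bounds the sending rate X. As a rough illustration only (not the kernel code), the sketch below restates the updated calculation with plain C99 types: function and parameter names are stand-ins, x, x_recv, initial_rate and the result carry the kernel's 2^6 scaling while x_calc does not, initial_rate plays the role of rfc3390_initial_rate(), scaled_div() is approximated by a microsecond-based division, and the t_ld/RTT guard around the no-loss branch is omitted.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * x, x_recv, initial_rate and the result are scaled by 2^6 (bytes/sec * 64),
 * x_calc is unscaled, rtt_usec is the RTT in microseconds, s the packet size.
 */
#include <stdint.h>

#define TFRC_T_MBI 64   /* maximum backoff interval, RFC 3448: 64 seconds */

static uint64_t u64_min(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t u64_max(uint64_t a, uint64_t b) { return a > b ? a : b; }

uint64_t tfrc_update_x(uint64_t x, uint64_t x_recv, uint32_t x_calc,
                       uint32_t s, uint32_t rtt_usec, uint32_t p, int idle,
                       uint64_t initial_rate)
{
        uint64_t min_rate = 2 * x_recv;

        /* While idle, do not let the X_recv-based cap fall below the
         * RFC 3390 initial rate. */
        if (idle)
                min_rate = u64_max(initial_rate, 2 * x_recv);

        if (p > 0) {
                /* Loss case: cap at X_calc and min_rate, floor at s/t_mbi */
                x = u64_min((uint64_t)x_calc << 6, min_rate);
                x = u64_max(x, ((uint64_t)s << 6) / TFRC_T_MBI);
        } else {
                /* No-loss case: at most double X, floor at one packet per RTT
                 * (s/RTT, approximating scaled_div()). */
                x = u64_max(u64_min(2 * x, min_rate),
                            ((uint64_t)s << 6) * 1000000ULL / rtt_usec);
        }
        return x;
}

Note that the idle handling only relaxes the X_recv-based cap toward the RFC 3390 initial rate; X is still limited by X_calc when there is loss, and by doubling the current rate when there is not.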
@@ -209,6 +220,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
 {
         struct sock *sk = (struct sock *)data;
         struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+        struct timeval now;
         unsigned long t_nfb = USEC_PER_SEC / 5;
 
         bh_lock_sock(sk);
@@ -221,6 +233,8 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
         ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
                        ccid3_tx_state_name(hctx->ccid3hctx_state));
 
+        hctx->ccid3hctx_idle = 1;
+
         switch (hctx->ccid3hctx_state) {
         case TFRC_SSTATE_NO_FBACK:
                 /* RFC 3448, 4.4: Halve send rate directly */
@@ -239,49 +253,33 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                 break;
         case TFRC_SSTATE_FBACK:
                 /*
-                 * Check if IDLE since last timeout and recv rate is less than
-                 * 4 packets (in units of 64*bytes/sec) per RTT
+                 * Modify the cached value of X_recv [RFC 3448, 4.4]
+                 *
+                 * If (p == 0 || X_calc > 2 * X_recv)
+                 *   X_recv = max(X_recv / 2, s / (2 * t_mbi));
+                 * Else
+                 *   X_recv = X_calc / 4;
+                 *
+                 * Note that X_recv is scaled by 2^6 while X_calc is not
                  */
-                if (!hctx->ccid3hctx_idle ||
-                    (hctx->ccid3hctx_x_recv >= 4 *
-                     scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
-                                hctx->ccid3hctx_rtt))) {
-                        struct timeval now;
+                BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
 
-                        ccid3_pr_debug("%s(%p, state=%s), not idle\n",
-                                       dccp_role(sk), sk,
-                                       ccid3_tx_state_name(hctx->ccid3hctx_state));
+                if (hctx->ccid3hctx_p == 0 ||
+                    (hctx->ccid3hctx_x_calc > (hctx->ccid3hctx_x_recv >> 5))) {
 
-                        /*
-                         * Modify the cached value of X_recv [RFC 3448, 4.4]
-                         *
-                         * If (p == 0 || X_calc > 2 * X_recv)
-                         *   X_recv = max(X_recv / 2, s / (2 * t_mbi));
-                         * Else
-                         *   X_recv = X_calc / 4;
-                         *
-                         * Note that X_recv is scaled by 2^6 while X_calc is not
-                         */
-                        BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
-
-                        if (hctx->ccid3hctx_p == 0 ||
-                            (hctx->ccid3hctx_x_calc >
-                             (hctx->ccid3hctx_x_recv >> 5))) {
-
-                                hctx->ccid3hctx_x_recv =
-                                        max(hctx->ccid3hctx_x_recv / 2,
-                                            (((__u64)hctx->ccid3hctx_s) << 6) /
-                                            (2 * TFRC_T_MBI));
-
-                                if (hctx->ccid3hctx_p == 0)
-                                        dccp_timestamp(sk, &now);
-                        } else {
-                                hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
-                                hctx->ccid3hctx_x_recv <<= 4;
-                        }
-                        /* Now recalculate X [RFC 3448, 4.3, step (4)] */
-                        ccid3_hc_tx_update_x(sk, &now);
+                        hctx->ccid3hctx_x_recv =
+                                max(hctx->ccid3hctx_x_recv / 2,
+                                    (((__u64)hctx->ccid3hctx_s) << 6) /
+                                    (2 * TFRC_T_MBI));
+
+                        if (hctx->ccid3hctx_p == 0)
+                                dccp_timestamp(sk, &now);
+                } else {
+                        hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
+                        hctx->ccid3hctx_x_recv <<= 4;
                 }
+                /* Now recalculate X [RFC 3448, 4.3, step (4)] */
+                ccid3_hc_tx_update_x(sk, &now);
                 /*
                  * Schedule no feedback timer to expire in
                  * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
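
The X_recv reduction that this hunk un-nests follows RFC 3448, 4.4. Because X_recv is stored scaled by 2^6 while X_calc is not, the comparison "X_calc > 2 * X_recv" becomes x_calc > (x_recv >> 5), and "X_recv = X_calc / 4" becomes x_calc << 4. A minimal standalone sketch (not the kernel code; the function name and types are illustrative stand-ins):

#include <stdint.h>

#define TFRC_T_MBI 64   /* maximum backoff interval, RFC 3448: 64 seconds */

/* x_recv and the result are scaled by 2^6; x_calc is not. */
uint64_t tfrc_reduce_x_recv(uint64_t x_recv, uint32_t x_calc,
                            uint32_t s, uint32_t p)
{
        if (p == 0 || x_calc > (x_recv >> 5)) {
                /* X_recv = max(X_recv / 2, s / (2 * t_mbi)) */
                uint64_t floor = ((uint64_t)s << 6) / (2 * TFRC_T_MBI);

                return x_recv / 2 > floor ? x_recv / 2 : floor;
        }
        /* X_recv = X_calc / 4, i.e. (x_calc << 6) / 4 */
        return (uint64_t)x_calc << 4;
}

With the idle protection now handled inside ccid3_hc_tx_update_x() via min_rate, the timer no longer needs the old "not idle" guard, so this reduction applies on every no-feedback expiry in the FBACK state.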
@@ -296,8 +294,6 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                 goto out;
         }
 
-        hctx->ccid3hctx_idle = 1;
-
 restart_timer:
         sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
                        jiffies + usecs_to_jiffies(t_nfb));
@@ -377,6 +373,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
         /* prepare to send now (add options etc.) */
         dp->dccps_hc_tx_insert_options = 1;
         DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
+        hctx->ccid3hctx_idle = 0;
 
         /* set the nominal send time for the next following packet */
         timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
@@ -407,7 +404,6 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
         packet->dccphtx_seqno = dccp_sk(sk)->dccps_gss;
         packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
         packet->dccphtx_sent = 1;
-        hctx->ccid3hctx_idle = 0;
 }
 
 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)