about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author Stephen Hemminger <stephen@networkplumber.org> 2013-02-05 02:25:17 -0500
committer David S. Miller <davem@davemloft.net> 2013-02-05 14:51:16 -0500
commit ca2eb5679f8ddffff60156af42595df44a315ef0 (patch)
tree e056a2cf5454039f071d47cc919bf73a9bf0c60f /net
parent 547472b8e1da72ae226430c0c4273e36fc8ca768 (diff)
tcp: remove Appropriate Byte Count support
TCP Appropriate Byte Count was added by me, but later disabled. There is no point in maintaining it since it is a potential source of bugs and Linux already implements other better window protection heuristics.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/sysctl_net_ipv4.c7
-rw-r--r--net/ipv4/tcp.c1
-rw-r--r--net/ipv4/tcp_cong.c30
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/ipv4/tcp_minisocks.c1
5 files changed, 1 insertion(+), 53 deletions(-)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 2622707602d1..960fd29d9b8e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -633,13 +633,6 @@ static struct ctl_table ipv4_table[] = {
633 .proc_handler = proc_tcp_congestion_control, 633 .proc_handler = proc_tcp_congestion_control,
634 }, 634 },
635 { 635 {
636 .procname = "tcp_abc",
637 .data = &sysctl_tcp_abc,
638 .maxlen = sizeof(int),
639 .mode = 0644,
640 .proc_handler = proc_dointvec,
641 },
642 {
643 .procname = "tcp_mtu_probing", 636 .procname = "tcp_mtu_probing",
644 .data = &sysctl_tcp_mtu_probing, 637 .data = &sysctl_tcp_mtu_probing,
645 .maxlen = sizeof(int), 638 .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ec1f69c5ceb..2c7e5963c2ea 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2289,7 +2289,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2289 tp->packets_out = 0; 2289 tp->packets_out = 0;
2290 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2290 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2291 tp->snd_cwnd_cnt = 0; 2291 tp->snd_cwnd_cnt = 0;
2292 tp->bytes_acked = 0;
2293 tp->window_clamp = 0; 2292 tp->window_clamp = 0;
2294 tcp_set_ca_state(sk, TCP_CA_Open); 2293 tcp_set_ca_state(sk, TCP_CA_Open);
2295 tcp_clear_retrans(tp); 2294 tcp_clear_retrans(tp);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index cdf2e707bb10..019c2389a341 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -317,28 +317,11 @@ void tcp_slow_start(struct tcp_sock *tp)
317 snd_cwnd = 1U; 317 snd_cwnd = 1U;
318 } 318 }
319 319
320 /* RFC3465: ABC Slow start
321 * Increase only after a full MSS of bytes is acked
322 *
323 * TCP sender SHOULD increase cwnd by the number of
324 * previously unacknowledged bytes ACKed by each incoming
325 * acknowledgment, provided the increase is not more than L
326 */
327 if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
328 return;
329
330 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 320 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
331 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 321 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
332 else 322 else
333 cnt = snd_cwnd; /* exponential increase */ 323 cnt = snd_cwnd; /* exponential increase */
334 324
335 /* RFC3465: ABC
336 * We MAY increase by 2 if discovered delayed ack
337 */
338 if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
339 cnt <<= 1;
340 tp->bytes_acked = 0;
341
342 tp->snd_cwnd_cnt += cnt; 325 tp->snd_cwnd_cnt += cnt;
343 while (tp->snd_cwnd_cnt >= snd_cwnd) { 326 while (tp->snd_cwnd_cnt >= snd_cwnd) {
344 tp->snd_cwnd_cnt -= snd_cwnd; 327 tp->snd_cwnd_cnt -= snd_cwnd;
@@ -378,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
378 /* In "safe" area, increase. */ 361 /* In "safe" area, increase. */
379 if (tp->snd_cwnd <= tp->snd_ssthresh) 362 if (tp->snd_cwnd <= tp->snd_ssthresh)
380 tcp_slow_start(tp); 363 tcp_slow_start(tp);
381
382 /* In dangerous area, increase slowly. */ 364 /* In dangerous area, increase slowly. */
383 else if (sysctl_tcp_abc) { 365 else
384 /* RFC3465: Appropriate Byte Count
385 * increase once for each full cwnd acked
386 */
387 if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
388 tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
389 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
390 tp->snd_cwnd++;
391 }
392 } else {
393 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 366 tcp_cong_avoid_ai(tp, tp->snd_cwnd);
394 }
395} 367}
396EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 368EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
397 369
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e376aa9591bc..f56bd1082f54 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -98,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
98int sysctl_tcp_thin_dupack __read_mostly; 98int sysctl_tcp_thin_dupack __read_mostly;
99 99
100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
101int sysctl_tcp_abc __read_mostly;
102int sysctl_tcp_early_retrans __read_mostly = 2; 101int sysctl_tcp_early_retrans __read_mostly = 2;
103 102
104#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 103#define FLAG_DATA 0x01 /* Incoming frame contained data. */
@@ -2007,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
2007 tp->snd_cwnd_cnt = 0; 2006 tp->snd_cwnd_cnt = 0;
2008 tp->snd_cwnd_stamp = tcp_time_stamp; 2007 tp->snd_cwnd_stamp = tcp_time_stamp;
2009 tp->frto_counter = 0; 2008 tp->frto_counter = 0;
2010 tp->bytes_acked = 0;
2011 2009
2012 tp->reordering = min_t(unsigned int, tp->reordering, 2010 tp->reordering = min_t(unsigned int, tp->reordering,
2013 sysctl_tcp_reordering); 2011 sysctl_tcp_reordering);
@@ -2056,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
2056 tp->snd_cwnd_cnt = 0; 2054 tp->snd_cwnd_cnt = 0;
2057 tp->snd_cwnd_stamp = tcp_time_stamp; 2055 tp->snd_cwnd_stamp = tcp_time_stamp;
2058 2056
2059 tp->bytes_acked = 0;
2060 tcp_clear_retrans_partial(tp); 2057 tcp_clear_retrans_partial(tp);
2061 2058
2062 if (tcp_is_reno(tp)) 2059 if (tcp_is_reno(tp))
@@ -2684,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
2684 struct tcp_sock *tp = tcp_sk(sk); 2681 struct tcp_sock *tp = tcp_sk(sk);
2685 2682
2686 tp->high_seq = tp->snd_nxt; 2683 tp->high_seq = tp->snd_nxt;
2687 tp->bytes_acked = 0;
2688 tp->snd_cwnd_cnt = 0; 2684 tp->snd_cwnd_cnt = 0;
2689 tp->prior_cwnd = tp->snd_cwnd; 2685 tp->prior_cwnd = tp->snd_cwnd;
2690 tp->prr_delivered = 0; 2686 tp->prr_delivered = 0;
@@ -2735,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
2735 struct tcp_sock *tp = tcp_sk(sk); 2731 struct tcp_sock *tp = tcp_sk(sk);
2736 2732
2737 tp->prior_ssthresh = 0; 2733 tp->prior_ssthresh = 0;
2738 tp->bytes_acked = 0;
2739 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2734 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2740 tp->undo_marker = 0; 2735 tp->undo_marker = 0;
2741 tcp_init_cwnd_reduction(sk, set_ssthresh); 2736 tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3417,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3417{ 3412{
3418 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3413 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
3419 tp->snd_cwnd_cnt = 0; 3414 tp->snd_cwnd_cnt = 0;
3420 tp->bytes_acked = 0;
3421 TCP_ECN_queue_cwr(tp); 3415 TCP_ECN_queue_cwr(tp);
3422 tcp_moderate_cwnd(tp); 3416 tcp_moderate_cwnd(tp);
3423} 3417}
@@ -3609,15 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3609 if (after(ack, prior_snd_una)) 3603 if (after(ack, prior_snd_una))
3610 flag |= FLAG_SND_UNA_ADVANCED; 3604 flag |= FLAG_SND_UNA_ADVANCED;
3611 3605
3612 if (sysctl_tcp_abc) {
3613 if (icsk->icsk_ca_state < TCP_CA_CWR)
3614 tp->bytes_acked += ack - prior_snd_una;
3615 else if (icsk->icsk_ca_state == TCP_CA_Loss)
3616 /* we assume just one segment left network */
3617 tp->bytes_acked += min(ack - prior_snd_una,
3618 tp->mss_cache);
3619 }
3620
3621 prior_fackets = tp->fackets_out; 3606 prior_fackets = tp->fackets_out;
3622 prior_in_flight = tcp_packets_in_flight(tp); 3607 prior_in_flight = tcp_packets_in_flight(tp);
3623 3608
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f35f2dfb6401..f0409287b5f4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -446,7 +446,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
446 */ 446 */
447 newtp->snd_cwnd = TCP_INIT_CWND; 447 newtp->snd_cwnd = TCP_INIT_CWND;
448 newtp->snd_cwnd_cnt = 0; 448 newtp->snd_cwnd_cnt = 0;
449 newtp->bytes_acked = 0;
450 449
451 newtp->frto_counter = 0; 450 newtp->frto_counter = 0;
452 newtp->frto_highmark = 0; 451 newtp->frto_highmark = 0;