-rw-r--r--	Documentation/networking/ip-sysctl.txt	11
-rw-r--r--	include/linux/tcp.h	1
-rw-r--r--	include/net/tcp.h	1
-rw-r--r--	kernel/sysctl_binary.c	1
-rw-r--r--	net/ipv4/sysctl_net_ipv4.c	7
-rw-r--r--	net/ipv4/tcp.c	1
-rw-r--r--	net/ipv4/tcp_cong.c	30
-rw-r--r--	net/ipv4/tcp_input.c	15
-rw-r--r--	net/ipv4/tcp_minisocks.c	1
9 files changed, 1 insertion, 67 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 19ac1802bfd4..dc2dc87d2557 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -130,17 +130,6 @@ somaxconn - INTEGER
 	Defaults to 128. See also tcp_max_syn_backlog for additional tuning
 	for TCP sockets.
 
-tcp_abc - INTEGER
-	Controls Appropriate Byte Count (ABC) defined in RFC3465.
-	ABC is a way of increasing congestion window (cwnd) more slowly
-	in response to partial acknowledgments.
-	Possible values are:
-		0 increase cwnd once per acknowledgment (no ABC)
-		1 increase cwnd once per acknowledgment of full sized segment
-		2 allow increase cwnd by two if acknowledgment is
-		  of two segments to compensate for delayed acknowledgments.
-	Default: 0 (off)
-
 tcp_abort_on_overflow - BOOLEAN
 	If listening service is too slow to accept new connections,
 	reset them. Default state is FALSE. It means that if overflow
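
Note: the removed documentation above fully specifies the three tcp_abc modes. As a rough illustration only, the per-ACK growth rule it describes can be modeled in plain userspace C like this (a sketch, not kernel code; cwnd_increase, abc_mode, and the sample MSS value are invented for illustration):

#include <stdio.h>

/* Hypothetical helper: per-ACK cwnd growth (in segments) under each
 * tcp_abc mode, following the removed documentation text. */
static unsigned int cwnd_increase(int abc_mode, unsigned int bytes_acked,
				  unsigned int mss)
{
	switch (abc_mode) {
	case 0:	/* no ABC: grow once per acknowledgment */
		return 1;
	case 1:	/* grow only once a full-sized segment has been acked */
		return bytes_acked >= mss ? 1 : 0;
	case 2:	/* allow +2 when one (delayed) ACK covers two segments */
		if (bytes_acked >= 2 * mss)
			return 2;
		return bytes_acked >= mss ? 1 : 0;
	default:
		return 1;
	}
}

int main(void)
{
	const unsigned int mss = 1460;	/* illustrative MSS */

	/* A delayed ACK covering two full segments, under each mode. */
	for (int mode = 0; mode <= 2; mode++)
		printf("tcp_abc=%d -> cwnd += %u\n",
		       mode, cwnd_increase(mode, 2 * mss, mss));
	return 0;
}
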
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4e1d2283e3cc..6d0d46138ae8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -246,7 +246,6 @@ struct tcp_sock {
 	u32	sacked_out;	/* SACK'd packets			*/
 	u32	fackets_out;	/* FACK'd packets			*/
 	u32	tso_deferred;
-	u32	bytes_acked;	/* Appropriate Byte Counting - RFC3465 */
 
 	/* from STCP, retrans queue hinting */
 	struct sk_buff* lost_skb_hint;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 614af8b7758e..23f2e98d4b65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -279,7 +279,6 @@ extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_abc;
 extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 5a6384450501..b669ca1fa103 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -387,7 +387,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
 	{ CTL_INT,	NET_TCP_MODERATE_RCVBUF,	"tcp_moderate_rcvbuf" },
 	{ CTL_INT,	NET_TCP_TSO_WIN_DIVISOR,	"tcp_tso_win_divisor" },
 	{ CTL_STR,	NET_TCP_CONG_CONTROL,		"tcp_congestion_control" },
-	{ CTL_INT,	NET_TCP_ABC,			"tcp_abc" },
 	{ CTL_INT,	NET_TCP_MTU_PROBING,		"tcp_mtu_probing" },
 	{ CTL_INT,	NET_TCP_BASE_MSS,		"tcp_base_mss" },
 	{ CTL_INT,	NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS,	"tcp_workaround_signed_windows" },
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 2622707602d1..960fd29d9b8e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -633,13 +633,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_tcp_congestion_control,
 	},
 	{
-		.procname	= "tcp_abc",
-		.data		= &sysctl_tcp_abc,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
 		.procname	= "tcp_mtu_probing",
 		.data		= &sysctl_tcp_mtu_probing,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ec1f69c5ceb..2c7e5963c2ea 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2289,7 +2289,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->packets_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	tp->window_clamp = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index cdf2e707bb10..019c2389a341 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -317,28 +317,11 @@ void tcp_slow_start(struct tcp_sock *tp)
 		snd_cwnd = 1U;
 	}
 
-	/* RFC3465: ABC Slow start
-	 * Increase only after a full MSS of bytes is acked
-	 *
-	 * TCP sender SHOULD increase cwnd by the number of
-	 * previously unacknowledged bytes ACKed by each incoming
-	 * acknowledgment, provided the increase is not more than L
-	 */
-	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
-		return;
-
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
 	else
 		cnt = snd_cwnd;				/* exponential increase */
 
-	/* RFC3465: ABC
-	 * We MAY increase by 2 if discovered delayed ack
-	 */
-	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
-		cnt <<= 1;
-	tp->bytes_acked = 0;
-
 	tp->snd_cwnd_cnt += cnt;
 	while (tp->snd_cwnd_cnt >= snd_cwnd) {
 		tp->snd_cwnd_cnt -= snd_cwnd;
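
Note: the ABC gating deleted from tcp_slow_start() above held back growth until at least one MSS of new data had been acknowledged, optionally counted a two-segment delayed ACK double, then consumed the byte counter. A minimal userspace model of that logic (struct sock_model is an invented stand-in for the relevant tcp_sock fields; the limited-slow-start branch and the cwnd clamp are omitted):

/* Stand-in for the tcp_sock fields the deleted lines touched. */
struct sock_model {
	unsigned int snd_cwnd;	   /* congestion window, in segments */
	unsigned int snd_cwnd_cnt; /* fractional cwnd accumulator */
	unsigned int bytes_acked;  /* RFC 3465 byte counter */
	unsigned int mss_cache;	   /* current MSS estimate */
};

static void slow_start_abc(struct sock_model *tp, int tcp_abc)
{
	unsigned int snd_cwnd = tp->snd_cwnd ? tp->snd_cwnd : 1;
	unsigned int cnt;

	/* ABC slow start: grow only after a full MSS of bytes is acked. */
	if (tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	cnt = snd_cwnd;	/* exponential increase */

	/* ABC mode 2: a delayed ACK of two segments MAY count double. */
	if (tcp_abc > 1 && tp->bytes_acked >= 2 * tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	/* Convert accumulated credit into whole-segment cwnd growth. */
	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= snd_cwnd) {
		tp->snd_cwnd_cnt -= snd_cwnd;
		tp->snd_cwnd++;	/* kernel also checks snd_cwnd_clamp here */
	}
}
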
@@ -378,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	/* In "safe" area, increase. */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
-
 	/* In dangerous area, increase slowly. */
-	else if (sysctl_tcp_abc) {
-		/* RFC3465: Appropriate Byte Count
-		 * increase once for each full cwnd acked
-		 */
-		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
-			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	} else {
+	else
 		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
-	}
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
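
Note: in congestion avoidance, the deleted branch grew cwnd by one segment only after a full window's worth of bytes had been acknowledged, rather than after a full window's worth of ACKs. A standalone sketch of that step (abc_cong_avoid_step is a hypothetical helper, not a kernel function; the caller maintains the byte counter across ACKs):

#include <stdbool.h>

/* Returns true when cwnd should grow by one segment on this ACK. */
static bool abc_cong_avoid_step(unsigned int *bytes_acked,
				unsigned int snd_cwnd, unsigned int mss)
{
	/* Grow by one segment per full cwnd's worth of acked bytes. */
	if (*bytes_acked >= snd_cwnd * mss) {
		*bytes_acked -= snd_cwnd * mss;
		return true;	/* caller: if below clamp, snd_cwnd++ */
	}
	return false;
}
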
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e376aa9591bc..f56bd1082f54 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -98,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
 int sysctl_tcp_early_retrans __read_mostly = 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data. */
@@ -2007,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 	tp->frto_counter = 0;
-	tp->bytes_acked = 0;
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
@@ -2056,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 
-	tp->bytes_acked = 0;
 	tcp_clear_retrans_partial(tp);
 
 	if (tcp_is_reno(tp))
@@ -2684,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->high_seq = tp->snd_nxt;
-	tp->bytes_acked = 0;
 	tp->snd_cwnd_cnt = 0;
 	tp->prior_cwnd = tp->snd_cwnd;
 	tp->prr_delivered = 0;
@@ -2735,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
 		tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3417,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 {
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	TCP_ECN_queue_cwr(tp);
 	tcp_moderate_cwnd(tp);
 }
@@ -3609,15 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
-	if (sysctl_tcp_abc) {
-		if (icsk->icsk_ca_state < TCP_CA_CWR)
-			tp->bytes_acked += ack - prior_snd_una;
-		else if (icsk->icsk_ca_state == TCP_CA_Loss)
-			/* we assume just one segment left network */
-			tp->bytes_acked += min(ack - prior_snd_una,
-					       tp->mss_cache);
-	}
-
 	prior_fackets = tp->fackets_out;
 	prior_in_flight = tcp_packets_in_flight(tp);
 
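
Note: the block removed from tcp_ack() was the producer side of the byte counter: below the CWR state every newly acked byte was credited, while in the Loss state at most one MSS per ACK was counted. A userspace model under those assumptions (the enum mirrors the kernel's tcp_ca_state ordering; the function and parameter names are illustrative, not kernel API):

#include <stdint.h>

/* Mirrors the kernel's tcp_ca_state ordering: Open < Disorder < CWR
 * < Recovery < Loss. */
enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/* Credit newly acknowledged bytes on each incoming ACK. */
static void account_bytes_acked(uint32_t *bytes_acked, enum ca_state state,
				uint32_t ack, uint32_t prior_snd_una,
				uint32_t mss)
{
	if (state < CA_CWR)
		*bytes_acked += ack - prior_snd_una;
	else if (state == CA_LOSS)
		/* assume just one segment left the network */
		*bytes_acked += min_u32(ack - prior_snd_una, mss);
}
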
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f35f2dfb6401..f0409287b5f4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -446,7 +446,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	 */
 	newtp->snd_cwnd = TCP_INIT_CWND;
 	newtp->snd_cwnd_cnt = 0;
-	newtp->bytes_acked = 0;
 
 	newtp->frto_counter = 0;
 	newtp->frto_highmark = 0;