aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
author    Eric Dumazet <edumazet@google.com>    2018-05-21 18:08:56 -0400
committer David S. Miller <davem@davemloft.net>    2018-05-22 15:43:15 -0400
commit9a9c9b51e54618861420093ae6e9b50a961914c5 (patch)
tree880629d0a7f41e788da040cd6f9ac5b5c5cf81b9 /net/ipv4/tcp_input.c
parent290aa0ad74c995c60d94fb4f1d66d411efa13dd5 (diff)
tcp: add max_quickacks param to tcp_incr_quickack and tcp_enter_quickack_mode
We want to add finer control of the number of ACK packets sent after
ECN events.

This patch is not changing current behavior, it only enables the
following change.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index aebb29ab2fdf..2e970e9f4e09 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -203,21 +203,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
+	quickacks = min(quickacks, max_quickacks);
 	if (quickacks > icsk->icsk_ack.quick)
-		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk)
+static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	tcp_incr_quickack(sk);
+
+	tcp_incr_quickack(sk, max_quickacks);
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
@@ -261,7 +263,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 		break;
 	case INET_ECN_CE:
 		if (tcp_ca_needs_ecn((struct sock *)tp))
@@ -269,7 +271,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 
 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -686,7 +688,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(sk);
+		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
 		int m = now - icsk->icsk_ack.lrcvtime;
@@ -702,7 +704,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			/* Too long gap. Apparently sender failed to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(sk);
+			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 			sk_mem_reclaim(sk);
 		}
 	}
@@ -4179,7 +4181,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
 		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4706,7 +4708,7 @@ queue_and_out:
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
 		tcp_drop(sk, skb);
@@ -5790,7 +5792,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);
 