author    David S. Miller <davem@davemloft.net>  2005-05-23 15:03:06 -0400
committer David S. Miller <davem@davemloft.net>  2005-05-23 15:03:06 -0400
commit    314324121f9b94b2ca657a494cf2b9cb0e4a28cc (patch)
tree      dade029a8df8b249d14282d8f8023a0de0f6c1e7 /net/ipv4
parent    e16fa6b9d2ad9467cf5bdf517e6b6f45e5867ad6 (diff)
[TCP]: Fix stretch ACK performance killer when doing ucopy.
When we are doing ucopy, we try to defer the ACK generation to cleanup_rbuf().

This works most of the time very well, but if the ucopy prequeue is large, this ACKing behavior kills performance. With TSO, it is possible to fill the prequeue so large that by the time the ACK is sent and gets back to the sender, most of the window has emptied of data and performance suffers significantly.

This behavior does help in some cases, so we should think about re-enabling this trick in the future, using some kind of limit in order to avoid the bug case.

Signed-off-by: David S. Miller <davem@davemloft.net>
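As a rough illustration of the problem described above (not part of the commit; the 64 KB window and 56 KB prequeue figures are made-up numbers chosen only for this sketch), the following standalone C program computes how little of the advertised window is left by the time a single deferred "stretch" ACK finally reaches the sender:

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures, not taken from the commit: assume a 64 KB
	 * advertised receive window and a ucopy prequeue that has absorbed
	 * 56 KB of TSO-sized segments before cleanup_rbuf() runs. */
	const unsigned int window    = 64 * 1024;
	const unsigned int prequeued = 56 * 1024;

	/* While the ACK for all of that data is deferred, the sender keeps
	 * transmitting against the old window, so only the remainder is
	 * still usable when the stretch ACK finally arrives. */
	unsigned int remaining = window - prequeued;

	printf("window remaining when the deferred ACK goes out: %u bytes (%u%%)\n",
	       remaining, remaining * 100 / window);
	return 0;
}

With these numbers only 8192 bytes (about 12%) of the window survive the deferral. The patch below drops the eaten-path special case (quickack vs. delayed ACK) and lets that path share the __tcp_ack_snd_check(sk, 0) decision with the non-eaten path, which is what keeps the ACK clock running in the large-prequeue case described above.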
Diffstat (limited to 'net/ipv4')
-rw-r--r--    net/ipv4/tcp_input.c    11
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 79835a67a274..5bad504630a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4355,16 +4355,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					goto no_ack;
 			}
 
-			if (eaten) {
-				if (tcp_in_quickack_mode(tp)) {
-					tcp_send_ack(sk);
-				} else {
-					tcp_send_delayed_ack(sk);
-				}
-			} else {
-				__tcp_ack_snd_check(sk, 0);
-			}
-
+			__tcp_ack_snd_check(sk, 0);
 no_ack:
 			if (eaten)
 				__kfree_skb(skb);