diff options
author | Dmitry Yusupov <dima@neterion.com> | 2005-08-23 13:09:27 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2005-08-23 13:09:27 -0400 |
commit | 14869c388673e8db3348ab3706fa6485d0f0cf95 (patch) | |
tree | fa5de8895166ae31371264544027941d469044f9 /net/ipv4 | |
parent | f6fdd7d9c273bb2a20ab467cb57067494f932fa3 (diff) |
[TCP]: Do TSO deferral even if tail SKB can go out now.
If the tail SKB fits into the window, it is still
beneficial to defer until the goal percentage of
the window is available. This gives the application
time to feed more data into the send queue and thus
results in larger TSO frames going out.
Patch from Dmitry Yusupov <dima@neterion.com>.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/tcp_output.c | 4 |
1 files changed, 0 insertions, 4 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 566045e58437..dd30dd137b74 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -925,10 +925,6 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_ | |||
925 | 925 | ||
926 | limit = min(send_win, cong_win); | 926 | limit = min(send_win, cong_win); |
927 | 927 | ||
928 | /* If sk_send_head can be sent fully now, just do it. */ | ||
929 | if (skb->len <= limit) | ||
930 | return 0; | ||
931 | |||
932 | if (sysctl_tcp_tso_win_divisor) { | 928 | if (sysctl_tcp_tso_win_divisor) { |
933 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); | 929 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
934 | 930 | ||