author	Ben Hutchings <bhutchings@solarflare.com>	2012-07-30 12:11:42 -0400
committer	David S. Miller <davem@davemloft.net>	2012-08-02 03:19:17 -0400
commit	1485348d2424e1131ea42efc033cbd9366462b01 (patch)
tree	706d69b398cbd109fd3a9806fa485150d043cc52 /net/ipv4/tcp_output.c
parent	7e6d06f0de3f74ca929441add094518ae332257c (diff)
tcp: Apply device TSO segment limit earlier
Cache the device gso_max_segs in sock::sk_gso_max_segs and use it to
limit the size of TSO skbs. This avoids the need to fall back to
software GSO for local TCP senders.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
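
[Editorial note] The caching half of the change lives outside this file (the diffstat below is limited to net/ipv4/tcp_output.c), so it is not shown here. A minimal sketch of what that side plausibly looks like, assuming sk_gso_max_segs is filled in from the device in sk_setup_caps() in net/core/sock.c, mirroring the existing sk_gso_max_size caching:

/* Sketch only: assumed shape of the caching side in net/core/sock.c,
 * not part of the diff shown below. The new line mirrors how
 * sk_gso_max_size is already copied from the device.
 */
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			/* New: also cache the device's TSO segment limit */
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}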
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	21
1 files changed, 12 insertions, 9 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..a7b3ec9b6c3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
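
[Editorial note] As a purely illustrative aside, not part of the commit: the effect of the new clamp in tcp_write_xmit() can be shown in isolation. The segment budget handed to tcp_mss_split_point() is now the smaller of the congestion-window quota and the device's advertised segment limit, so a single TSO skb never exceeds what the hardware can segment. A standalone sketch with hypothetical values:

#include <stdio.h>

/* Standalone illustration with made-up numbers, not kernel code: the
 * byte limit for one TSO skb is mss_now times the smaller of the
 * congestion-window quota and the device's gso_max_segs.
 */
static unsigned int tso_byte_limit(unsigned int mss_now,
				   unsigned int cwnd_quota,
				   unsigned int gso_max_segs)
{
	unsigned int max_segs =
		cwnd_quota < gso_max_segs ? cwnd_quota : gso_max_segs;

	return mss_now * max_segs;
}

int main(void)
{
	/* Hypothetical device that can segment at most 64 segments per skb */
	printf("%u\n", tso_byte_limit(1448, 100, 64)); /* 92672: device-limited */
	printf("%u\n", tso_byte_limit(1448, 10, 64));  /* 14480: cwnd-limited */
	return 0;
}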