 include/net/sock.h    |  2 ++
 net/core/sock.c       |  1 +
 net/ipv4/tcp.c        |  4 +++-
 net/ipv4/tcp_cong.c   |  3 ++-
 net/ipv4/tcp_output.c | 21 ++++++++++++---------
 5 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index b3730239bf18..72132aef53fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,6 +218,7 @@ struct cg_proto;
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
+ *	@sk_gso_max_segs: Maximum number of GSO segments
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -338,6 +339,7 @@ struct sock {
 	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
+	u16			sk_gso_max_segs;
 	int			sk_rcvlowat;
 	unsigned long		sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eeae49c0..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 			   old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			tp->xmit_size_goal_segs =
+				min_t(u16, xmit_size_goal / mss_now,
+				      sk->sk_gso_max_segs);
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left * tp->mss_cache < sk->sk_gso_max_size)
+	    left * tp->mss_cache < sk->sk_gso_max_size &&
+	    left < sk->sk_gso_max_segs)
 		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..a7b3ec9b6c3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))