Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e9f985e42405..6818042cd8a9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 	 */
 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
 
-	return min_t(u32, segs, sk->sk_gso_max_segs);
+	return segs;
 }
 EXPORT_SYMBOL(tcp_tso_autosize);
 
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
 
-	return tso_segs ? :
-		tcp_tso_autosize(sk, mss_now,
-				 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+	if (!tso_segs)
+		tso_segs = tcp_tso_autosize(sk, mss_now,
+				sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
 }
 
 /* Returns the portion of skb which can be sent right away */
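
The point of the two hunks above (a hedged reading of the diff, not text from the patch itself): the sk_gso_max_segs clamp used to sit inside tcp_tso_autosize(), so a congestion control module supplying its own tso_segs_goal() bypassed the device limit entirely; moving the clamp into tcp_tso_segs() applies it to both sources. A minimal user-space sketch of that difference, with hypothetical values and min_t() reduced to a plain helper:

#include <stdio.h>

#define SK_GSO_MAX_SEGS 16	/* hypothetical device/driver limit */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Old shape: only the autosized path was clamped. */
static unsigned int tso_segs_old(unsigned int cc_goal, unsigned int autosized)
{
	unsigned int clamped = min_u32(autosized, SK_GSO_MAX_SEGS);

	return cc_goal ? cc_goal : clamped;	/* cc_goal escapes the clamp */
}

/* New shape: the clamp runs after the choice, covering both sources. */
static unsigned int tso_segs_new(unsigned int cc_goal, unsigned int autosized)
{
	unsigned int segs = cc_goal ? cc_goal : autosized;

	return min_u32(segs, SK_GSO_MAX_SEGS);
}

int main(void)
{
	/* A CC module asking for 32 segments on a 16-segment device:
	 * the old placement lets 32 through, the new one caps it.
	 */
	printf("old=%u new=%u\n", tso_segs_old(32, 40), tso_segs_new(32, 40));
	return 0;
}
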
@@ -2027,6 +2028,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	}
 }
 
+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+	struct sk_buff *skb, *next;
+
+	skb = tcp_send_head(sk);
+	tcp_for_write_queue_from_safe(skb, next, sk) {
+		if (len <= skb->len)
+			break;
+
+		if (unlikely(TCP_SKB_CB(skb)->eor))
+			return false;
+
+		len -= skb->len;
+	}
+
+	return true;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets. This discovers routing
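
A rough user-space model of the new helper (hypothetical struct, a plain array standing in for the write queue): the probe may only swallow skbs whole from the head of the queue, so if an eor record boundary falls strictly inside the first len bytes, coalescing must be refused. Breaking as soon as len fits inside the current skb is correct because that skb is at most partially consumed and its eor bit stays behind on the queue:

#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	int len;
	bool eor;	/* end-of-record: data must not be merged past it */
};

/* Mirrors tcp_can_coalesce_send_queue_head() over an array. */
static bool can_coalesce_head(const struct fake_skb *q, int n, int len)
{
	for (int i = 0; i < n; i++) {
		if (len <= q[i].len)
			break;		/* probe ends inside this skb */
		if (q[i].eor)
			return false;	/* would merge across a record */
		len -= q[i].len;
	}
	return true;
}

int main(void)
{
	struct fake_skb q[] = { { 1000, true }, { 1000, false } };

	printf("%d\n", can_coalesce_head(q, 2, 1500));	/* 0: refused */
	printf("%d\n", can_coalesce_head(q, 2, 1000));	/* 1: allowed */
	return 0;
}
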
@@ -2099,6 +2118,9 @@ static int tcp_mtu_probe(struct sock *sk)
 		return 0;
 	}
 
+	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+		return -1;
+
 	/* We're allowed to probe. Build it now. */
 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
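
Why -1 rather than 0 (a hedged reading of the caller, paraphrased rather than quoted): tcp_write_xmit() treats 0 from tcp_mtu_probe() as "send nothing this round" and a negative value as "no probe, transmit normally", so the new check quietly skips probing instead of stalling the flow. A toy dispatcher showing that contract, with a hypothetical fake_mtu_probe():

#include <stdio.h>

/* Contract: 1 = probe sent, 0 = hold off sending, -1 = no probe,
 * carry on with normal transmission (the new eor case).
 */
static int fake_mtu_probe(int eor_blocks_coalescing)
{
	if (eor_blocks_coalescing)
		return -1;
	return 1;	/* pretend a probe was built and sent */
}

int main(void)
{
	for (int blocked = 0; blocked <= 1; blocked++) {
		int r = fake_mtu_probe(blocked);

		if (!r)
			puts("hold off transmitting");
		else if (r > 0)
			puts("probe sent");
		else
			puts("skip probe, transmit normally");
	}
	return 0;
}
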
@@ -2134,6 +2156,10 @@ static int tcp_mtu_probe(struct sock *sk)
 			/* We've eaten all the data from this skb.
 			 * Throw it away. */
 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+			/* If this is the last SKB we copy and eor is set
+			 * we need to propagate it to the new skb.
+			 */
+			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {
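
Finally, a compact model of the copy loop's two cases (hypothetical struct, array in place of the write queue): an skb eaten whole donates its flags and, with this patch, its eor bit to the probe skb; a partially eaten one keeps its tail, and therefore its eor, on the queue. The earlier helper guarantees only the last fully consumed skb can carry eor, so plain assignment suffices:

#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	int len;
	bool eor;
};

/* Models the probe copy loop: consume bytes from the queue head,
 * propagating eor from any skb that is eaten whole.
 */
static void build_probe(struct fake_skb *q, int n, struct fake_skb *probe)
{
	int want = probe->len;

	for (int i = 0; i < n && want > 0; i++) {
		int copy = want < q[i].len ? want : q[i].len;

		if (copy == q[i].len) {
			probe->eor = q[i].eor;	/* eaten whole: carry eor */
			q[i].len = 0;
		} else {
			q[i].len -= copy;	/* tail (and eor) stays queued */
		}
		want -= copy;
	}
}

int main(void)
{
	struct fake_skb q[] = { { 600, false }, { 400, true } };
	struct fake_skb probe = { 1000, false };

	build_probe(q, 2, &probe);
	printf("probe.eor = %d\n", probe.eor);	/* 1: boundary preserved */
	return 0;
}
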