diff options
author | Ilya Lesokhin <ilyal@mellanox.com> | 2018-02-12 05:57:04 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-02-12 11:41:42 -0500 |
commit | 808cf9e38cd7923036a99f459ccc8cf2955e47af (patch) | |
tree | e2ce55eb21695ac6f43e87bc4b946a318d3f07a1 /net/ipv4/tcp_output.c | |
parent | fb23403536eabe81ee90d32cb3051030b871d988 (diff) |
tcp: Honor the eor bit in tcp_mtu_probe
Avoid SKB coalescing if eor bit is set in one of the relevant
SKBs.
Fixes: c134ecb87817 ("tcp: Make use of MSG_EOR in tcp_sendmsg")
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- | net/ipv4/tcp_output.c | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e9f985e42405..b2bca373f8be 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2027,6 +2027,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk) | |||
2027 | } | 2027 | } |
2028 | } | 2028 | } |
2029 | 2029 | ||
2030 | static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) | ||
2031 | { | ||
2032 | struct sk_buff *skb, *next; | ||
2033 | |||
2034 | skb = tcp_send_head(sk); | ||
2035 | tcp_for_write_queue_from_safe(skb, next, sk) { | ||
2036 | if (len <= skb->len) | ||
2037 | break; | ||
2038 | |||
2039 | if (unlikely(TCP_SKB_CB(skb)->eor)) | ||
2040 | return false; | ||
2041 | |||
2042 | len -= skb->len; | ||
2043 | } | ||
2044 | |||
2045 | return true; | ||
2046 | } | ||
2047 | |||
2030 | /* Create a new MTU probe if we are ready. | 2048 | /* Create a new MTU probe if we are ready. |
2031 | * MTU probe is regularly attempting to increase the path MTU by | 2049 | * MTU probe is regularly attempting to increase the path MTU by |
2032 | * deliberately sending larger packets. This discovers routing | 2050 | * deliberately sending larger packets. This discovers routing |
@@ -2099,6 +2117,9 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2099 | return 0; | 2117 | return 0; |
2100 | } | 2118 | } |
2101 | 2119 | ||
2120 | if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) | ||
2121 | return -1; | ||
2122 | |||
2102 | /* We're allowed to probe. Build it now. */ | 2123 | /* We're allowed to probe. Build it now. */ |
2103 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); | 2124 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); |
2104 | if (!nskb) | 2125 | if (!nskb) |
@@ -2134,6 +2155,10 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2134 | /* We've eaten all the data from this skb. | 2155 | /* We've eaten all the data from this skb. |
2135 | * Throw it away. */ | 2156 | * Throw it away. */ |
2136 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; | 2157 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
2158 | /* If this is the last SKB we copy and eor is set | ||
2159 | * we need to propagate it to the new skb. | ||
2160 | */ | ||
2161 | TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; | ||
2137 | tcp_unlink_write_queue(skb, sk); | 2162 | tcp_unlink_write_queue(skb, sk); |
2138 | sk_wmem_free_skb(sk, skb); | 2163 | sk_wmem_free_skb(sk, skb); |
2139 | } else { | 2164 | } else { |