about | summary | refs | log | tree | commit | diff | stats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 91f5cc5a3f88..4522579aaca2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1846,17 +1846,17 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
-			struct sk_buff *skb, unsigned int len,
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 			unsigned int mss_now, gfp_t gfp)
 {
-	struct sk_buff *buff;
 	int nlen = skb->len - len;
+	struct sk_buff *buff;
 	u8 flags;
 
 	/* All of a TSO frame must be composed of paged data.  */
 	if (skb->len != skb->data_len)
-		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
+		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
+				    skb, len, mss_now, gfp);
 
 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
@@ -1892,7 +1892,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
 	/* Link BUFF into the send queue. */
 	__skb_header_release(buff);
-	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
+	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
 
 	return 0;
 }
@@ -2391,8 +2391,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 					  nonagle);
 
 		if (skb->len > limit &&
-		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
-					  skb, limit, mss_now, gfp)))
+		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
 		if (tcp_small_queue_check(sk, skb, 0))