author     Eric Dumazet <edumazet@google.com>      2013-04-12 07:31:52 -0400
committer  David S. Miller <davem@davemloft.net>   2013-04-12 18:17:06 -0400
commit     d6a4a10411764cf1c3a5dad4f06c5ebe5194488b
tree       54d1e4c0c92bc780d9e2f8a822c1c8dc271df70d /net/ipv4
parent     d14a489a411937fb9420fe2b05168ee9e1e06c9c
tcp: GSO should be TSQ friendly
I noticed that TSQ (TCP Small Queues) was less effective when TSO is
turned off and GSO is on. If BQL is not enabled, TSQ then has no effect.

It turns out the GSO engine frees the original gso_skb at the time the
fragments are generated and queued to the NIC.

We should instead call the tcp_wfree() destructor for the last fragment,
to keep the flow control as intended in TSQ. This effectively limits
the number of queued packets on the qdisc + NIC layers.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
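To make the fix concrete, below is a minimal userspace sketch of the
destructor-transfer pattern the patch applies. It is illustrative only, not
kernel code: struct segment, struct sock_model, wfree_cb() and the byte
counts are invented stand-ins for sk_buff, the socket's sk_wmem_alloc
accounting, and tcp_wfree(). What it demonstrates is that moving the
callback (and the charged truesize) from the original gso_skb to the last
fragment defers the TSQ completion signal until the data actually leaves
the qdisc/NIC.

/* Userspace model; compiles with gcc/clang (uses __typeof__). */
#include <stdio.h>

struct sock_model {
	unsigned int wmem_alloc;	/* bytes charged to the socket */
};

struct segment {
	struct sock_model *sk;
	unsigned int truesize;
	void (*destructor)(struct segment *);
};

/* Stand-in for tcp_wfree(): uncharge the bytes at TX completion so
 * TSQ can let the flow send more. */
static void wfree_cb(struct segment *seg)
{
	seg->sk->wmem_alloc -= seg->truesize;
	printf("TX completion: %u bytes still charged\n",
	       seg->sk->wmem_alloc);
}

#define SWAP(a, b) \
	do { __typeof__(a) t_ = (a); (a) = (b); (b) = t_; } while (0)

int main(void)
{
	struct sock_model sk = { .wmem_alloc = 65536 };
	struct segment gso_skb = { &sk, 65536, wfree_cb };
	struct segment last_frag = { NULL, 1448, NULL };

	/* The patch's core move: hand the callback, socket reference
	 * and charged size over to the last generated fragment. */
	if (gso_skb.destructor == wfree_cb) {
		SWAP(gso_skb.sk, last_frag.sk);
		SWAP(gso_skb.destructor, last_frag.destructor);
		SWAP(gso_skb.truesize, last_frag.truesize);
	}

	/* GSO engine frees the original skb now: no callback fires,
	 * the socket stays charged, TSQ keeps throttling. */
	if (gso_skb.destructor)
		gso_skb.destructor(&gso_skb);

	/* Later, the NIC completes the last fragment: only now is the
	 * socket uncharged (prints "0 bytes still charged"). */
	if (last_frag.destructor)
		last_frag.destructor(&last_frag);
	return 0;
}

Before the patch, the callback ran at the point marked "GSO engine frees",
i.e. immediately; after it, at TX completion, which is exactly the flow
control TSQ intends.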
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp.c		12
-rw-r--r--	net/ipv4/tcp_output.c	 2
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a96f7b586277..963bda18486f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2885,6 +2885,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 	__be32 delta;
 	unsigned int oldlen;
 	unsigned int mss;
+	struct sk_buff *gso_skb = skb;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
@@ -2953,6 +2954,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 		th->cwr = 0;
 	} while (skb->next);
 
+	/* Following permits TCP Small Queues to work well with GSO :
+	 * The callback to TCP stack will be called at the time last frag
+	 * is freed at TX completion, and not right now when gso_skb
+	 * is freed by GSO engine.
+	 */
+	if (gso_skb->destructor == tcp_wfree) {
+		swap(gso_skb->sk, skb->sk);
+		swap(gso_skb->destructor, skb->destructor);
+		swap(gso_skb->truesize, skb->truesize);
+	}
+
 	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
 		      skb->data_len);
 	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
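A detail worth spelling out in the hunk above: truesize must travel with
the destructor. tcp_wfree() uncharges skb->truesize from the socket's
write-memory accounting, so the bytes charged for the whole original
gso_skb have to be released when the last fragment completes; swapping
only the destructor would leave the socket permanently charged and TSQ
permanently throttled. (This reading of the accounting is inferred, not
stated in the commit message.)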
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index af354c98fdb5..d12694353540 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -787,7 +787,7 @@ void __init tcp_tasklet_init(void)
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-static void tcp_wfree(struct sk_buff *skb)
+void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
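The tcp_output.c hunk removes the static qualifier so that
tcp_tso_segment() in net/ipv4/tcp.c can reference tcp_wfree directly; a
matching extern declaration presumably lands in a shared header, which
would not appear here since the diffstat is limited to net/ipv4.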