aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2013-10-25 20:26:17 -0400
committerDavid S. Miller <davem@davemloft.net>2013-10-29 00:04:47 -0400
commit0d08c42cf9a71530fef5ebcfe368f38f2dd0476f (patch)
treed806d557fb1411add3dddbe6e59ff580afb7e05b /net/ipv4
parentfc59d5bdf1e3dca0336d155e55d812286db075ad (diff)
tcp: gso: fix truesize tracking
commit 6ff50cd55545 ("tcp: gso: do not generate out of order packets") had a heuristic that can trigger a warning in skb_try_coalesce(), because skb->truesize of the gso segments were exactly set to mss. This breaks the requirement that skb->truesize >= skb->len + truesizeof(struct sk_buff); It can trivially be reproduced by : ifconfig lo mtu 1500 ethtool -K lo tso off netperf As the skbs are looped into the TCP networking stack, skb_try_coalesce() warns us of these skbs under-estimating their truesize. Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Alexei Starovoitov <ast@plumgrid.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/tcp_offload.c13
1 file changed, 5 insertions, 8 deletions
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 3a7525e6c086..533c58a5cfb7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -18,6 +18,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
18 netdev_features_t features) 18 netdev_features_t features)
19{ 19{
20 struct sk_buff *segs = ERR_PTR(-EINVAL); 20 struct sk_buff *segs = ERR_PTR(-EINVAL);
21 unsigned int sum_truesize = 0;
21 struct tcphdr *th; 22 struct tcphdr *th;
22 unsigned int thlen; 23 unsigned int thlen;
23 unsigned int seq; 24 unsigned int seq;
@@ -102,13 +103,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
102 if (copy_destructor) { 103 if (copy_destructor) {
103 skb->destructor = gso_skb->destructor; 104 skb->destructor = gso_skb->destructor;
104 skb->sk = gso_skb->sk; 105 skb->sk = gso_skb->sk;
105 /* {tcp|sock}_wfree() use exact truesize accounting : 106 sum_truesize += skb->truesize;
106 * sum(skb->truesize) MUST be exactly be gso_skb->truesize
107 * So we account mss bytes of 'true size' for each segment.
108 * The last segment will contain the remaining.
109 */
110 skb->truesize = mss;
111 gso_skb->truesize -= mss;
112 } 107 }
113 skb = skb->next; 108 skb = skb->next;
114 th = tcp_hdr(skb); 109 th = tcp_hdr(skb);
@@ -125,7 +120,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
125 if (copy_destructor) { 120 if (copy_destructor) {
126 swap(gso_skb->sk, skb->sk); 121 swap(gso_skb->sk, skb->sk);
127 swap(gso_skb->destructor, skb->destructor); 122 swap(gso_skb->destructor, skb->destructor);
128 swap(gso_skb->truesize, skb->truesize); 123 sum_truesize += skb->truesize;
124 atomic_add(sum_truesize - gso_skb->truesize,
125 &skb->sk->sk_wmem_alloc);
129 } 126 }
130 127
131 delta = htonl(oldlen + (skb_tail_pointer(skb) - 128 delta = htonl(oldlen + (skb_tail_pointer(skb) -