author	Tom Herbert <therbert@google.com>	2014-09-20 17:52:28 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-26 00:22:46 -0400
commit	d020f8f73318589bf41f864b7f89f95669350873 (patch)
tree	d31c3e518fc4ea7dda9bacef18daa8d151f65a73
parent	2fdbfea5735d3deb30a8782c57f7210cb034e69d (diff)
tcp: move logic out of tcp_v[64]_gso_send_check
In tcp_v[46]_gso_send_check the TCP checksum is initialized to the
pseudo header checksum using __tcp_v[46]_send_check. We can move this
logic into new tcp[46]_gso_segment functions, to be done when
ip_summed != CHECKSUM_PARTIAL (ip_summed == CHECKSUM_PARTIAL should be
the common case, possibly always true when taking the GSO path). After
this change tcp_v[46]_gso_send_check is a no-op.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/ipv4/tcp_offload.c   | 39
-rw-r--r--	net/ipv6/tcpv6_offload.c | 37
2 files changed, 47 insertions(+), 29 deletions(-)
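For context (not part of this patch): "setting up the pseudo header" means
storing the inverted pseudo-header sum in th->check and recording where the
device, or the software fallback, must finish the checksum over the TCP
header and payload. A minimal sketch of what __tcp_v4_send_check() does,
simplified from the kernel sources of this era; the wrapper name below is
hypothetical, while csum_tcpudp_magic(), csum_start and csum_offset are the
real kernel interfaces:

/* Sketch only, not verbatim kernel code: seed th->check with the
 * inverted pseudo-header sum and mark where checksumming must be
 * completed (the CHECKSUM_PARTIAL contract).
 */
static void tcp_v4_seed_pseudo_csum(struct sk_buff *skb,
				    __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	/* Pseudo header covers saddr, daddr, segment length, IPPROTO_TCP */
	th->check = ~csum_tcpudp_magic(saddr, daddr, skb->len,
				       IPPROTO_TCP, 0);
	skb->csum_start  = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

With th->check seeded this way, the new tcp[46]_gso_segment functions below
can restore the CHECKSUM_PARTIAL invariant for the rare skbs that reach the
GSO path without it.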
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 72912533a191..7cd12b0458ff 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -29,6 +29,28 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
 	}
 }
 
+struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+				 netdev_features_t features)
+{
+	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+		return ERR_PTR(-EINVAL);
+
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+		const struct iphdr *iph = ip_hdr(skb);
+		struct tcphdr *th = tcp_hdr(skb);
+
+		/* Set up checksum pseudo header, usually expect stack to
+		 * have done this already.
+		 */
+
+		th->check = 0;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
+	}
+
+	return tcp_gso_segment(skb, features);
+}
+
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features)
 {
@@ -44,9 +66,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	__sum16 newcheck;
 	bool ooo_okay, copy_destructor;
 
-	if (!pskb_may_pull(skb, sizeof(*th)))
-		goto out;
-
 	th = tcp_hdr(skb);
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
@@ -271,18 +290,6 @@ EXPORT_SYMBOL(tcp_gro_complete);
 
 static int tcp_v4_gso_send_check(struct sk_buff *skb)
 {
-	const struct iphdr *iph;
-	struct tcphdr *th;
-
-	if (!pskb_may_pull(skb, sizeof(*th)))
-		return -EINVAL;
-
-	iph = ip_hdr(skb);
-	th = tcp_hdr(skb);
-
-	th->check = 0;
-	skb->ip_summed = CHECKSUM_PARTIAL;
-	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
 	return 0;
 }
 
@@ -314,7 +321,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 static const struct net_offload tcpv4_offload = {
 	.callbacks = {
 		.gso_send_check	=	tcp_v4_gso_send_check,
-		.gso_segment	=	tcp_gso_segment,
+		.gso_segment	=	tcp4_gso_segment,
 		.gro_receive	=	tcp4_gro_receive,
 		.gro_complete	=	tcp4_gro_complete,
 	},
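For context (not part of this patch): the .gso_segment swap above matters
because the inet GSO layer dispatches through this offload table. A rough
sketch of that dispatch, simplified from inet_gso_segment() of this era;
the helper name and the 'proto' parameter (pulled from the IP header) are
hypothetical, and error handling is trimmed:

/* Sketch: how the inet GSO layer reaches the per-protocol segmenter
 * registered in tcpv4_offload above.  For IPPROTO_TCP this now
 * resolves to tcp4_gso_segment().  Caller holds rcu_read_lock().
 */
static struct sk_buff *inet_dispatch_gso(struct sk_buff *skb,
					 netdev_features_t features,
					 u8 proto)
{
	const struct net_offload *ops;

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		return ops->callbacks.gso_segment(skb, features);

	return ERR_PTR(-EPROTONOSUPPORT);
}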
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index dbb3d9262bf6..96253154db3a 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -17,18 +17,6 @@
 
 static int tcp_v6_gso_send_check(struct sk_buff *skb)
 {
-	const struct ipv6hdr *ipv6h;
-	struct tcphdr *th;
-
-	if (!pskb_may_pull(skb, sizeof(*th)))
-		return -EINVAL;
-
-	ipv6h = ipv6_hdr(skb);
-	th = tcp_hdr(skb);
-
-	th->check = 0;
-	skb->ip_summed = CHECKSUM_PARTIAL;
-	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
 	return 0;
 }
 
@@ -58,10 +46,33 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 	return tcp_gro_complete(skb);
 }
 
+struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+				 netdev_features_t features)
+{
+	struct tcphdr *th;
+
+	if (!pskb_may_pull(skb, sizeof(*th)))
+		return ERR_PTR(-EINVAL);
+
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+		struct tcphdr *th = tcp_hdr(skb);
+
+		/* Set up pseudo header, usually expect stack to have done
+		 * this.
+		 */
+
+		th->check = 0;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
+	}
+
+	return tcp_gso_segment(skb, features);
+}
 static const struct net_offload tcpv6_offload = {
 	.callbacks = {
 		.gso_send_check	=	tcp_v6_gso_send_check,
-		.gso_segment	=	tcp_gso_segment,
+		.gso_segment	=	tcp6_gso_segment,
 		.gro_receive	=	tcp6_gro_receive,
 		.gro_complete	=	tcp6_gro_complete,
 	},
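For completeness (not part of this patch): these offload tables take effect
because they are registered once at boot. The v6 side does roughly the
following, per net/ipv6/tcpv6_offload.c of this era:

int __init tcpv6_offload_init(void)
{
	return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
}

The v4 table is registered analogously via
inet_add_offload(&tcpv4_offload, IPPROTO_TCP) in the ipv4 offload init path.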