about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2014-11-04 12:06:52 -0500
committerDavid S. Miller <davem@davemloft.net>2014-11-05 16:30:03 -0500
commit4bcb877d257c87298aedead1ffeaba0d5df1991d (patch)
tree00b616716253b6bed2510505e38f345b1a165efa
parent63487babf08d6d67483c67ed21d8cea6674a44ec (diff)
udp: Offload outer UDP tunnel csum if available
In __skb_udp_tunnel_segment if outer UDP checksums are enabled and ip_summed is not already CHECKSUM_PARTIAL, set up checksum offload if device features allow it. Signed-off-by: Tom Herbert <therbert@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/ipv4/udp_offload.c | 52
1 file changed, 36 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6480cea7aa53..a774711a88b9 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t features,
 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
-	__be16 new_protocol)
+	__be16 new_protocol, bool is_ipv6)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -39,7 +39,9 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t enc_features;
 	int udp_offset, outer_hlen;
 	unsigned int oldlen;
-	bool need_csum;
+	bool need_csum = !!(skb_shinfo(skb)->gso_type &
+			    SKB_GSO_UDP_TUNNEL_CSUM);
+	bool offload_csum = false, dont_encap = need_csum;
 
 	oldlen = (u16)~skb->len;
 
@@ -52,10 +54,12 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = new_protocol;
+	skb->encap_hdr_csum = need_csum;
 
-	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-	if (need_csum)
-		skb->encap_hdr_csum = 1;
+	/* Try to offload checksum if possible */
+	offload_csum = !!(need_csum &&
+			  (skb->dev->features &
+			   (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & features;
@@ -72,11 +76,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	do {
 		struct udphdr *uh;
 		int len;
+		__be32 delta;
 
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
+		if (dont_encap) {
+			skb->encapsulation = 0;
+			skb->ip_summed = CHECKSUM_NONE;
+		} else {
+			/* Only set up inner headers if we might be offloading
+			 * inner checksum.
+			 */
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}
 
 		skb->mac_len = mac_len;
+		skb->protocol = protocol;
 
 		skb_push(skb, outer_hlen);
 		skb_reset_mac_header(skb);
@@ -86,19 +100,25 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 		uh = udp_hdr(skb);
 		uh->len = htons(len);
 
-		if (need_csum) {
-			__be32 delta = htonl(oldlen + len);
+		if (!need_csum)
+			continue;
+
+		delta = htonl(oldlen + len);
+
+		uh->check = ~csum_fold((__force __wsum)
+				       ((__force u32)uh->check +
+					(__force u32)delta));
 
-			uh->check = ~csum_fold((__force __wsum)
-					       ((__force u32)uh->check +
-						(__force u32)delta));
+		if (offload_csum) {
+			skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->csum_start = skb_transport_header(skb) - skb->head;
+			skb->csum_offset = offsetof(struct udphdr, check);
+		} else {
 			uh->check = gso_make_checksum(skb, ~uh->check);
 
 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
 		}
-
-		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
@@ -134,7 +154,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	}
 
 	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
-					protocol);
+					protocol, is_ipv6);
 
 out_unlock:
 	rcu_read_unlock();