author     Tom Herbert <therbert@google.com>       2014-06-04 20:20:02 -0400
committer  David S. Miller <davem@davemloft.net>   2014-06-05 01:46:38 -0400
commit     7e2b10c1e52ca37fb522be49f4be367f9311d0cd (patch)
tree       67fd8a9e9b90f732c45f406df145c8a149d24a53
parent     77157e1973cbdb8d60bdb0ec749d6014bedc5bd5 (diff)
net: Support for multiple checksums with gso
When creating a GSO packet segment we may need to set more than
one checksum in the packet (for instance a TCP checksum and a
UDP checksum for VXLAN encapsulation). To be efficient, we want
to do checksum calculation for any part of the packet at most once.

This patch adds a csum_start offset to skb_gso_cb. This tracks the
starting offset for skb->csum, which is initially set in skb_segment.
When a protocol needs to compute a transport checksum it calls
gso_make_checksum, which computes the checksum value from the start
of the transport header to csum_start and then adds in skb->csum to get
the full checksum. skb->csum and csum_start are then updated to reflect
the checksum of the resultant packet starting from the transport header.

This patch also adds a flag to skbuff, encap_hdr_csum, which is set
in *gso_segment functions to indicate that a tunnel protocol needs
checksum calculation.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
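
The sketch below is illustrative only and is not part of this commit: it shows
how a tunnel protocol's *gso_segment path could consume the new helper for each
segment produced by skb_segment(). The example_tnl_hdr layout and the example_
function name are hypothetical; gso_make_checksum() and SKB_GSO_CB()->csum_start
are what this patch adds, and skb_transport_header() is the existing accessor.

#include <linux/skbuff.h>

/* Hypothetical tunnel header that carries an Internet checksum covering
 * the header itself plus everything that follows it.
 */
struct example_tnl_hdr {
	__be16	flags;
	__be16	proto;
	__sum16	check;
	__be16	reserved;
};

/* Called once per segment. skb_segment() has already set csum_start and
 * left the checksum of the bytes from csum_start to the end of the
 * segment in skb->csum.
 */
static void example_fill_tnl_csum(struct sk_buff *seg)
{
	struct example_tnl_hdr *th;

	th = (struct example_tnl_hdr *)skb_transport_header(seg);
	th->check = 0;
	/* Folded checksum from the transport header to the end of the
	 * segment. Passing 0 as res records that, once th->check is
	 * written, the region starting at the (updated) csum_start sums
	 * to zero; a protocol with a pseudo-header would pass the
	 * complement of its pseudo-header checksum instead.
	 */
	th->check = gso_make_checksum(seg, 0);
}

Because gso_make_checksum() folds skb->csum into the result and then rewinds
csum_start to the transport header, every byte range is summed at most once
even when an outer header (for example the UDP header of a VXLAN tunnel) needs
its own checksum in addition to the inner one.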
-rw-r--r--   include/linux/skbuff.h      26
-rw-r--r--   net/core/skbuff.c            8
-rw-r--r--   net/ipv4/ip_tunnel_core.c    8
3 files changed, 40 insertions, 2 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7a9beeb1c458..d8d397acb52c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -567,7 +567,8 @@ struct sk_buff {
 	 * headers if needed
 	 */
 	__u8			encapsulation:1;
-	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
+	__u8			encap_hdr_csum:1;
+	/* 5/7 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -2988,6 +2989,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 struct skb_gso_cb {
 	int	mac_offset;
 	int	encap_level;
+	__u16	csum_start;
 };
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
 
@@ -3012,6 +3014,28 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
 	return 0;
 }
 
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (checksum from csum_start to end of packet).
+ * skb->csum and csum_start are then updated to reflect the checksum of the
+ * resultant packet starting from the transport header -- the resultant
+ * checksum is in the res argument (i.e. normally zero or ~ of checksum of
+ * a pseudo header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+	int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
+		   skb_transport_offset(skb);
+	__u16 csum;
+
+	csum = csum_fold(csum_partial(skb_transport_header(skb),
+				      plen, skb->csum));
+	skb->csum = res;
+	SKB_GSO_CB(skb)->csum_start -= plen;
+
+	return csum;
+}
+
 static inline bool skb_is_gso(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_size;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3f6c7e8be8a4..05f4bef2ce12 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2885,7 +2885,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
-	csum = !!can_checksum_protocol(features, proto);
+	csum = !head_skb->encap_hdr_csum &&
+	       !!can_checksum_protocol(features, proto);
+
 	__skb_push(head_skb, doffset);
 	headroom = skb_headroom(head_skb);
 	pos = skb_headlen(head_skb);
@@ -2983,6 +2985,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
+			SKB_GSO_CB(nskb)->csum_start =
+			    skb_headroom(nskb) + offset;
 			continue;
 		}
 
@@ -3052,6 +3056,8 @@ perform_csum_check:
 			nskb->csum = skb_checksum(nskb, doffset,
 						  nskb->len - doffset, 0);
 			nskb->ip_summed = CHECKSUM_NONE;
+			SKB_GSO_CB(nskb)->csum_start =
+			    skb_headroom(nskb) + doffset;
 		}
 	} while ((offset += len) < head_skb->len);
 
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 847e69cbff7e..f4c987bb7e94 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -135,6 +135,14 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 		return skb;
 	}
 
+	/* If packet is not gso and we are resolving any partial checksum,
+	 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
+	 * on the outer header without confusing devices that implement
+	 * NETIF_F_IP_CSUM with encapsulation.
+	 */
+	if (csum_help)
+		skb->encapsulation = 0;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
 		err = skb_checksum_help(skb);
 		if (unlikely(err))