diff options
author | Eric Dumazet <edumazet@google.com> | 2013-10-20 23:47:29 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-10-21 18:49:39 -0400 |
commit | d3e5e0062de5f2c6444455b5708a62a50c93a50c (patch) | |
tree | b384b9a84a28d34ae68afbd3902a4237e0eb9e0a /net/ipv6 | |
parent | 212124dd8a9b9b25a689b6a2025d315f0e6afe75 (diff) |
ipv6: gso: make ipv6_gso_segment() stackable
In order to support GSO on SIT tunnels, we need to make
ipv6_gso_segment() stackable.
It should not assume network header starts right after mac
header.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r-- | net/ipv6/ip6_offload.c | 23 |
1 file changed, 17 insertions, 6 deletions
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 5c2fc1d04196..f9b33d82bb9d 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -90,6 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
90 | u8 *prevhdr; | 90 | u8 *prevhdr; |
91 | int offset = 0; | 91 | int offset = 0; |
92 | bool tunnel; | 92 | bool tunnel; |
93 | int nhoff; | ||
93 | 94 | ||
94 | if (unlikely(skb_shinfo(skb)->gso_type & | 95 | if (unlikely(skb_shinfo(skb)->gso_type & |
95 | ~(SKB_GSO_UDP | | 96 | ~(SKB_GSO_UDP | |
@@ -103,10 +104,16 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
103 | 0))) | 104 | 0))) |
104 | goto out; | 105 | goto out; |
105 | 106 | ||
107 | skb_reset_network_header(skb); | ||
108 | nhoff = skb_network_header(skb) - skb_mac_header(skb); | ||
106 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | 109 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) |
107 | goto out; | 110 | goto out; |
108 | 111 | ||
109 | tunnel = skb->encapsulation; | 112 | tunnel = SKB_GSO_CB(skb)->encap_level > 0; |
113 | if (tunnel) | ||
114 | features = skb->dev->hw_enc_features & netif_skb_features(skb); | ||
115 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); | ||
116 | |||
110 | ipv6h = ipv6_hdr(skb); | 117 | ipv6h = ipv6_hdr(skb); |
111 | __skb_pull(skb, sizeof(*ipv6h)); | 118 | __skb_pull(skb, sizeof(*ipv6h)); |
112 | segs = ERR_PTR(-EPROTONOSUPPORT); | 119 | segs = ERR_PTR(-EPROTONOSUPPORT); |
@@ -123,13 +130,17 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
123 | goto out; | 130 | goto out; |
124 | 131 | ||
125 | for (skb = segs; skb; skb = skb->next) { | 132 | for (skb = segs; skb; skb = skb->next) { |
126 | ipv6h = ipv6_hdr(skb); | 133 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); |
127 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | 134 | ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); |
128 | sizeof(*ipv6h)); | 135 | if (tunnel) { |
136 | skb_reset_inner_headers(skb); | ||
137 | skb->encapsulation = 1; | ||
138 | } | ||
139 | skb->network_header = (u8 *)ipv6h - skb->head; | ||
140 | |||
129 | if (!tunnel && proto == IPPROTO_UDP) { | 141 | if (!tunnel && proto == IPPROTO_UDP) { |
130 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | 142 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); |
131 | fptr = (struct frag_hdr *)(skb_network_header(skb) + | 143 | fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); |
132 | unfrag_ip6hlen); | ||
133 | fptr->frag_off = htons(offset); | 144 | fptr->frag_off = htons(offset); |
134 | if (skb->next != NULL) | 145 | if (skb->next != NULL) |
135 | fptr->frag_off |= htons(IP6_MF); | 146 | fptr->frag_off |= htons(IP6_MF); |