aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_offload.c
diff options
context:
space:
mode:
authorJerry Chu <hkchu@google.com>2013-12-11 23:53:45 -0500
committerDavid S. Miller <davem@davemloft.net>2013-12-12 13:47:53 -0500
commit299603e8370a93dd5d8e8d800f0dff1ce2c53d36 (patch)
tree2a10106aabe88c278a0cd02b93af1add04f5ffcc /net/ipv4/tcp_offload.c
parenta46dc748caea185d4d0978280a1af0112bf6a8f8 (diff)
net-gro: Prepare GRO stack for the upcoming tunneling support
This patch modifies the GRO stack to avoid the use of "network_header" and associated macros like ip_hdr() and ipv6_hdr() in order to allow an arbitrary number of IP hdrs (v4 or v6) to be used in the encapsulation chain. This lays the foundation for various IP tunneling support (IP-in-IP, GRE, VXLAN, SIT,...) to be added later. With this patch, the GRO stack traversing now is mostly based on skb_gro_offset rather than special hdr offsets saved in skb (e.g., skb->network_header). As a result all but the top layer (i.e., the transport layer) must have hdrs of the same length in order for a pkt to be considered for aggregation. Therefore when adding a new encap layer (e.g., for tunneling), one must check and skip flows (e.g., by setting NAPI_GRO_CB(p)->same_flow to 0) that have a different hdr length. Note that unlike the network header, the transport header can and will continue to be set by the GRO code since there will be at most one "transport layer" in the encap chain. Signed-off-by: H.K. Jerry Chu <hkchu@google.com> Suggested-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_offload.c')
-rw-r--r--net/ipv4/tcp_offload.c9
1 files changed, 5 insertions, 4 deletions
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 05606353c7e7..2658a27f540d 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -240,7 +240,7 @@ int tcp_gro_complete(struct sk_buff *skb)
240{ 240{
241 struct tcphdr *th = tcp_hdr(skb); 241 struct tcphdr *th = tcp_hdr(skb);
242 242
243 skb->csum_start = skb_transport_header(skb) - skb->head; 243 skb->csum_start = (unsigned char *)th - skb->head;
244 skb->csum_offset = offsetof(struct tcphdr, check); 244 skb->csum_offset = offsetof(struct tcphdr, check);
245 skb->ip_summed = CHECKSUM_PARTIAL; 245 skb->ip_summed = CHECKSUM_PARTIAL;
246 246
@@ -272,6 +272,7 @@ static int tcp_v4_gso_send_check(struct sk_buff *skb)
272 272
273static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 273static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
274{ 274{
275 /* Use the IP hdr immediately proceeding for this transport */
275 const struct iphdr *iph = skb_gro_network_header(skb); 276 const struct iphdr *iph = skb_gro_network_header(skb);
276 __wsum wsum; 277 __wsum wsum;
277 278
@@ -303,13 +304,13 @@ skip_csum:
303 return tcp_gro_receive(head, skb); 304 return tcp_gro_receive(head, skb);
304} 305}
305 306
306static int tcp4_gro_complete(struct sk_buff *skb) 307static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
307{ 308{
308 const struct iphdr *iph = ip_hdr(skb); 309 const struct iphdr *iph = ip_hdr(skb);
309 struct tcphdr *th = tcp_hdr(skb); 310 struct tcphdr *th = tcp_hdr(skb);
310 311
311 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 312 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
312 iph->saddr, iph->daddr, 0); 313 iph->daddr, 0);
313 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 314 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
314 315
315 return tcp_gro_complete(skb); 316 return tcp_gro_complete(skb);