Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c |  2
-rw-r--r--  net/core/dev.c        | 70
-rw-r--r--  net/core/skbuff.c     | 23
-rw-r--r--  net/ipv4/af_inet.c    | 10
-rw-r--r--  net/ipv4/tcp.c        | 16
-rw-r--r--  net/ipv4/tcp_ipv4.c   |  2
-rw-r--r--  net/ipv6/af_inet6.c   | 30
-rw-r--r--  net/ipv6/tcp_ipv6.c   |  2
8 files changed, 111 insertions, 44 deletions
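
The hunks below lean on a small family of GRO offset helpers -- skb_gro_offset(), skb_gro_len(), skb_gro_pull() and skb_gro_reset_offset() -- that are defined outside this diff, in include/linux/netdevice.h. As a rough sketch of the shape they are assumed to have (a cursor kept in the GRO control block so headers can be parsed without moving skb->data; the data_offset field name is an assumption, it does not appear in this diff):

/* Sketch only: assumed shape of the helpers this patch relies on.
 * The real definitions live in include/linux/netdevice.h and may differ.
 */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;	/* bytes already consumed */
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;	/* bytes remaining */
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;	/* advance without touching skb->data */
}

static inline void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;	/* start of a fresh packet */
}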
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 2eb057a74654..378fa69d625a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -98,6 +98,8 @@ drop:
 int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		      unsigned int vlan_tci, struct sk_buff *skb)
 {
+	skb_gro_reset_offset(skb);
+
 	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
 }
 EXPORT_SYMBOL(vlan_gro_receive);
diff --git a/net/core/dev.c b/net/core/dev.c
index cd23ae15a1d5..df406dcf7482 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -215,6 +215,13 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
 }
 
+static inline void *skb_gro_mac_header(struct sk_buff *skb)
+{
+	return skb_headlen(skb) ? skb_mac_header(skb) :
+	       page_address(skb_shinfo(skb)->frags[0].page) +
+	       skb_shinfo(skb)->frags[0].page_offset;
+}
+
 /* Device list insertion */
 static int list_netdevice(struct net_device *dev)
 {
@@ -2350,7 +2357,6 @@ static int napi_gro_complete(struct sk_buff *skb)
 
 out:
 	skb_shinfo(skb)->gso_size = 0;
-	__skb_push(skb, -skb_network_offset(skb));
 	return netif_receive_skb(skb);
 }
 
@@ -2368,6 +2374,25 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
+void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
+{
+	unsigned int offset = skb_gro_offset(skb);
+
+	hlen += offset;
+	if (hlen <= skb_headlen(skb))
+		return skb->data + offset;
+
+	if (unlikely(!skb_shinfo(skb)->nr_frags ||
+		     skb_shinfo(skb)->frags[0].size <=
+		     hlen - skb_headlen(skb) ||
+		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
+		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+
+	return page_address(skb_shinfo(skb)->frags[0].page) +
+	       skb_shinfo(skb)->frags[0].page_offset + offset;
+}
+EXPORT_SYMBOL(skb_gro_header);
+
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -2388,11 +2413,13 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		struct sk_buff *p;
+		void *mac;
 
 		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
 			continue;
 
-		skb_reset_network_header(skb);
+		skb_set_network_header(skb, skb_gro_offset(skb));
+		mac = skb_gro_mac_header(skb);
 		mac_len = skb->network_header - skb->mac_header;
 		skb->mac_len = mac_len;
 		NAPI_GRO_CB(skb)->same_flow = 0;
@@ -2406,8 +2433,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 			continue;
 
 		if (p->mac_len != mac_len ||
-		    memcmp(skb_mac_header(p), skb_mac_header(skb),
-			   mac_len))
+		    memcmp(skb_mac_header(p), mac, mac_len))
 			NAPI_GRO_CB(p)->same_flow = 0;
 	}
 
@@ -2434,13 +2460,11 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (same_flow)
 		goto ok;
 
-	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
-		__skb_push(skb, -skb_network_offset(skb));
+	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS)
 		goto normal;
-	}
 
 	NAPI_GRO_CB(skb)->count = 1;
-	skb_shinfo(skb)->gso_size = skb->len;
+	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
 	ret = GRO_HELD;
@@ -2488,6 +2512,8 @@ EXPORT_SYMBOL(napi_skb_finish);
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	skb_gro_reset_offset(skb);
+
 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -2506,6 +2532,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
+	struct ethhdr *eth;
 
 	napi->skb = NULL;
 
@@ -2525,13 +2552,23 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 	skb->len += info->len;
 	skb->truesize += info->len;
 
-	if (!pskb_may_pull(skb, ETH_HLEN)) {
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	eth = skb_gro_header(skb, sizeof(*eth));
+	if (!eth) {
 		napi_reuse_skb(napi, skb);
 		skb = NULL;
 		goto out;
 	}
 
-	skb->protocol = eth_type_trans(skb, dev);
+	skb_gro_pull(skb, sizeof(*eth));
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling. We'll fix it up properly at the end.
+	 */
+	skb->protocol = eth->h_proto;
 
 	skb->ip_summed = info->ip_summed;
 	skb->csum = info->csum;
@@ -2544,10 +2581,21 @@ EXPORT_SYMBOL(napi_fraginfo_skb);
 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
 	int err = NET_RX_SUCCESS;
+	int may;
 
 	switch (ret) {
 	case GRO_NORMAL:
-		return netif_receive_skb(skb);
+	case GRO_HELD:
+		may = pskb_may_pull(skb, skb_gro_offset(skb));
+		BUG_ON(!may);
+
+		skb->protocol = eth_type_trans(skb, napi->dev);
+
+		if (ret == GRO_NORMAL)
+			return netif_receive_skb(skb);
+
+		skb_gro_pull(skb, -ETH_HLEN);
+		break;
 
 	case GRO_DROP:
 		err = NET_RX_DROP;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2e5f2ca3bdcd..f9f4065a7e9b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2584,17 +2584,21 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct sk_buff *p = *head;
 	struct sk_buff *nskb;
 	unsigned int headroom;
-	unsigned int hlen = p->data - skb_mac_header(p);
-	unsigned int len = skb->len;
+	unsigned int len = skb_gro_len(skb);
 
-	if (hlen + p->len + len >= 65536)
+	if (p->len + len >= 65536)
 		return -E2BIG;
 
 	if (skb_shinfo(p)->frag_list)
 		goto merge;
-	else if (!skb_headlen(p) && !skb_headlen(skb) &&
-		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
+	else if (skb_headlen(skb) <= skb_gro_offset(skb) &&
+		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <=
 		 MAX_SKB_FRAGS) {
+		skb_shinfo(skb)->frags[0].page_offset +=
+			skb_gro_offset(skb) - skb_headlen(skb);
+		skb_shinfo(skb)->frags[0].size -=
+			skb_gro_offset(skb) - skb_headlen(skb);
+
 		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
 		       skb_shinfo(skb)->frags,
 		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -2611,7 +2615,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	}
 
 	headroom = skb_headroom(p);
-	nskb = netdev_alloc_skb(p->dev, headroom);
+	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
 	if (unlikely(!nskb))
 		return -ENOMEM;
 
@@ -2619,12 +2623,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	nskb->mac_len = p->mac_len;
 
 	skb_reserve(nskb, headroom);
+	__skb_put(nskb, skb_gro_offset(p));
 
-	skb_set_mac_header(nskb, -hlen);
+	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
 	skb_set_network_header(nskb, skb_network_offset(p));
 	skb_set_transport_header(nskb, skb_transport_offset(p));
 
-	memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
+	__skb_pull(p, skb_gro_offset(p));
+	memcpy(skb_mac_header(nskb), skb_mac_header(p),
+	       p->data - skb_mac_header(p));
 
 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
 	skb_shinfo(nskb)->frag_list = p;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 743f5542d65a..d6770f295d5b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1253,10 +1253,10 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	int proto;
 	int id;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+	iph = skb_gro_header(skb, sizeof(*iph));
+	if (unlikely(!iph))
 		goto out;
 
-	iph = ip_hdr(skb);
 	proto = iph->protocol & (MAX_INET_PROTOS - 1);
 
 	rcu_read_lock();
@@ -1270,7 +1270,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
 		goto out_unlock;
 
-	flush = ntohs(iph->tot_len) != skb->len ||
+	flush = ntohs(iph->tot_len) != skb_gro_len(skb) ||
 		iph->frag_off != htons(IP_DF);
 	id = ntohs(iph->id);
 
@@ -1298,8 +1298,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	}
 
 	NAPI_GRO_CB(skb)->flush |= flush;
-	__skb_pull(skb, sizeof(*iph));
-	skb_reset_transport_header(skb);
+	skb_gro_pull(skb, sizeof(*iph));
+	skb_set_transport_header(skb, skb_gro_offset(skb));
 
 	pp = ops->gro_receive(head, skb);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cd71b84e483..1cd608253940 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2481,19 +2481,19 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	unsigned int mss = 1;
 	int flush = 1;
 
-	if (!pskb_may_pull(skb, sizeof(*th)))
+	th = skb_gro_header(skb, sizeof(*th));
+	if (unlikely(!th))
 		goto out;
 
-	th = tcp_hdr(skb);
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
 		goto out;
 
-	if (!pskb_may_pull(skb, thlen))
+	th = skb_gro_header(skb, thlen);
+	if (unlikely(!th))
 		goto out;
 
-	th = tcp_hdr(skb);
-	__skb_pull(skb, thlen);
+	skb_gro_pull(skb, thlen);
 
 	flags = tcp_flag_word(th);
 
@@ -2521,10 +2521,10 @@ found:
 	flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
 	flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
 
-	total = p->len;
+	total = skb_gro_len(p);
 	mss = skb_shinfo(p)->gso_size;
 
-	flush |= skb->len > mss || skb->len <= 0;
+	flush |= skb_gro_len(skb) > mss || !skb_gro_len(skb);
 	flush |= ntohl(th2->seq) + total != ntohl(th->seq);
 
 	if (flush || skb_gro_receive(head, skb)) {
@@ -2537,7 +2537,7 @@ found:
 	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
-	flush = skb->len < mss;
+	flush = skb_gro_len(skb) < mss;
 	flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
 			  TCP_FLAG_SYN | TCP_FLAG_FIN);
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a262..f6b962f56ab4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2355,7 +2355,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr,
+		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
 				  skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a8..bd91eadcbe3f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -799,24 +799,34 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	int proto;
 	__wsum csum;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+	iph = skb_gro_header(skb, sizeof(*iph));
+	if (unlikely(!iph))
 		goto out;
 
-	iph = ipv6_hdr(skb);
-	__skb_pull(skb, sizeof(*iph));
+	skb_gro_pull(skb, sizeof(*iph));
+	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	flush += ntohs(iph->payload_len) != skb->len;
+	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
 
 	rcu_read_lock();
-	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
-	iph = ipv6_hdr(skb);
-	IPV6_GRO_CB(skb)->proto = proto;
+	proto = iph->nexthdr;
 	ops = rcu_dereference(inet6_protos[proto]);
-	if (!ops || !ops->gro_receive)
-		goto out_unlock;
+	if (!ops || !ops->gro_receive) {
+		__pskb_pull(skb, skb_gro_offset(skb));
+		proto = ipv6_gso_pull_exthdrs(skb, proto);
+		skb_gro_pull(skb, -skb_transport_offset(skb));
+		skb_reset_transport_header(skb);
+		__skb_push(skb, skb_gro_offset(skb));
+
+		if (!ops || !ops->gro_receive)
+			goto out_unlock;
+
+		iph = ipv6_hdr(skb);
+	}
+
+	IPV6_GRO_CB(skb)->proto = proto;
 
 	flush--;
-	skb_reset_transport_header(skb);
 	nlen = skb_network_header_len(skb);
 
 	for (p = *head; p; p = p->next) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e5b85d45bee8..00f1269e11e9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -948,7 +948,7 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
 				  skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
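
Taken together, the per-protocol hunks above follow one pattern: peek at the next header with skb_gro_header(), which only falls back to pskb_may_pull() when the header is neither in the linear area nor in the first page fragment, then advance the GRO cursor with skb_gro_pull() instead of pulling skb->data. A minimal sketch of that pattern for a hypothetical handler (my_gro_receive and struct myhdr are illustrative only, not part of this patch):

struct myhdr {				/* hypothetical protocol header */
	__be16	len;
	__be16	type;
};

static struct sk_buff **my_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb)
{
	struct myhdr *mh;

	/* Peek without copying; NULL means even pskb_may_pull() failed. */
	mh = skb_gro_header(skb, sizeof(*mh));
	if (unlikely(!mh)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Advance the GRO offset; skb->data stays untouched until the
	 * packet leaves the GRO path (napi_gro_complete/napi_frags_finish).
	 */
	skb_gro_pull(skb, sizeof(*mh));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	return NULL;			/* no merge candidate in this sketch */
}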