author	Jiri Pirko <jiri@mellanox.com>	2018-11-13 17:22:48 -0500
committer	David S. Miller <davem@davemloft.net>	2018-11-16 22:51:08 -0500
commit	32764c66faba8fff950346776eb46801b67c610f
tree	47026733b2c5c8b35ece9a4dfb195f4103ad2683
parent	99310e732a75c40fc9843f52b306fc9943bcce9d
net: 8021q: move vlan offload registrations into vlan_core
Currently, the vlan packet offloads are registered only upon 8021q module
load. However, even without this module loaded, the offloads could be
utilized, for example by the openvswitch datapath. As reported by Michael,
having them available yields a 2x to 5x performance improvement, depending
on the testcase.

So move the vlan offload registrations into vlan_core and make them
available even without the 8021q module loaded.

Reported-by: Michael Shteinbok <michaelsh86@gmail.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Tested-by: Michael Shteinbok <michaelsh86@gmail.com>
Reviewed-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/8021q/vlan.c	96
-rw-r--r--	net/8021q/vlan_core.c	99
2 files changed, 99 insertions(+), 96 deletions(-)
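For readers unfamiliar with the pattern being moved: GRO offloads are
registered per-ethertype through struct packet_offload and dev_add_offload().
Below is a minimal sketch of the same registration for a made-up ethertype;
ETH_P_EXAMPLE and the example_* names are hypothetical and not part of this
patch, and the gro_receive signature assumes a ~4.19+ kernel (list_head-based
GRO lists), matching the code being moved here.

/* Hypothetical sketch, not part of this patch: register GRO callbacks
 * for a made-up ethertype using the same packet_offload pattern that
 * the patch moves into vlan_core.c.
 */
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#define ETH_P_EXAMPLE 0x88ff	/* hypothetical ethertype */

static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	/* A real handler would parse its header, compare flows, and
	 * aggregate; this sketch just asks GRO to flush immediately.
	 */
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

static int example_gro_complete(struct sk_buff *skb, int nhoff)
{
	return 0;	/* nothing to fix up in this sketch */
}

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_EXAMPLE),
	.priority = 10,
	.callbacks = {
		.gro_receive = example_gro_receive,
		.gro_complete = example_gro_complete,
	},
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);	/* void return; never fails */
	return 0;
}
fs_initcall(example_offload_init);

The .priority value of 10 simply copies what the patch uses for the VLAN
ethertypes.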
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 1b7a375c6616..aef1a977279c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -648,93 +648,6 @@ out:
 	return err;
 }
 
-static struct sk_buff *vlan_gro_receive(struct list_head *head,
-					struct sk_buff *skb)
-{
-	const struct packet_offload *ptype;
-	unsigned int hlen, off_vlan;
-	struct sk_buff *pp = NULL;
-	struct vlan_hdr *vhdr;
-	struct sk_buff *p;
-	__be16 type;
-	int flush = 1;
-
-	off_vlan = skb_gro_offset(skb);
-	hlen = off_vlan + sizeof(*vhdr);
-	vhdr = skb_gro_header_fast(skb, off_vlan);
-	if (skb_gro_header_hard(skb, hlen)) {
-		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
-		if (unlikely(!vhdr))
-			goto out;
-	}
-
-	type = vhdr->h_vlan_encapsulated_proto;
-
-	rcu_read_lock();
-	ptype = gro_find_receive_by_type(type);
-	if (!ptype)
-		goto out_unlock;
-
-	flush = 0;
-
-	list_for_each_entry(p, head, list) {
-		struct vlan_hdr *vhdr2;
-
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
-
-		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
-		if (compare_vlan_header(vhdr, vhdr2))
-			NAPI_GRO_CB(p)->same_flow = 0;
-	}
-
-	skb_gro_pull(skb, sizeof(*vhdr));
-	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
-
-out_unlock:
-	rcu_read_unlock();
-out:
-	skb_gro_flush_final(skb, pp, flush);
-
-	return pp;
-}
-
-static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
-{
-	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
-	__be16 type = vhdr->h_vlan_encapsulated_proto;
-	struct packet_offload *ptype;
-	int err = -ENOENT;
-
-	rcu_read_lock();
-	ptype = gro_find_complete_by_type(type);
-	if (ptype)
-		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
-
-	rcu_read_unlock();
-	return err;
-}
-
-static struct packet_offload vlan_packet_offloads[] __read_mostly = {
-	{
-		.type = cpu_to_be16(ETH_P_8021Q),
-		.priority = 10,
-		.callbacks = {
-			.gro_receive = vlan_gro_receive,
-			.gro_complete = vlan_gro_complete,
-		},
-	},
-	{
-		.type = cpu_to_be16(ETH_P_8021AD),
-		.priority = 10,
-		.callbacks = {
-			.gro_receive = vlan_gro_receive,
-			.gro_complete = vlan_gro_complete,
-		},
-	},
-};
-
 static int __net_init vlan_init_net(struct net *net)
 {
 	struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -762,7 +675,6 @@ static struct pernet_operations vlan_net_ops = {
 static int __init vlan_proto_init(void)
 {
 	int err;
-	unsigned int i;
 
 	pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
@@ -786,9 +698,6 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err5;
 
-	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
-		dev_add_offload(&vlan_packet_offloads[i]);
-
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -806,13 +715,8 @@ err0:
 
 static void __exit vlan_cleanup_module(void)
 {
-	unsigned int i;
-
 	vlan_ioctl_set(NULL);
 
-	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
-		dev_remove_offload(&vlan_packet_offloads[i]);
-
 	vlan_netlink_fini();
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 57425049faf2..a313165e7a67 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -453,3 +453,102 @@ bool vlan_uses_dev(const struct net_device *dev)
 	return vlan_info->grp.nr_vlan_devs ? true : false;
 }
 EXPORT_SYMBOL(vlan_uses_dev);
+
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+					struct sk_buff *skb)
+{
+	const struct packet_offload *ptype;
+	unsigned int hlen, off_vlan;
+	struct sk_buff *pp = NULL;
+	struct vlan_hdr *vhdr;
+	struct sk_buff *p;
+	__be16 type;
+	int flush = 1;
+
+	off_vlan = skb_gro_offset(skb);
+	hlen = off_vlan + sizeof(*vhdr);
+	vhdr = skb_gro_header_fast(skb, off_vlan);
+	if (skb_gro_header_hard(skb, hlen)) {
+		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+		if (unlikely(!vhdr))
+			goto out;
+	}
+
+	type = vhdr->h_vlan_encapsulated_proto;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (!ptype)
+		goto out_unlock;
+
+	flush = 0;
+
+	list_for_each_entry(p, head, list) {
+		struct vlan_hdr *vhdr2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+		if (compare_vlan_header(vhdr, vhdr2))
+			NAPI_GRO_CB(p)->same_flow = 0;
+	}
+
+	skb_gro_pull(skb, sizeof(*vhdr));
+	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	skb_gro_flush_final(skb, pp, flush);
+
+	return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+	__be16 type = vhdr->h_vlan_encapsulated_proto;
+	struct packet_offload *ptype;
+	int err = -ENOENT;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+	rcu_read_unlock();
+	return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+	{
+		.type = cpu_to_be16(ETH_P_8021Q),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+	{
+		.type = cpu_to_be16(ETH_P_8021AD),
+		.priority = 10,
+		.callbacks = {
+			.gro_receive = vlan_gro_receive,
+			.gro_complete = vlan_gro_complete,
+		},
+	},
+};
+
+static int __init vlan_offload_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+		dev_add_offload(&vlan_packet_offloads[i]);
+
+	return 0;
+}
+
+fs_initcall(vlan_offload_init);
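Design note: the registration now happens via fs_initcall() rather than at
8021q module load. This works because vlan_core.o is linked into the kernel
image whenever CONFIG_VLAN_8021Q is enabled, even as =m (if memory serves,
net/8021q/Makefile builds it with obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) +=
vlan_core.o, forcing it built-in), so the GRO offloads for 802.1Q and
802.1ad are present from boot. That is what lets a built-in user such as
the openvswitch datapath benefit without ever loading 8021q.ko.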