Diffstat (limited to 'net/8021q')

-rw-r--r--  net/8021q/vlan.c          |  97
-rw-r--r--  net/8021q/vlan.h          |  21
-rw-r--r--  net/8021q/vlan_core.c     |  43
-rw-r--r--  net/8021q/vlan_dev.c      | 153
-rw-r--r--  net/8021q/vlan_netlink.c  |   7
-rw-r--r--  net/8021q/vlanproc.c      |  15

6 files changed, 223 insertions(+), 113 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a29c5ab5815c..97da977c2a23 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/rculist.h>
 #include <net/p8022.h>
@@ -41,7 +42,7 @@
 
 /* Global VLAN variables */
 
-int vlan_net_id;
+int vlan_net_id __read_mostly;
 
 /* Our listing of VLAN group(s) */
 static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
@@ -140,7 +141,7 @@ static void vlan_rcu_free(struct rcu_head *rcu)
 	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
 }
 
-void unregister_vlan_dev(struct net_device *dev)
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct net_device *real_dev = vlan->real_dev;
@@ -159,12 +160,13 @@ void unregister_vlan_dev(struct net_device *dev)
 	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
 		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
 	grp->nr_vlans--;
 
-	synchronize_net();
+	vlan_group_set_device(grp, vlan_id, NULL);
+	if (!grp->killall)
+		synchronize_net();
 
-	unregister_netdevice(dev);
+	unregister_netdevice_queue(dev, head);
 
 	/* If the group is now empty, kill off the group. */
 	if (grp->nr_vlans == 0) {
@@ -183,27 +185,6 @@ void unregister_vlan_dev(struct net_device *dev)
 	dev_put(real_dev);
 }
 
-static void vlan_transfer_operstate(const struct net_device *dev,
-				    struct net_device *vlandev)
-{
-	/* Have to respect userspace enforced dormant state
-	 * of real device, also must allow supplicant running
-	 * on VLAN device
-	 */
-	if (dev->operstate == IF_OPER_DORMANT)
-		netif_dormant_on(vlandev);
-	else
-		netif_dormant_off(vlandev);
-
-	if (netif_carrier_ok(dev)) {
-		if (!netif_carrier_ok(vlandev))
-			netif_carrier_on(vlandev);
-	} else {
-		if (netif_carrier_ok(vlandev))
-			netif_carrier_off(vlandev);
-	}
-}
-
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 {
 	const char *name = real_dev->name;
@@ -261,7 +242,7 @@ int register_vlan_dev(struct net_device *dev)
 	/* Account for reference in struct vlan_dev_info */
 	dev_hold(real_dev);
 
-	vlan_transfer_operstate(real_dev, dev);
+	netif_stacked_transfer_operstate(real_dev, dev);
 	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
 
 	/* So, got the sucker initialized, now lets place
@@ -398,6 +379,8 @@ static void vlan_transfer_features(struct net_device *dev,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
+	vlandev->real_num_tx_queues = dev->real_num_tx_queues;
+	BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
 
 	if (old_features != vlandev->features)
 		netdev_features_change(vlandev);
@@ -430,6 +413,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	struct vlan_group *grp;
 	int i, flgs;
 	struct net_device *vlandev;
+	struct vlan_dev_info *vlan;
+	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
@@ -450,7 +435,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 			if (!vlandev)
 				continue;
 
-			vlan_transfer_operstate(dev, vlandev);
+			netif_stacked_transfer_operstate(dev, vlandev);
 		}
 		break;
 
@@ -505,8 +490,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 			if (!(flgs & IFF_UP))
 				continue;
 
-			dev_change_flags(vlandev, flgs & ~IFF_UP);
-			vlan_transfer_operstate(dev, vlandev);
+			vlan = vlan_dev_info(vlandev);
+			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+				dev_change_flags(vlandev, flgs & ~IFF_UP);
+			netif_stacked_transfer_operstate(dev, vlandev);
 		}
 		break;
 
@@ -521,13 +508,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 			if (flgs & IFF_UP)
 				continue;
 
-			dev_change_flags(vlandev, flgs | IFF_UP);
-			vlan_transfer_operstate(dev, vlandev);
+			vlan = vlan_dev_info(vlandev);
+			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+				dev_change_flags(vlandev, flgs | IFF_UP);
+			netif_stacked_transfer_operstate(dev, vlandev);
 		}
 		break;
 
 	case NETDEV_UNREGISTER:
 		/* Delete all VLANs for this dev. */
+		grp->killall = 1;
+
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
 			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
@@ -538,8 +529,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 			if (grp->nr_vlans == 1)
 				i = VLAN_GROUP_ARRAY_LEN;
 
-			unregister_vlan_dev(vlandev);
+			unregister_vlan_dev(vlandev, &list);
 		}
+		unregister_netdevice_many(&list);
 		break;
 	}
 
@@ -645,7 +637,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		unregister_vlan_dev(dev);
+		unregister_vlan_dev(dev, NULL);
 		err = 0;
 		break;
 
@@ -674,49 +666,28 @@ out:
 	return err;
 }
 
-static int vlan_init_net(struct net *net)
+static int __net_init vlan_init_net(struct net *net)
 {
+	struct vlan_net *vn = net_generic(net, vlan_net_id);
 	int err;
-	struct vlan_net *vn;
-
-	err = -ENOMEM;
-	vn = kzalloc(sizeof(struct vlan_net), GFP_KERNEL);
-	if (vn == NULL)
-		goto err_alloc;
-
-	err = net_assign_generic(net, vlan_net_id, vn);
-	if (err < 0)
-		goto err_assign;
 
 	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
 
 	err = vlan_proc_init(net);
-	if (err < 0)
-		goto err_proc;
-
-	return 0;
 
-err_proc:
-	/* nothing */
-err_assign:
-	kfree(vn);
-err_alloc:
 	return err;
 }
 
-static void vlan_exit_net(struct net *net)
+static void __net_exit vlan_exit_net(struct net *net)
 {
-	struct vlan_net *vn;
-
-	vn = net_generic(net, vlan_net_id);
-	rtnl_kill_links(net, &vlan_link_ops);
 	vlan_proc_cleanup(net);
-	kfree(vn);
 }
 
 static struct pernet_operations vlan_net_ops = {
 	.init = vlan_init_net,
 	.exit = vlan_exit_net,
+	.id = &vlan_net_id,
+	.size = sizeof(struct vlan_net),
 };
 
 static int __init vlan_proto_init(void)
@@ -726,7 +697,7 @@ static int __init vlan_proto_init(void)
 	pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
 	pr_info("All bugs added by %s\n", vlan_buggyright);
 
-	err = register_pernet_gen_device(&vlan_net_id, &vlan_net_ops);
+	err = register_pernet_subsys(&vlan_net_ops);
 	if (err < 0)
 		goto err0;
 
@@ -751,7 +722,7 @@ err4:
 err3:
 	unregister_netdevice_notifier(&vlan_notifier_block);
 err2:
-	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
+	unregister_pernet_subsys(&vlan_net_ops);
 err0:
 	return err;
 }
@@ -771,7 +742,7 @@ static void __exit vlan_cleanup_module(void)
 	for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
 		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
-	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
+	unregister_pernet_subsys(&vlan_net_ops);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
 	vlan_gvrp_uninit();
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 82570bc2a180..6abdcac1b2e8 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping {
 	struct vlan_priority_tci_mapping *next;
 };
 
+
+/**
+ * struct vlan_rx_stats - VLAN percpu rx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @multicast: number of received multicast packets
+ * @rx_errors: number of errors
+ */
+struct vlan_rx_stats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long multicast;
+	unsigned long rx_errors;
+};
+
 /**
  * struct vlan_dev_info - VLAN private device data
  * @nr_ingress_mappings: number of ingress priority mappings
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping {
  * @dent: proc dir entry
  * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
  * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
+ * @vlan_rx_stats: ptr to percpu rx stats
  */
 struct vlan_dev_info {
 	unsigned int nr_ingress_mappings;
@@ -45,6 +61,7 @@ struct vlan_dev_info {
 	struct proc_dir_entry *dent;
 	unsigned long cnt_inc_headroom_on_tx;
 	unsigned long cnt_encap_on_xmit;
+	struct vlan_rx_stats __percpu *vlan_rx_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
@@ -82,14 +99,14 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
-void unregister_vlan_dev(struct net_device *dev);
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
 {
 	struct vlan_dev_info *vip = vlan_dev_info(dev);
 
-	return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7];
+	return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 7f7de1a04de6..c584a0af77d3 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -11,10 +11,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (skb_bond_should_drop(skb))
+	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
 		goto drop;
 
-	skb->vlan_tci = vlan_tci;
+	skb->skb_iif = skb->dev->ifindex;
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
 	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
 
 	if (!skb->dev)
@@ -31,7 +32,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx);
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	struct net_device_stats *stats;
+	struct vlan_rx_stats *rx_stats;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
@@ -40,15 +41,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	stats = &dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
+	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+			       smp_processor_id());
+
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
 
 	switch (skb->pkt_type) {
 	case PACKET_BROADCAST:
 		break;
 	case PACKET_MULTICAST:
-		stats->multicast++;
+		rx_stats->multicast++;
 		break;
 	case PACKET_OTHERHOST:
 		/* Our lower layer thinks this is not local, let's make sure.
@@ -74,15 +77,17 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
-static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
-			   unsigned int vlan_tci, struct sk_buff *skb)
+static gro_result_t
+vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+		unsigned int vlan_tci, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
-	if (skb_bond_should_drop(skb))
+	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
 		goto drop;
 
-	skb->vlan_tci = vlan_tci;
+	skb->skb_iif = skb->dev->ifindex;
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
 	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
 
 	if (!skb->dev)
@@ -101,11 +106,12 @@ drop:
 	return GRO_DROP;
 }
 
-int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
-		     unsigned int vlan_tci, struct sk_buff *skb)
+gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+			      unsigned int vlan_tci, struct sk_buff *skb)
 {
 	if (netpoll_rx_on(skb))
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+			? GRO_DROP : GRO_NORMAL;
 
 	skb_gro_reset_offset(skb);
 
@@ -113,17 +119,18 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 }
 EXPORT_SYMBOL(vlan_gro_receive);
 
-int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
-		   unsigned int vlan_tci)
+gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+			    unsigned int vlan_tci)
 {
 	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
-		return NET_RX_DROP;
+		return GRO_DROP;
 
 	if (netpoll_rx_on(skb)) {
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+			? GRO_DROP : GRO_NORMAL;
 	}
 
 	return napi_frags_finish(napi, skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4198ec5c8abc..29b6348c8d4d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -140,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct vlan_hdr *vhdr;
-	struct net_device_stats *stats;
+	struct vlan_rx_stats *rx_stats;
 	u16 vlan_id;
 	u16 vlan_tci;
 
@@ -163,9 +164,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		goto err_unlock;
 	}
 
-	stats = &skb->dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
+	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
+			       smp_processor_id());
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
 
 	skb_pull_rcsum(skb, VLAN_HLEN);
 
@@ -180,7 +182,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		break;
 
 	case PACKET_MULTICAST:
-		stats->multicast++;
+		rx_stats->multicast++;
 		break;
 
 	case PACKET_OTHERHOST:
@@ -200,7 +202,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
 	skb = vlan_check_reorder_header(skb);
 	if (!skb) {
-		stats->rx_errors++;
+		rx_stats->rx_errors++;
 		goto err_unlock;
 	}
 
@@ -262,11 +264,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 		vhdr->h_vlan_TCI = htons(vlan_tci);
 
 		/*
-		 * Set the protocol type. For a packet of type ETH_P_802_3 we
-		 * put the length in here instead. It is up to the 802.2
-		 * layer to carry protocol information.
+		 * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+		 * put the length in here instead.
 		 */
-		if (type != ETH_P_802_3)
+		if (type != ETH_P_802_3 && type != ETH_P_802_2)
 			vhdr->h_vlan_encapsulated_proto = htons(type);
 		else
 			vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -322,7 +323,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	}
 
 
-	skb->dev = vlan_dev_info(dev)->real_dev;
+	skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
 	len = skb->len;
 	ret = dev_queue_xmit(skb);
 
@@ -332,7 +333,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	} else
 		txq->tx_dropped++;
 
-	return NETDEV_TX_OK;
+	return ret;
 }
 
 static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
@@ -358,7 +359,15 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 	} else
 		txq->tx_dropped++;
 
-	return NETDEV_TX_OK;
+	return ret;
+}
+
+static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct net_device *rdev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = rdev->netdev_ops;
+
+	return ops->ndo_select_queue(rdev, skb);
 }
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
@@ -393,7 +402,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct vlan_priority_tci_mapping *mp = NULL;
 	struct vlan_priority_tci_mapping *np;
-	u32 vlan_qos = (vlan_prio << 13) & 0xE000;
+	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
 
 	/* See if a priority mapping exists.. */
 	mp = vlan->egress_priority_map[skb_prio & 0xF];
@@ -430,7 +439,8 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	u32 old_flags = vlan->flags;
 
-	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+		     VLAN_FLAG_LOOSE_BINDING))
 		return -EINVAL;
 
 	vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -455,7 +465,8 @@ static int vlan_dev_open(struct net_device *dev)
 	struct net_device *real_dev = vlan->real_dev;
 	int err;
 
-	if (!(real_dev->flags & IFF_UP))
+	if (!(real_dev->flags & IFF_UP) &&
+	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
@@ -626,6 +637,17 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
 		rc = ops->ndo_fcoe_disable(real_dev);
 	return rc;
 }
+
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_get_wwn)
+		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+	return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -675,7 +697,8 @@ static const struct header_ops vlan_header_ops = {
 	.parse = eth_header_parse,
 };
 
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops;
+static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
+				   vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
 
 static int vlan_dev_init(struct net_device *dev)
 {
@@ -709,17 +732,28 @@ static int vlan_dev_init(struct net_device *dev)
 	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
 		dev->header_ops = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
-		dev->netdev_ops = &vlan_netdev_accel_ops;
+		if (real_dev->netdev_ops->ndo_select_queue)
+			dev->netdev_ops = &vlan_netdev_accel_ops_sq;
+		else
+			dev->netdev_ops = &vlan_netdev_accel_ops;
 	} else {
 		dev->header_ops = &vlan_header_ops;
 		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
-		dev->netdev_ops = &vlan_netdev_ops;
+		if (real_dev->netdev_ops->ndo_select_queue)
+			dev->netdev_ops = &vlan_netdev_ops_sq;
+		else
+			dev->netdev_ops = &vlan_netdev_ops;
 	}
 
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
 
 	vlan_dev_set_lockdep_class(dev, subclass);
+
+	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
+	if (!vlan_dev_info(dev)->vlan_rx_stats)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -729,6 +763,8 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	int i;
 
+	free_percpu(vlan->vlan_rx_stats);
+	vlan->vlan_rx_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -764,6 +800,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 	return dev_ethtool_get_flags(vlan->real_dev);
 }
 
+static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+
+	dev_txq_stats_fold(dev, stats);
+
+	if (vlan_dev_info(dev)->vlan_rx_stats) {
+		struct vlan_rx_stats *p, rx = {0};
+		int i;
+
+		for_each_possible_cpu(i) {
+			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+			rx.rx_packets += p->rx_packets;
+			rx.rx_bytes += p->rx_bytes;
+			rx.rx_errors += p->rx_errors;
+			rx.multicast += p->multicast;
+		}
+		stats->rx_packets = rx.rx_packets;
+		stats->rx_bytes = rx.rx_bytes;
+		stats->rx_errors = rx.rx_errors;
+		stats->multicast = rx.multicast;
+	}
+	return stats;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings = vlan_ethtool_get_settings,
 	.get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -786,11 +847,13 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
 	.ndo_neigh_setup = vlan_dev_neigh_setup,
+	.ndo_get_stats = vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
 #endif
 };
 
@@ -808,11 +871,63 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
 	.ndo_neigh_setup = vlan_dev_neigh_setup,
+	.ndo_get_stats = vlan_dev_get_stats,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
+static const struct net_device_ops vlan_netdev_ops_sq = {
+	.ndo_select_queue = vlan_dev_select_queue,
+	.ndo_change_mtu = vlan_dev_change_mtu,
+	.ndo_init = vlan_dev_init,
+	.ndo_uninit = vlan_dev_uninit,
+	.ndo_open = vlan_dev_open,
+	.ndo_stop = vlan_dev_stop,
+	.ndo_start_xmit = vlan_dev_hard_start_xmit,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = vlan_dev_set_mac_address,
+	.ndo_set_rx_mode = vlan_dev_set_rx_mode,
+	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
+	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
+	.ndo_do_ioctl = vlan_dev_ioctl,
+	.ndo_neigh_setup = vlan_dev_neigh_setup,
+	.ndo_get_stats = vlan_dev_get_stats,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
+static const struct net_device_ops vlan_netdev_accel_ops_sq = {
+	.ndo_select_queue = vlan_dev_select_queue,
+	.ndo_change_mtu = vlan_dev_change_mtu,
+	.ndo_init = vlan_dev_init,
+	.ndo_uninit = vlan_dev_uninit,
+	.ndo_open = vlan_dev_open,
+	.ndo_stop = vlan_dev_stop,
+	.ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = vlan_dev_set_mac_address,
+	.ndo_set_rx_mode = vlan_dev_set_rx_mode,
+	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
+	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
+	.ndo_do_ioctl = vlan_dev_ioctl,
+	.ndo_neigh_setup = vlan_dev_neigh_setup,
+	.ndo_get_stats = vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
 #endif
 };
 
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index a91504850195..ddc105734af7 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -60,7 +60,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 	if (data[IFLA_VLAN_FLAGS]) {
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
 		if ((flags->flags & flags->mask) &
-		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+		      VLAN_FLAG_LOOSE_BINDING))
 			return -EINVAL;
 	}
 
@@ -119,7 +120,7 @@ static int vlan_get_tx_queues(struct net *net,
 	return 0;
 }
 
-static int vlan_newlink(struct net_device *dev,
+static int vlan_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
@@ -131,7 +132,7 @@ static int vlan_newlink(struct net_device *dev,
 
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
-	real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
+	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
 	if (!real_dev)
 		return -ENODEV;
 
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 6262c335f3c2..afead353e215 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
  *	Create /proc/net/vlan entries
  */
 
-int vlan_proc_init(struct net *net)
+int __net_init vlan_proc_init(struct net *net)
 {
 	struct vlan_net *vn = net_generic(net, vlan_net_id);
 
@@ -201,18 +201,17 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
 
 /* start read of /proc/net/vlan/config */
 static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(dev_base_lock)
+	__acquires(rcu)
 {
 	struct net_device *dev;
 	struct net *net = seq_file_net(seq);
 	loff_t i = 1;
 
-	read_lock(&dev_base_lock);
-
+	rcu_read_lock();
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for_each_netdev(net, dev) {
+	for_each_netdev_rcu(net, dev) {
 		if (!is_vlan_dev(dev))
 			continue;
 
@@ -234,7 +233,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (v == SEQ_START_TOKEN)
 		dev = net_device_entry(&net->dev_base_head);
 
-	for_each_netdev_continue(net, dev) {
+	for_each_netdev_continue_rcu(net, dev) {
 		if (!is_vlan_dev(dev))
 			continue;
 
@@ -245,9 +244,9 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void vlan_seq_stop(struct seq_file *seq, void *v)
-	__releases(dev_base_lock)
+	__releases(rcu)
 {
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static int vlan_seq_show(struct seq_file *seq, void *v)