author		David S. Miller <davem@davemloft.net>	2013-04-19 14:46:27 -0400
committer	David S. Miller <davem@davemloft.net>	2013-04-19 14:46:27 -0400
commit		447b816fe03898c4dad19b254ca3dd05bae46ec3 (patch)
tree		9e3d6af491b1a01b077b927e51d513134e67e1a5 /net
parent		c2962897c94605bc8f158a37dee8d867dda9f116 (diff)
parent		28d2b136ca6c7bf7173a43a90f747ecda5b0520d (diff)
Merge branch '8021ad'
Patrick McHardy says:
====================
The following patches add support for 802.1ad (provider tagging) to the
VLAN driver. The patchset consists of the following parts:
- renaming of the NETIF_F_HW_VLAN feature flags to indicate that they only
operate on CTAGs
- preparation for 802.1ad VLAN filtering offload by adding a proto argument
to the rx_{add,kill}_vid net_device_ops callbacks
- preparation of the VLAN code to support multiple protocols by making the
protocol used for tagging a property of the VLAN device and converting
the device lookup functions accordingly
- second step of preparation of the VLAN code by making the packet tagging
functions take a protocol argument
- introduction of 802.1ad support in the VLAN code, consisting mainly of
checking for ETH_P_8021AD in a couple of places and extending the netdevice
offload feature checks to take the protocol into account
- announcement of STAG offloading capabilities in a couple of drivers for
virtual network devices
The patchset is based on net-next.git and has been tested with single and
double tagging with and without HW acceleration (for CTAGs).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
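
For illustration, here is roughly what a driver-side implementation of the
reworked rx_{add,kill}_vid callbacks could look like once this series is
applied. The driver name "foo" and its filter-programming details are
hypothetical and not part of this patchset; only the callback signatures
(which now carry a __be16 protocol) come from the series itself:

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* Hypothetical driver "foo": the callbacks now receive the tag
	 * protocol in addition to the VID, so a device that can only
	 * filter CTAGs simply rejects anything else.  The core only calls
	 * these for protocols whose FILTER feature the device advertises,
	 * so the check is defensive. */
	static int foo_vlan_rx_add_vid(struct net_device *dev,
				       __be16 proto, u16 vid)
	{
		if (proto != htons(ETH_P_8021Q))
			return -EPROTONOSUPPORT;
		/* program VID into the hardware filter table here */
		return 0;
	}

	static int foo_vlan_rx_kill_vid(struct net_device *dev,
					__be16 proto, u16 vid)
	{
		if (proto != htons(ETH_P_8021Q))
			return -EPROTONOSUPPORT;
		/* remove VID from the hardware filter table here */
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
	};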
Diffstat (limited to 'net')

-rw-r--r--  net/8021q/Kconfig                       |  2
-rw-r--r--  net/8021q/vlan.c                        | 97
-rw-r--r--  net/8021q/vlan.h                        | 57
-rw-r--r--  net/8021q/vlan_core.c                   | 73
-rw-r--r--  net/8021q/vlan_dev.c                    | 11
-rw-r--r--  net/8021q/vlan_gvrp.c                   |  4
-rw-r--r--  net/8021q/vlan_mvrp.c                   |  4
-rw-r--r--  net/8021q/vlan_netlink.c                | 32
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c  |  2
-rw-r--r--  net/bridge/br_device.c                  |  4
-rw-r--r--  net/bridge/br_netfilter.c               |  3
-rw-r--r--  net/bridge/br_vlan.c                    | 20
-rw-r--r--  net/core/dev.c                          | 24
-rw-r--r--  net/core/ethtool.c                      | 25
-rw-r--r--  net/core/netpoll.c                      |  5
-rw-r--r--  net/core/skbuff.c                       |  1
-rw-r--r--  net/openvswitch/actions.c               |  6
-rw-r--r--  net/openvswitch/datapath.c              |  2
-rw-r--r--  net/openvswitch/vport-internal_dev.c    |  2

19 files changed, 224 insertions, 150 deletions
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index 8f7517df41a5..b85a91fa61f1 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config VLAN_8021Q
-	tristate "802.1Q VLAN Support"
+	tristate "802.1Q/802.1ad VLAN Support"
 	---help---
 	  Select this and you will be able to create 802.1Q VLAN interfaces
 	  on your ethernet interfaces. 802.1Q VLAN supports almost
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 85addcd9372b..9424f3718ea7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,14 +51,18 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
+static int vlan_group_prealloc_vid(struct vlan_group *vg,
+				   __be16 vlan_proto, u16 vlan_id)
 {
 	struct net_device **array;
+	unsigned int pidx, vidx;
 	unsigned int size;
 
 	ASSERT_RTNL();
 
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	pidx = vlan_proto_idx(vlan_proto);
+	vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
+	array = vg->vlan_devices_arrays[pidx][vidx];
 	if (array != NULL)
 		return 0;
 
@@ -67,7 +71,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 	if (array == NULL)
 		return -ENOBUFS;
 
-	vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
+	vg->vlan_devices_arrays[pidx][vidx] = array;
 	return 0;
 }
 
@@ -93,7 +97,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
 	/* Because unregister_netdevice_queue() makes sure at least one rcu
 	 * grace period is respected before device freeing,
 	 * we dont need to call synchronize_net() here.
@@ -112,13 +116,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
 	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
+		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id)
 {
 	const char *name = real_dev->name;
 
@@ -127,7 +132,7 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 		return -EOPNOTSUPP;
 	}
 
-	if (vlan_find_dev(real_dev, vlan_id) != NULL)
+	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
 		return -EEXIST;
 
 	return 0;
@@ -142,7 +147,7 @@ int register_vlan_dev(struct net_device *dev)
 	struct vlan_group *grp;
 	int err;
 
-	err = vlan_vid_add(real_dev, vlan_id);
+	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
 	if (err)
 		return err;
 
@@ -160,7 +165,7 @@ int register_vlan_dev(struct net_device *dev)
 			goto out_uninit_gvrp;
 	}
 
-	err = vlan_group_prealloc_vid(grp, vlan_id);
+	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
 	if (err < 0)
 		goto out_uninit_mvrp;
 
@@ -181,7 +186,7 @@ int register_vlan_dev(struct net_device *dev)
 	/* So, got the sucker initialized, now lets place
 	 * it into our local structure.
 	 */
-	vlan_group_set_device(grp, vlan_id, dev);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
 	grp->nr_vlan_devs++;
 
 	return 0;
@@ -195,7 +200,7 @@ out_uninit_gvrp:
 	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
-	vlan_vid_del(real_dev, vlan_id);
+	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 	return err;
 }
 
@@ -213,7 +218,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	if (vlan_id >= VLAN_VID_MASK)
 		return -ERANGE;
 
-	err = vlan_check_real_dev(real_dev, vlan_id);
+	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
 	if (err < 0)
 		return err;
 
@@ -255,6 +260,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	new_dev->mtu = real_dev->mtu;
 	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
 
+	vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
 	vlan_dev_priv(new_dev)->vlan_id = vlan_id;
 	vlan_dev_priv(new_dev)->real_dev = real_dev;
 	vlan_dev_priv(new_dev)->dent = NULL;
@@ -301,7 +307,7 @@ static void vlan_transfer_features(struct net_device *dev,
 {
 	vlandev->gso_max_size = dev->gso_max_size;
 
-	if (dev->features & NETIF_F_HW_VLAN_TX)
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
 		vlandev->hard_header_len = dev->hard_header_len;
 	else
 		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
@@ -341,16 +347,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	int i, flgs;
 	struct net_device *vlandev;
 	struct vlan_dev_priv *vlan;
+	bool last = false;
 	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
 	if ((event == NETDEV_UP) &&
-	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
 		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
-		vlan_vid_add(dev, 0);
+		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 	}
 
 	vlan_info = rtnl_dereference(dev->vlan_info);
@@ -365,22 +372,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGE:
 		/* Propagate real device state to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			netif_stacked_transfer_operstate(dev, vlandev);
-		}
 		break;
 
 	case NETDEV_CHANGEADDR:
 		/* Adjust unicast filters on underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;
@@ -390,11 +388,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_CHANGEMTU:
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			if (vlandev->mtu <= dev->mtu)
 				continue;
 
@@ -404,26 +398,16 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_FEAT_CHANGE:
 		/* Propagate device features to underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			vlan_transfer_features(dev, vlandev);
-		}
-
 		break;
 
 	case NETDEV_DOWN:
-		if (dev->features & NETIF_F_HW_VLAN_FILTER)
-			vlan_vid_del(dev, 0);
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 		/* Put all VLANs for this dev in the down state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;
@@ -437,11 +421,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_UP:
 		/* Put all VLANs for this dev in the up state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (flgs & IFF_UP)
 				continue;
@@ -458,17 +438,15 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (dev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			/* removal of last vid destroys vlan_info, abort
 			 * afterwards */
 			if (vlan_info->nr_vids == 1)
-				i = VLAN_N_VID;
+				last = true;
 
 			unregister_vlan_dev(vlandev, &list);
+			if (last)
+				break;
 		}
 		unregister_netdevice_many(&list);
 		break;
@@ -482,13 +460,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
 		/* Propagate to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			call_netdevice_notifiers(event, vlandev);
-		}
 		break;
 	}
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 670f1e8cfc0f..abc9cb631c47 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -49,6 +49,7 @@ struct netpoll;
  * @ingress_priority_map: ingress priority mappings
  * @nr_egress_mappings: number of egress priority mappings
  * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
  * @vlan_id: VLAN identifier
  * @flags: device flags
  * @real_dev: underlying netdevice
@@ -62,6 +63,7 @@ struct vlan_dev_priv {
 	unsigned int nr_egress_mappings;
 	struct vlan_priority_tci_mapping *egress_priority_map[16];
 
+	__be16 vlan_proto;
 	u16 vlan_id;
 	u16 flags;
 
@@ -87,10 +89,17 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
 #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
 
+enum vlan_protos {
+	VLAN_PROTO_8021Q = 0,
+	VLAN_PROTO_8021AD,
+	VLAN_PROTO_NUM,
+};
+
 struct vlan_group {
 	unsigned int nr_vlan_devs;
 	struct hlist_node hlist;	/* linked list */
-	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+	struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
+					       [VLAN_GROUP_ARRAY_SPLIT_PARTS];
 };
 
 struct vlan_info {
@@ -103,37 +112,66 @@ struct vlan_info {
 	struct rcu_head rcu;
 };
 
-static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
-						       u16 vlan_id)
+static inline unsigned int vlan_proto_idx(__be16 proto)
+{
+	switch (proto) {
+	case __constant_htons(ETH_P_8021Q):
+		return VLAN_PROTO_8021Q;
+	case __constant_htons(ETH_P_8021AD):
+		return VLAN_PROTO_8021AD;
+	default:
+		BUG();
+	}
+}
+
+static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
+							 unsigned int pidx,
+							 u16 vlan_id)
 {
 	struct net_device **array;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+
+	array = vg->vlan_devices_arrays[pidx]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 }
 
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+							__be16 vlan_proto,
+							u16 vlan_id)
+{
+	return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
+}
+
 static inline void vlan_group_set_device(struct vlan_group *vg,
-					 u16 vlan_id,
+					 __be16 vlan_proto, u16 vlan_id,
 					 struct net_device *dev)
 {
 	struct net_device **array;
 	if (!vg)
 		return;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
 }
 
 /* Must be invoked with rcu_read_lock or with RTNL. */
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
-					       u16 vlan_id)
+					       __be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
 	if (vlan_info)
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 
 	return NULL;
 }
 
+#define vlan_group_for_each_dev(grp, i, dev) \
+	for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
+		if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
+							    (i) % VLAN_N_VID)))
+
 /* found in vlan_dev.c */
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
@@ -142,7 +180,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f3b6f515eba6..ebfa2fceb88b 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -8,11 +8,12 @@
 bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
+	__be16 vlan_proto = skb->vlan_proto;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
 	struct vlan_pcpu_stats *rx_stats;
 
-	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
 	if (!vlan_dev)
 		return false;
 
@@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
 		 * original position later
 		 */
 		skb_push(skb, offset);
-		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
+					      skb->vlan_tci);
 		if (!skb)
 			return false;
 		skb_pull(skb, offset + VLAN_HLEN);
@@ -62,12 +64,13 @@ bool vlan_do_receive(struct sk_buff **skbp)
 
 /* Must be invoked with rcu_read_lock. */
 struct net_device *__vlan_find_dev_deep(struct net_device *dev,
-					u16 vlan_id)
+					__be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
 
 	if (vlan_info) {
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 	} else {
 		/*
 		 * Lower devices of master uppers (bonding, team) do not have
@@ -78,7 +81,8 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
 		upper_dev = netdev_master_upper_dev_get_rcu(dev);
 		if (upper_dev)
-			return __vlan_find_dev_deep(upper_dev, vlan_id);
+			return __vlan_find_dev_deep(upper_dev,
+						    vlan_proto, vlan_id);
 	}
 
 	return NULL;
@@ -125,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 
 	vhdr = (struct vlan_hdr *) skb->data;
 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
+	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
 
 	skb_pull_rcsum(skb, VLAN_HLEN);
 	vlan_set_encap_proto(skb, vhdr);
@@ -185,35 +189,49 @@ static struct vlan_info *vlan_info_alloc(struct net_device *dev)
 
 struct vlan_vid_info {
 	struct list_head list;
-	unsigned short vid;
+	__be16 proto;
+	u16 vid;
 	int refcount;
 };
 
+static bool vlan_hw_filter_capable(const struct net_device *dev,
+				   const struct vlan_vid_info *vid_info)
+{
+	if (vid_info->proto == htons(ETH_P_8021Q) &&
+	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		return true;
+	if (vid_info->proto == htons(ETH_P_8021AD) &&
+	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
+		return true;
+	return false;
+}
+
 static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
-					       unsigned short vid)
+					       __be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		if (vid_info->vid == vid)
+		if (vid_info->proto == proto && vid_info->vid == vid)
 			return vid_info;
 	}
 	return NULL;
 }
 
-static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
 	if (!vid_info)
 		return NULL;
+	vid_info->proto = proto;
 	vid_info->vid = vid;
 
 	return vid_info;
 }
 
-static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
 			  struct vlan_vid_info **pvid_info)
 {
 	struct net_device *dev = vlan_info->real_dev;
@@ -221,12 +239,12 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	struct vlan_vid_info *vid_info;
 	int err;
 
-	vid_info = vlan_vid_info_alloc(vid);
+	vid_info = vlan_vid_info_alloc(proto, vid);
 	if (!vid_info)
 		return -ENOMEM;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_add_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
 		if (err) {
 			kfree(vid_info);
 			return err;
@@ -238,7 +256,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	return 0;
 }
 
-int vlan_vid_add(struct net_device *dev, unsigned short vid)
+int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;
@@ -254,9 +272,9 @@ int vlan_vid_add(struct net_device *dev, unsigned short vid)
 			return -ENOMEM;
 		vlan_info_created = true;
 	}
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info) {
-		err = __vlan_vid_add(vlan_info, vid, &vid_info);
+		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
 		if (err)
 			goto out_free_vlan_info;
 	}
@@ -279,14 +297,15 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 {
 	struct net_device *dev = vlan_info->real_dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
-	unsigned short vid = vid_info->vid;
+	__be16 proto = vid_info->proto;
+	u16 vid = vid_info->vid;
 	int err;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
 		if (err) {
-			pr_warn("failed to kill vid %d for device %s\n",
-				vid, dev->name);
+			pr_warn("failed to kill vid %04x/%d for device %s\n",
+				proto, vid, dev->name);
 		}
 	}
 	list_del(&vid_info->list);
@@ -294,7 +313,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 	vlan_info->nr_vids--;
 }
 
-void vlan_vid_del(struct net_device *dev, unsigned short vid)
+void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;
@@ -305,7 +324,7 @@ void vlan_vid_del(struct net_device *dev, unsigned short vid)
 	if (!vlan_info)
 		return;
 
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info)
 		return;
 	vid_info->refcount--;
@@ -333,7 +352,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
 		return 0;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		err = vlan_vid_add(dev, vid_info->vid);
+		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
 		if (err)
 			goto unwind;
 	}
@@ -343,7 +362,7 @@ unwind:
 	list_for_each_entry_continue_reverse(vid_info,
 					     &vlan_info->vid_list,
 					     list) {
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 	}
 
 	return err;
@@ -363,7 +382,7 @@ void vlan_vids_del_by_dev(struct net_device *dev,
 		return;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
 
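With vlan_vid_info and the device lookups keyed by (protocol, VID) as above,
the same numeric VID can coexist once per tag protocol on one real device.
A small, purely illustrative helper (the function name and its caller are
hypothetical, not part of this series) could check for that:

	#include <linux/if_vlan.h>

	/* Illustrative only: report whether both a CTAG and an STAG VLAN
	 * with the given VID are configured on top of real_dev. */
	static bool foo_both_tag_types_present(struct net_device *real_dev,
					       u16 vid)
	{
		bool ret;

		rcu_read_lock();	/* __vlan_find_dev_deep() needs RCU */
		ret = __vlan_find_dev_deep(real_dev, htons(ETH_P_8021Q), vid) &&
		      __vlan_find_dev_deep(real_dev, htons(ETH_P_8021AD), vid);
		rcu_read_unlock();
		return ret;
	}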
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 19cf81bf9f69..8af508536d36 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -99,6 +99,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				const void *daddr, const void *saddr,
 				unsigned int len)
 {
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_hdr *vhdr;
 	unsigned int vhdrlen = 0;
 	u16 vlan_tci = 0;
@@ -120,8 +121,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 		else
 			vhdr->h_vlan_encapsulated_proto = htons(len);
 
-		skb->protocol = htons(ETH_P_8021Q);
-		type = ETH_P_8021Q;
+		skb->protocol = vlan->vlan_proto;
+		type = ntohs(vlan->vlan_proto);
 		vhdrlen = VLAN_HLEN;
 	}
 
@@ -161,12 +162,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
-	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
+	if (veth->h_vlan_proto != vlan->vlan_proto ||
 	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 	}
 
 	skb->dev = vlan->real_dev;
@@ -583,7 +584,7 @@ static int vlan_dev_init(struct net_device *dev)
 #endif
 
 	dev->needed_headroom = real_dev->needed_headroom;
-	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
 		dev->header_ops = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
 	} else {
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
index 6f9755352760..66a80320b032 100644
--- a/net/8021q/vlan_gvrp.c
+++ b/net/8021q/vlan_gvrp.c
@@ -32,6 +32,8 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
 				 &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }
@@ -41,6 +43,8 @@ void vlan_gvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
 			   &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
index d9ec1d5964aa..e0fe091801b0 100644
--- a/net/8021q/vlan_mvrp.c
+++ b/net/8021q/vlan_mvrp.c
@@ -38,6 +38,8 @@ int vlan_mvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
 				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }
@@ -47,6 +49,8 @@ void vlan_mvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
 			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 1789658b7cd7..309129732285 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -23,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
 	[IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
 	[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
 	[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
+	[IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
 };
 
 static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
@@ -53,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 	if (!data)
 		return -EINVAL;
 
+	if (data[IFLA_VLAN_PROTOCOL]) {
+		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
+		case __constant_htons(ETH_P_8021Q):
+		case __constant_htons(ETH_P_8021AD):
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+
 	if (data[IFLA_VLAN_ID]) {
 		id = nla_get_u16(data[IFLA_VLAN_ID]);
 		if (id >= VLAN_VID_MASK)
@@ -107,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev;
+	__be16 proto;
 	int err;
 
 	if (!data[IFLA_VLAN_ID])
@@ -118,11 +130,17 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 	if (!real_dev)
 		return -ENODEV;
 
-	vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
-	vlan->real_dev = real_dev;
-	vlan->flags = VLAN_FLAG_REORDER_HDR;
+	if (data[IFLA_VLAN_PROTOCOL])
+		proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
+	else
+		proto = htons(ETH_P_8021Q);
+
+	vlan->vlan_proto = proto;
+	vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
+	vlan->real_dev = real_dev;
+	vlan->flags = VLAN_FLAG_REORDER_HDR;
 
-	err = vlan_check_real_dev(real_dev, vlan->vlan_id);
+	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
 	if (err < 0)
 		return err;
 
@@ -151,7 +169,8 @@ static size_t vlan_get_size(const struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
-	return nla_total_size(2) +	/* IFLA_VLAN_ID */
+	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
+	       nla_total_size(2) +	/* IFLA_VLAN_ID */
 	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
 	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
 	       vlan_qos_map_size(vlan->nr_egress_mappings);
@@ -166,7 +185,8 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct nlattr *nest;
 	unsigned int i;
 
-	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
 		goto nla_put_failure;
 	if (vlan->flags) {
 		f.flags = vlan->flags;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6a4f728680ae..379061c72549 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -341,7 +341,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 	}
 
 	if (vid != -1)
-		skb = vlan_insert_tag(skb, vid);
+		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
 
 	skb_reset_mac_header(skb);
 	skb->protocol = eth_type_trans(skb, soft_iface);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 314c73ed418f..967312803e41 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -348,10 +348,10 @@ void br_dev_setup(struct net_device *dev)
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
-			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
+			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX;
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
-			   NETIF_F_HW_VLAN_TX;
+			   NETIF_F_HW_VLAN_CTAG_TX;
 
 	br->dev = dev;
 	spin_lock_init(&br->lock);
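The bridge hunk above is the pattern behind the last bullet of the series
description: a software device announces which tag types it can insert on
transmit. A hypothetical virtual driver advertising service-tag (STAG)
insertion as well would simply set the corresponding feature bits in its
setup routine; this sketch is illustrative only and not taken from the
patchset:

	#include <linux/netdevice.h>

	/* Hypothetical software device: claim CTAG and STAG insertion on
	 * transmit; the tag lives in skb metadata, so no real hardware
	 * support is needed. */
	static void foo_dev_setup(struct net_device *dev)
	{
		dev->features    |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
	}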
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fe43bc7b063f..1ed75bfd8d1d 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -535,7 +535,8 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
 	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
 		return br;
 
-	vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+	vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
+				    vlan_tx_tag_get(skb) & VLAN_VID_MASK);
 
 	return vlan ? vlan : br;
 }
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 93dde75923f0..bd58b45f5f90 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -34,6 +34,7 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
 
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 {
+	const struct net_device_ops *ops;
 	struct net_bridge_port *p = NULL;
 	struct net_bridge *br;
 	struct net_device *dev;
@@ -53,15 +54,17 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 		br = v->parent.br;
 		dev = br->dev;
 	}
+	ops = dev->netdev_ops;
 
-	if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+	if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
 		/* Add VLAN to the device filter if it is supported.
 		 * Stricly speaking, this is not necessary now, since
 		 * devices are made promiscuous by the bridge, but if
 		 * that ever changes this code will allow tagged
 		 * traffic to enter the bridge.
 		 */
-		err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid);
+		err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+					       vid);
 		if (err)
 			return err;
 	}
@@ -82,8 +85,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 	return 0;
 
 out_filt:
-	if (p && (dev->features & NETIF_F_HW_VLAN_FILTER))
-		dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+	if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+		ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
 	return err;
 }
 
@@ -97,9 +100,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
 	if (v->port_idx && vid) {
 		struct net_device *dev = v->parent.port->dev;
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-		if (dev->features & NETIF_F_HW_VLAN_FILTER)
-			dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
 	}
 
 	clear_bit(vid, v->vlan_bitmap);
@@ -171,7 +175,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 	 * mac header.
 	 */
 	skb_push(skb, ETH_HLEN);
-	skb = __vlan_put_tag(skb, skb->vlan_tci);
+	skb = __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci);
 	if (!skb)
 		goto out;
 	/* put skb->data back to where it was */
@@ -213,7 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
 		/* PVID is set on this port. Any untagged ingress
 		 * frame is considered to belong to this vlan.
 		 */
-		__vlan_hwaccel_put_tag(skb, pvid);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
 		return true;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 3655ff927315..fad4c385f7a1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2212,7 +2212,7 @@ __be16 skb_network_protocol(struct sk_buff *skb)
 	__be16 type = skb->protocol;
 	int vlan_depth = ETH_HLEN;
 
-	while (type == htons(ETH_P_8021Q)) {
+	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
 		struct vlan_hdr *vh;
 
 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -2428,20 +2428,22 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
-	if (protocol == htons(ETH_P_8021Q)) {
+	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
 		return harmonize_features(skb, protocol, features);
 	}
 
-	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+					       NETIF_F_HW_VLAN_STAG_TX);
 
-	if (protocol != htons(ETH_P_8021Q)) {
+	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
 		return harmonize_features(skb, protocol, features);
 	} else {
 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
+				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_STAG_TX;
 		return harmonize_features(skb, protocol, features);
 	}
 }
@@ -2482,8 +2484,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		features = netif_skb_features(skb);
 
 		if (vlan_tx_tag_present(skb) &&
-		    !(features & NETIF_F_HW_VLAN_TX)) {
-			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+			skb = __vlan_put_tag(skb, skb->vlan_proto,
+					     vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
 
@@ -3359,6 +3362,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
 	case __constant_htons(ETH_P_IP):
 	case __constant_htons(ETH_P_IPV6):
 	case __constant_htons(ETH_P_8021Q):
+	case __constant_htons(ETH_P_8021AD):
 		return true;
 	default:
 		return false;
@@ -3399,7 +3403,8 @@ another_round:
 
 	__this_cpu_inc(softnet_data.processed);
 
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = vlan_untag(skb);
 		if (unlikely(!skb))
 			goto unlock;
@@ -5180,7 +5185,8 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
-	if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
+	if (((dev->hw_features | dev->features) &
+	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index adc1351e6873..b87712cfd26c 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] | |||
60 | [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", | 60 | [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", |
61 | [NETIF_F_HIGHDMA_BIT] = "highdma", | 61 | [NETIF_F_HIGHDMA_BIT] = "highdma", |
62 | [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", | 62 | [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", |
63 | [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", | 63 | [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", |
64 | 64 | ||
65 | [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", | 65 | [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", |
66 | [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", | 66 | [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", |
67 | [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", | 67 | [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", |
68 | [NETIF_F_GSO_BIT] = "tx-generic-segmentation", | 68 | [NETIF_F_GSO_BIT] = "tx-generic-segmentation", |
69 | [NETIF_F_LLTX_BIT] = "tx-lockless", | 69 | [NETIF_F_LLTX_BIT] = "tx-lockless", |
@@ -267,18 +267,19 @@ static int ethtool_set_one_feature(struct net_device *dev, | |||
267 | 267 | ||
268 | #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ | 268 | #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ |
269 | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) | 269 | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) |
270 | #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ | 270 | #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ |
271 | NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) | 271 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ |
272 | NETIF_F_RXHASH) | ||
272 | 273 | ||
273 | static u32 __ethtool_get_flags(struct net_device *dev) | 274 | static u32 __ethtool_get_flags(struct net_device *dev) |
274 | { | 275 | { |
275 | u32 flags = 0; | 276 | u32 flags = 0; |
276 | 277 | ||
277 | if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; | 278 | if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; |
278 | if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; | 279 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN; |
279 | if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; | 280 | if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN; |
280 | if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; | 281 | if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; |
281 | if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; | 282 | if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; |
282 | 283 | ||
283 | return flags; | 284 | return flags; |
284 | } | 285 | } |
@@ -291,8 +292,8 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data) | |||
291 | return -EINVAL; | 292 | return -EINVAL; |
292 | 293 | ||
293 | if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; | 294 | if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; |
294 | if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; | 295 | if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_CTAG_RX; |
295 | if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; | 296 | if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_CTAG_TX; |
296 | if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; | 297 | if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; |
297 | if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; | 298 | if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; |
298 | 299 | ||
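The legacy ethtool ETH_FLAG_RXVLAN/ETH_FLAG_TXVLAN flags are now mapped to the CTAG variants of the offload bits in both __ethtool_get_flags() and __ethtool_set_flags(). The two helpers below model that two-way mapping; all bit values are illustrative, not the kernel's.

/* Legacy flag <-> feature mapping after the rename: the old RXVLAN and
 * TXVLAN flags correspond to the C-tag offload bits only. */
#include <stdint.h>

#define FLAG_RXVLAN	(1u << 0)
#define FLAG_TXVLAN	(1u << 1)

#define F_CTAG_RX	(1u << 0)
#define F_CTAG_TX	(1u << 1)

static uint32_t flags_from_features(uint32_t features)
{
	uint32_t flags = 0;

	if (features & F_CTAG_RX)
		flags |= FLAG_RXVLAN;
	if (features & F_CTAG_TX)
		flags |= FLAG_TXVLAN;
	return flags;
}

static uint32_t features_from_flags(uint32_t flags)
{
	uint32_t features = 0;

	if (flags & FLAG_RXVLAN)
		features |= F_CTAG_RX;
	if (flags & FLAG_TXVLAN)
		features |= F_CTAG_TX;
	return features;
}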
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a3a17aed3639..209d84253dd5 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -383,8 +383,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
383 | if (__netif_tx_trylock(txq)) { | 383 | if (__netif_tx_trylock(txq)) { |
384 | if (!netif_xmit_stopped(txq)) { | 384 | if (!netif_xmit_stopped(txq)) { |
385 | if (vlan_tx_tag_present(skb) && | 385 | if (vlan_tx_tag_present(skb) && |
386 | !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { | 386 | !vlan_hw_offload_capable(netif_skb_features(skb), |
387 | skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); | 387 | skb->vlan_proto)) { |
388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); | ||
388 | if (unlikely(!skb)) | 389 | if (unlikely(!skb)) |
389 | break; | 390 | break; |
390 | skb->vlan_tci = 0; | 391 | skb->vlan_tci = 0; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ba646145cd5c..a92d9e7d10f7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -707,6 +707,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
707 | new->tc_verd = old->tc_verd; | 707 | new->tc_verd = old->tc_verd; |
708 | #endif | 708 | #endif |
709 | #endif | 709 | #endif |
710 | new->vlan_proto = old->vlan_proto; | ||
710 | new->vlan_tci = old->vlan_tci; | 711 | new->vlan_tci = old->vlan_tci; |
711 | 712 | ||
712 | skb_copy_secmark(new, old); | 713 | skb_copy_secmark(new, old); |
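__copy_skb_header() must now carry the tag protocol across skb copies as well, otherwise a cloned skb would lose track of whether its accelerated tag is a C-tag or an S-tag. A tiny model of the fix, using a stand-in struct rather than struct sk_buff:

/* Clone the tag metadata: both the TPID and the TCI must travel. */
#include <stdint.h>

struct tag_meta {
	uint16_t vlan_proto;	/* TPID, network byte order */
	uint16_t vlan_tci;	/* priority, CFI and VLAN ID */
};

static void copy_tag_meta(struct tag_meta *new, const struct tag_meta *old)
{
	new->vlan_proto = old->vlan_proto;
	new->vlan_tci = old->vlan_tci;
}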
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index d4d5363c7ba7..894b6cbdd929 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -98,7 +98,7 @@ static int pop_vlan(struct sk_buff *skb) | |||
98 | if (unlikely(err)) | 98 | if (unlikely(err)) |
99 | return err; | 99 | return err; |
100 | 100 | ||
101 | __vlan_hwaccel_put_tag(skb, ntohs(tci)); | 101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci)); |
102 | return 0; | 102 | return 0; |
103 | } | 103 | } |
104 | 104 | ||
@@ -110,7 +110,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
110 | /* push down current VLAN tag */ | 110 | /* push down current VLAN tag */ |
111 | current_tag = vlan_tx_tag_get(skb); | 111 | current_tag = vlan_tx_tag_get(skb); |
112 | 112 | ||
113 | if (!__vlan_put_tag(skb, current_tag)) | 113 | if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) |
114 | return -ENOMEM; | 114 | return -ENOMEM; |
115 | 115 | ||
116 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 116 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
@@ -118,7 +118,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
118 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); | 118 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
119 | 119 | ||
120 | } | 120 | } |
121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); | 121 | __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); |
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
124 | 124 | ||
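In Open vSwitch, pop_vlan() re-exposes the tag it lifts out of the payload as a C-tag in the accelerated slot, while push_vlan() takes the TPID from the action (vlan->vlan_tpid) and, when pushing an existing accelerated tag down, preserves that tag's own skb->vlan_proto. The sketch below models the accelerated tag slot as a small struct; all names are stand-ins and the payload rewriting is only hinted at in a comment.

/* Model of the accelerated tag slot after the change: it now records a
 * TPID alongside the TCI. */
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q	0x8100

struct accel_tag {
	uint16_t proto;		/* TPID, network byte order */
	uint16_t tci;		/* host byte order in this sketch */
	int present;
};

static void model_pop_vlan(struct accel_tag *tag, uint16_t inner_tci)
{
	/* The tag lifted out of the payload is re-exposed as a C-tag. */
	tag->proto = htons(ETH_P_8021Q);
	tag->tci = inner_tci;
	tag->present = 1;
}

static void model_push_vlan(struct accel_tag *tag, uint16_t action_tpid_be,
			    uint16_t action_tci)
{
	/* Any existing accelerated tag would first be written into the
	 * payload with its own tag->proto (omitted here); the new outer
	 * tag then takes the TPID supplied by the action. */
	tag->proto = action_tpid_be;
	tag->tci = action_tci;
	tag->present = 1;
}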
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index b7d0b7c3fe2c..7bb5d4f6bb90 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -401,7 +401,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
401 | if (!nskb) | 401 | if (!nskb) |
402 | return -ENOMEM; | 402 | return -ENOMEM; |
403 | 403 | ||
404 | nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); | 404 | nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); |
405 | if (!nskb) | 405 | if (!nskb) |
406 | return -ENOMEM; | 406 | return -ENOMEM; |
407 | 407 | ||
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 9604760494b1..73682de8dc69 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev) | |||
137 | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; | 137 | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; |
138 | 138 | ||
139 | netdev->vlan_features = netdev->features; | 139 | netdev->vlan_features = netdev->features; |
140 | netdev->features |= NETIF_F_HW_VLAN_TX; | 140 | netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; |
141 | netdev->hw_features = netdev->features & ~NETIF_F_LLTX; | 141 | netdev->hw_features = netdev->features & ~NETIF_F_LLTX; |
142 | eth_hw_addr_random(netdev); | 142 | eth_hw_addr_random(netdev); |
143 | } | 143 | } |