Diffstat (limited to 'net/8021q')
-rw-r--r--  net/8021q/Kconfig          2
-rw-r--r--  net/8021q/vlan.c          97
-rw-r--r--  net/8021q/vlan.h          58
-rw-r--r--  net/8021q/vlan_core.c     80
-rw-r--r--  net/8021q/vlan_dev.c      11
-rw-r--r--  net/8021q/vlan_gvrp.c      4
-rw-r--r--  net/8021q/vlan_mvrp.c      4
-rw-r--r--  net/8021q/vlan_netlink.c  32
8 files changed, 175 insertions(+), 113 deletions(-)
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index 8f7517df41a5..b85a91fa61f1 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config VLAN_8021Q
-	tristate "802.1Q VLAN Support"
+	tristate "802.1Q/802.1ad VLAN Support"
 	---help---
 	  Select this and you will be able to create 802.1Q VLAN interfaces
 	  on your ethernet interfaces. 802.1Q VLAN supports almost
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 85addcd9372b..9424f3718ea7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,14 +51,18 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
+static int vlan_group_prealloc_vid(struct vlan_group *vg,
+				   __be16 vlan_proto, u16 vlan_id)
 {
 	struct net_device **array;
+	unsigned int pidx, vidx;
 	unsigned int size;
 
 	ASSERT_RTNL();
 
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	pidx  = vlan_proto_idx(vlan_proto);
+	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
+	array = vg->vlan_devices_arrays[pidx][vidx];
 	if (array != NULL)
 		return 0;
 
@@ -67,7 +71,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 	if (array == NULL)
 		return -ENOBUFS;
 
-	vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
+	vg->vlan_devices_arrays[pidx][vidx] = array;
 	return 0;
 }
 
@@ -93,7 +97,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
 	/* Because unregister_netdevice_queue() makes sure at least one rcu
 	 * grace period is respected before device freeing,
 	 * we dont need to call synchronize_net() here.
@@ -112,13 +116,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
 	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
+		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id)
 {
 	const char *name = real_dev->name;
 
@@ -127,7 +132,7 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 		return -EOPNOTSUPP;
 	}
 
-	if (vlan_find_dev(real_dev, vlan_id) != NULL)
+	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
 		return -EEXIST;
 
 	return 0;
@@ -142,7 +147,7 @@ int register_vlan_dev(struct net_device *dev)
 	struct vlan_group *grp;
 	int err;
 
-	err = vlan_vid_add(real_dev, vlan_id);
+	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
 	if (err)
 		return err;
 
@@ -160,7 +165,7 @@ int register_vlan_dev(struct net_device *dev)
 			goto out_uninit_gvrp;
 	}
 
-	err = vlan_group_prealloc_vid(grp, vlan_id);
+	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
 	if (err < 0)
 		goto out_uninit_mvrp;
 
@@ -181,7 +186,7 @@ int register_vlan_dev(struct net_device *dev)
 	/* So, got the sucker initialized, now lets place
 	 * it into our local structure.
 	 */
-	vlan_group_set_device(grp, vlan_id, dev);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
 	grp->nr_vlan_devs++;
 
 	return 0;
@@ -195,7 +200,7 @@ out_uninit_gvrp:
 	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
-	vlan_vid_del(real_dev, vlan_id);
+	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 	return err;
 }
 
@@ -213,7 +218,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	if (vlan_id >= VLAN_VID_MASK)
 		return -ERANGE;
 
-	err = vlan_check_real_dev(real_dev, vlan_id);
+	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
 	if (err < 0)
 		return err;
 
@@ -255,6 +260,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	new_dev->mtu = real_dev->mtu;
 	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
 
+	vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
 	vlan_dev_priv(new_dev)->vlan_id = vlan_id;
 	vlan_dev_priv(new_dev)->real_dev = real_dev;
 	vlan_dev_priv(new_dev)->dent = NULL;
@@ -301,7 +307,7 @@ static void vlan_transfer_features(struct net_device *dev,
 {
 	vlandev->gso_max_size = dev->gso_max_size;
 
-	if (dev->features & NETIF_F_HW_VLAN_TX)
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
 		vlandev->hard_header_len = dev->hard_header_len;
 	else
 		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
@@ -341,16 +347,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	int i, flgs;
 	struct net_device *vlandev;
 	struct vlan_dev_priv *vlan;
+	bool last = false;
 	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
 	if ((event == NETDEV_UP) &&
-	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
 		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
-		vlan_vid_add(dev, 0);
+		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 	}
 
 	vlan_info = rtnl_dereference(dev->vlan_info);
@@ -365,22 +372,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGE:
 		/* Propagate real device state to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			netif_stacked_transfer_operstate(dev, vlandev);
-		}
 		break;
 
 	case NETDEV_CHANGEADDR:
 		/* Adjust unicast filters on underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;
@@ -390,11 +388,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_CHANGEMTU:
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			if (vlandev->mtu <= dev->mtu)
 				continue;
 
@@ -404,26 +398,16 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_FEAT_CHANGE:
 		/* Propagate device features to underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			vlan_transfer_features(dev, vlandev);
-		}
-
 		break;
 
 	case NETDEV_DOWN:
-		if (dev->features & NETIF_F_HW_VLAN_FILTER)
-			vlan_vid_del(dev, 0);
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 		/* Put all VLANs for this dev in the down state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;
@@ -437,11 +421,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_UP:
 		/* Put all VLANs for this dev in the up state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (flgs & IFF_UP)
 				continue;
@@ -458,17 +438,15 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (dev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			/* removal of last vid destroys vlan_info, abort
 			 * afterwards */
 			if (vlan_info->nr_vids == 1)
-				i = VLAN_N_VID;
+				last = true;
 
 			unregister_vlan_dev(vlandev, &list);
+			if (last)
+				break;
 		}
 		unregister_netdevice_many(&list);
 		break;
@@ -482,13 +460,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
 		/* Propagate to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			call_netdevice_notifiers(event, vlandev);
-		}
 		break;
 	}
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 670f1e8cfc0f..ba5983f34c42 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -49,6 +49,7 @@ struct netpoll;
  * @ingress_priority_map: ingress priority mappings
  * @nr_egress_mappings: number of egress priority mappings
  * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
  * @vlan_id: VLAN identifier
  * @flags: device flags
  * @real_dev: underlying netdevice
@@ -62,6 +63,7 @@ struct vlan_dev_priv {
 	unsigned int nr_egress_mappings;
 	struct vlan_priority_tci_mapping *egress_priority_map[16];
 
+	__be16 vlan_proto;
 	u16 vlan_id;
 	u16 flags;
 
@@ -87,10 +89,17 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
 #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
 
+enum vlan_protos {
+	VLAN_PROTO_8021Q = 0,
+	VLAN_PROTO_8021AD,
+	VLAN_PROTO_NUM,
+};
+
 struct vlan_group {
 	unsigned int nr_vlan_devs;
 	struct hlist_node hlist;	/* linked list */
-	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+	struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
+					       [VLAN_GROUP_ARRAY_SPLIT_PARTS];
 };
 
 struct vlan_info {
@@ -103,37 +112,67 @@ struct vlan_info {
 	struct rcu_head rcu;
 };
 
-static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
-						       u16 vlan_id)
+static inline unsigned int vlan_proto_idx(__be16 proto)
+{
+	switch (proto) {
+	case __constant_htons(ETH_P_8021Q):
+		return VLAN_PROTO_8021Q;
+	case __constant_htons(ETH_P_8021AD):
+		return VLAN_PROTO_8021AD;
+	default:
+		BUG();
+		return 0;
+	}
+}
+
+static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
+							 unsigned int pidx,
+							 u16 vlan_id)
 {
 	struct net_device **array;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+
+	array = vg->vlan_devices_arrays[pidx]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 }
 
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+							__be16 vlan_proto,
+							u16 vlan_id)
+{
+	return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
+}
+
 static inline void vlan_group_set_device(struct vlan_group *vg,
-					 u16 vlan_id,
+					 __be16 vlan_proto, u16 vlan_id,
 					 struct net_device *dev)
 {
 	struct net_device **array;
 	if (!vg)
 		return;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
 }
 
 /* Must be invoked with rcu_read_lock or with RTNL. */
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
-					       u16 vlan_id)
+					       __be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
 	if (vlan_info)
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 
 	return NULL;
 }
 
+#define vlan_group_for_each_dev(grp, i, dev) \
+	for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
+		if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
+						     (i) % VLAN_N_VID)))
+
 /* found in vlan_dev.c */
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
@@ -142,7 +181,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
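
The vlan_devices_arrays table is now indexed first by the protocol slot returned by vlan_proto_idx() and then by a 512-entry VID chunk, and vlan_group_for_each_dev() walks the flattened VLAN_PROTO_NUM * VLAN_N_VID space with a single counter. Below is a standalone sketch of the same index arithmetic, assuming only the constants defined in the header above; the loop and printout are purely illustrative and are not kernel code.

#include <stdio.h>

#define VLAN_N_VID			4096
#define VLAN_PROTO_NUM			2	/* 802.1Q + 802.1ad */
#define VLAN_GROUP_ARRAY_SPLIT_PARTS	8
#define VLAN_GROUP_ARRAY_PART_LEN	(VLAN_N_VID / VLAN_GROUP_ARRAY_SPLIT_PARTS)

int main(void)
{
	unsigned int i;

	/* i runs over the flattened (protocol, vid) space, exactly like the
	 * vlan_group_for_each_dev() counter; step widely just to keep the
	 * output short. */
	for (i = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i += 1500) {
		unsigned int pidx = i / VLAN_N_VID;	/* outer index: protocol     */
		unsigned int vid  = i % VLAN_N_VID;	/* VLAN id within that proto */
		unsigned int part = vid / VLAN_GROUP_ARRAY_PART_LEN;
		unsigned int slot = vid % VLAN_GROUP_ARRAY_PART_LEN;

		printf("i=%-5u -> proto_idx=%u vid=%-4u part=%u slot=%u\n",
		       i, pidx, vid, part, slot);
	}
	return 0;
}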
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f3b6f515eba6..8a15eaadc4bd 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -8,11 +8,12 @@
 bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
+	__be16 vlan_proto = skb->vlan_proto;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
 	struct vlan_pcpu_stats *rx_stats;
 
-	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
 	if (!vlan_dev)
 		return false;
 
@@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
 		 * original position later
 		 */
 		skb_push(skb, offset);
-		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
+					      skb->vlan_tci);
 		if (!skb)
 			return false;
 		skb_pull(skb, offset + VLAN_HLEN);
@@ -62,12 +64,13 @@ bool vlan_do_receive(struct sk_buff **skbp)
 
 /* Must be invoked with rcu_read_lock. */
 struct net_device *__vlan_find_dev_deep(struct net_device *dev,
-					u16 vlan_id)
+					__be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
 
 	if (vlan_info) {
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 	} else {
 		/*
 		 * Lower devices of master uppers (bonding, team) do not have
@@ -78,7 +81,8 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
 		upper_dev = netdev_master_upper_dev_get_rcu(dev);
 		if (upper_dev)
-			return __vlan_find_dev_deep(upper_dev, vlan_id);
+			return __vlan_find_dev_deep(upper_dev,
+						    vlan_proto, vlan_id);
 	}
 
 	return NULL;
@@ -125,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 
 	vhdr = (struct vlan_hdr *) skb->data;
 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
+	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
 
 	skb_pull_rcsum(skb, VLAN_HLEN);
 	vlan_set_encap_proto(skb, vhdr);
@@ -153,10 +157,11 @@ EXPORT_SYMBOL(vlan_untag);
 
 static void vlan_group_free(struct vlan_group *grp)
 {
-	int i;
+	int i, j;
 
-	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-		kfree(grp->vlan_devices_arrays[i]);
+	for (i = 0; i < VLAN_PROTO_NUM; i++)
+		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
+			kfree(grp->vlan_devices_arrays[i][j]);
 }
 
 static void vlan_info_free(struct vlan_info *vlan_info)
@@ -185,35 +190,49 @@ static struct vlan_info *vlan_info_alloc(struct net_device *dev)
 
 struct vlan_vid_info {
 	struct list_head list;
-	unsigned short vid;
+	__be16 proto;
+	u16 vid;
 	int refcount;
 };
 
+static bool vlan_hw_filter_capable(const struct net_device *dev,
+				   const struct vlan_vid_info *vid_info)
+{
+	if (vid_info->proto == htons(ETH_P_8021Q) &&
+	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		return true;
+	if (vid_info->proto == htons(ETH_P_8021AD) &&
+	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
+		return true;
+	return false;
+}
+
 static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
-					       unsigned short vid)
+					       __be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		if (vid_info->vid == vid)
+		if (vid_info->proto == proto && vid_info->vid == vid)
 			return vid_info;
 	}
 	return NULL;
 }
 
-static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
 	if (!vid_info)
 		return NULL;
+	vid_info->proto = proto;
 	vid_info->vid = vid;
 
 	return vid_info;
 }
 
-static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
 			  struct vlan_vid_info **pvid_info)
 {
 	struct net_device *dev = vlan_info->real_dev;
@@ -221,12 +240,12 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	struct vlan_vid_info *vid_info;
 	int err;
 
-	vid_info = vlan_vid_info_alloc(vid);
+	vid_info = vlan_vid_info_alloc(proto, vid);
 	if (!vid_info)
 		return -ENOMEM;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_add_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
 		if (err) {
 			kfree(vid_info);
 			return err;
@@ -238,7 +257,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	return 0;
 }
 
-int vlan_vid_add(struct net_device *dev, unsigned short vid)
+int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;
@@ -254,9 +273,9 @@ int vlan_vid_add(struct net_device *dev, unsigned short vid)
 			return -ENOMEM;
 		vlan_info_created = true;
 	}
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info) {
-		err = __vlan_vid_add(vlan_info, vid, &vid_info);
+		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
 		if (err)
 			goto out_free_vlan_info;
 	}
@@ -279,14 +298,15 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 {
 	struct net_device *dev = vlan_info->real_dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
-	unsigned short vid = vid_info->vid;
+	__be16 proto = vid_info->proto;
+	u16 vid = vid_info->vid;
 	int err;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
 		if (err) {
-			pr_warn("failed to kill vid %d for device %s\n",
-				vid, dev->name);
+			pr_warn("failed to kill vid %04x/%d for device %s\n",
+				proto, vid, dev->name);
 		}
 	}
 	list_del(&vid_info->list);
@@ -294,7 +314,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 	vlan_info->nr_vids--;
 }
 
-void vlan_vid_del(struct net_device *dev, unsigned short vid)
+void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;
@@ -305,7 +325,7 @@ void vlan_vid_del(struct net_device *dev, unsigned short vid)
 	if (!vlan_info)
 		return;
 
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info)
 		return;
 	vid_info->refcount--;
@@ -333,7 +353,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
 		return 0;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		err = vlan_vid_add(dev, vid_info->vid);
+		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
 		if (err)
 			goto unwind;
 	}
@@ -343,7 +363,7 @@ unwind:
 	list_for_each_entry_continue_reverse(vid_info,
 					     &vlan_info->vid_list,
 					     list) {
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 	}
 
 	return err;
@@ -363,7 +383,7 @@ void vlan_vids_del_by_dev(struct net_device *dev,
 		return;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
 
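
vlan_hw_filter_capable() gates the hardware filter offload on a per-protocol feature bit (CTAG vs STAG), and the ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() hooks now receive the encapsulation protocol next to the VID. A hedged sketch of the shape a driver callback takes after this change; the example_priv structure and its bitmaps are hypothetical, only the (dev, proto, vid) calling convention comes from the patch above.

static int example_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical driver state */

	if (proto == htons(ETH_P_8021Q))
		set_bit(vid, priv->active_cvlans);	/* C-tag filter table */
	else if (proto == htons(ETH_P_8021AD))
		set_bit(vid, priv->active_svlans);	/* S-tag filter table */
	else
		return -EPROTONOSUPPORT;		/* core only passes the two above */

	return 0;
}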
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 19cf81bf9f69..8af508536d36 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -99,6 +99,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				const void *daddr, const void *saddr,
 				unsigned int len)
 {
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_hdr *vhdr;
 	unsigned int vhdrlen = 0;
 	u16 vlan_tci = 0;
@@ -120,8 +121,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 		else
 			vhdr->h_vlan_encapsulated_proto = htons(len);
 
-		skb->protocol = htons(ETH_P_8021Q);
-		type = ETH_P_8021Q;
+		skb->protocol = vlan->vlan_proto;
+		type = ntohs(vlan->vlan_proto);
 		vhdrlen = VLAN_HLEN;
 	}
 
@@ -161,12 +162,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
-	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
+	if (veth->h_vlan_proto != vlan->vlan_proto ||
 	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 	}
 
 	skb->dev = vlan->real_dev;
@@ -583,7 +584,7 @@ static int vlan_dev_init(struct net_device *dev)
 #endif
 
 	dev->needed_headroom = real_dev->needed_headroom;
-	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
 		dev->header_ops = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
 	} else {
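
Because vlan_dev_hard_header() and the xmit path now stamp whichever ethertype the device was configured with, stacking an 802.1Q device on top of an 802.1ad device yields the usual provider-bridging (QinQ) layout on the wire: outer S-tag 0x88A8, inner C-tag 0x8100, then the encapsulated protocol. A small userspace sketch of that header layout, purely illustrative and not part of the kernel code:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Write the two VLAN tags that follow the MAC addresses of a QinQ frame.
 * TCIs carry only the VID here; PCP/DEI bits are left at zero. */
size_t build_qinq_tags(uint8_t *p, uint16_t svid, uint16_t cvid,
		       uint16_t encap_proto)
{
	uint16_t v;

	v = htons(0x88A8);	memcpy(p + 0, &v, 2);	/* 802.1ad (S-tag) ethertype */
	v = htons(svid);	memcpy(p + 2, &v, 2);	/* outer TCI: S-VID          */
	v = htons(0x8100);	memcpy(p + 4, &v, 2);	/* 802.1Q (C-tag) ethertype  */
	v = htons(cvid);	memcpy(p + 6, &v, 2);	/* inner TCI: C-VID          */
	v = htons(encap_proto);	memcpy(p + 8, &v, 2);	/* encapsulated protocol     */

	return 10;		/* bytes written */
}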
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
index 6f9755352760..66a80320b032 100644
--- a/net/8021q/vlan_gvrp.c
+++ b/net/8021q/vlan_gvrp.c
@@ -32,6 +32,8 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
 				 &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }
@@ -41,6 +43,8 @@ void vlan_gvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
 			   &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
index d9ec1d5964aa..e0fe091801b0 100644
--- a/net/8021q/vlan_mvrp.c
+++ b/net/8021q/vlan_mvrp.c
@@ -38,6 +38,8 @@ int vlan_mvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
 				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }
@@ -47,6 +49,8 @@ void vlan_mvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
 			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 1789658b7cd7..309129732285 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -23,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
 	[IFLA_VLAN_FLAGS]	= { .len = sizeof(struct ifla_vlan_flags) },
 	[IFLA_VLAN_EGRESS_QOS]	= { .type = NLA_NESTED },
 	[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
+	[IFLA_VLAN_PROTOCOL]	= { .type = NLA_U16 },
 };
 
 static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
@@ -53,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 	if (!data)
 		return -EINVAL;
 
+	if (data[IFLA_VLAN_PROTOCOL]) {
+		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
+		case __constant_htons(ETH_P_8021Q):
+		case __constant_htons(ETH_P_8021AD):
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+
 	if (data[IFLA_VLAN_ID]) {
 		id = nla_get_u16(data[IFLA_VLAN_ID]);
 		if (id >= VLAN_VID_MASK)
@@ -107,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev;
+	__be16 proto;
 	int err;
 
 	if (!data[IFLA_VLAN_ID])
@@ -118,11 +130,17 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 	if (!real_dev)
 		return -ENODEV;
 
-	vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
-	vlan->real_dev = real_dev;
-	vlan->flags = VLAN_FLAG_REORDER_HDR;
+	if (data[IFLA_VLAN_PROTOCOL])
+		proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
+	else
+		proto = htons(ETH_P_8021Q);
+
+	vlan->vlan_proto = proto;
+	vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
+	vlan->real_dev = real_dev;
+	vlan->flags = VLAN_FLAG_REORDER_HDR;
 
-	err = vlan_check_real_dev(real_dev, vlan->vlan_id);
+	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
 	if (err < 0)
 		return err;
 
@@ -151,7 +169,8 @@ static size_t vlan_get_size(const struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
-	return nla_total_size(2) +	/* IFLA_VLAN_ID */
+	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
+	       nla_total_size(2) +	/* IFLA_VLAN_ID */
 	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
 	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
 	       vlan_qos_map_size(vlan->nr_egress_mappings);
@@ -166,7 +185,8 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct nlattr *nest;
 	unsigned int i;
 
-	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
 		goto nla_put_failure;
 	if (vlan->flags) {
 		f.flags = vlan->flags;
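
IFLA_VLAN_PROTOCOL carries the encapsulation ethertype in network byte order, vlan_validate() rejects anything other than 802.1Q/802.1ad, and vlan_newlink() falls back to ETH_P_8021Q when the attribute is absent, so existing userspace keeps its current behaviour. Assuming an iproute2 build that already exposes the attribute, creating an 802.1ad (S-VLAN) device and a stacked C-VLAN on top of it looks roughly like:

	ip link add link eth0 name eth0.100 type vlan protocol 802.1ad id 100
	ip link add link eth0.100 name eth0.100.200 type vlan protocol 802.1Q id 200

Leaving the protocol keyword out creates a plain 802.1Q device, matching the old behaviour.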