author	Jiri Pirko <jpirko@redhat.com>	2011-12-07 23:11:18 -0500
committer	David S. Miller <davem@davemloft.net>	2011-12-08 19:52:42 -0500
commit	5b9ea6e022e9ba0fe39cb349ac40361f78d5da5b
tree	11f0de492ee799fd4174f79ac6aae4c3533beb25 /net/8021q
parent	87002b03baabd2b8f6281ab6411ed88d24958de1
vlan: introduce vid list with reference counting
This allows keeping track of the vids that need to be in the rx vlan filters of devices, even when those devices are used in a bond/team etc. vlan_info, like vlan_group was previously, is allocated when the first vid is added and deallocated when the last vid is deleted. The vlan_group definition is moved to a private header.

Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/8021q')
-rw-r--r--	net/8021q/vlan.c	 90
-rw-r--r--	net/8021q/vlan.h	 30
-rw-r--r--	net/8021q/vlan_core.c	168
3 files changed, 217 insertions(+), 71 deletions(-)
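
Before the diff itself, a minimal userspace sketch of the reference-counting scheme the commit message describes may help: one list entry per vid, whose refcount tracks how many users (vlan devices, or upper devices such as bond/team) need that vid in the rx filter; the entry is created on first use and freed only when the last user drops it. The names below (vid_entry, vid_add, vid_del) are illustrative only, not the kernel's; the actual implementation is vlan_vid_add()/vlan_vid_del() in the net/8021q/vlan_core.c hunk further down.

/*
 * Illustrative sketch of a refcounted vid list (not kernel code).
 * Each vid gets one entry; add bumps the refcount, del drops it and
 * frees the entry when the count reaches zero.
 */
#include <stdio.h>
#include <stdlib.h>

struct vid_entry {
	unsigned short vid;
	int refcount;
	struct vid_entry *next;
};

static struct vid_entry *vid_list;	/* stands in for vlan_info->vid_list */

static struct vid_entry *vid_find(unsigned short vid)
{
	struct vid_entry *e;

	for (e = vid_list; e; e = e->next)
		if (e->vid == vid)
			return e;
	return NULL;
}

/* Add a user of @vid: create the entry on first use, else bump refcount. */
static int vid_add(unsigned short vid)
{
	struct vid_entry *e = vid_find(vid);

	if (!e) {
		e = calloc(1, sizeof(*e));
		if (!e)
			return -1;
		e->vid = vid;
		e->next = vid_list;
		vid_list = e;
		/* the kernel would also program the HW rx filter here */
	}
	e->refcount++;
	return 0;
}

/* Drop a user of @vid: free the entry when the last user is gone. */
static void vid_del(unsigned short vid)
{
	struct vid_entry **pe, *e;

	for (pe = &vid_list; (e = *pe); pe = &e->next) {
		if (e->vid != vid)
			continue;
		if (--e->refcount == 0) {
			*pe = e->next;	/* unlink; HW filter entry would be cleared here */
			free(e);
		}
		return;
	}
}

int main(void)
{
	vid_add(100);	/* e.g. eth0.100 */
	vid_add(100);	/* same vid needed again, e.g. via a bond */
	vid_del(100);	/* entry survives: refcount 2 -> 1 */
	printf("vid 100 present: %s\n", vid_find(100) ? "yes" : "no");
	vid_del(100);	/* last user gone, entry freed */
	printf("vid 100 present: %s\n", vid_find(100) ? "yes" : "no");
	return 0;
}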
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index dd9aa400888b..efea35b02e7f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,27 +51,6 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static void vlan_group_free(struct vlan_group *grp)
-{
-	int i;
-
-	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-		kfree(grp->vlan_devices_arrays[i]);
-	kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
-	struct vlan_group *grp;
-
-	grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
-	if (!grp)
-		return NULL;
-
-	grp->real_dev = real_dev;
-	return grp;
-}
-
 static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
 	struct net_device **array;
@@ -92,22 +71,20 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 	return 0;
 }
 
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
-	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
+	struct vlan_info *vlan_info;
 	struct vlan_group *grp;
 	u16 vlan_id = vlan->vlan_id;
 
 	ASSERT_RTNL();
 
-	grp = rtnl_dereference(real_dev->vlgrp);
-	BUG_ON(!grp);
+	vlan_info = rtnl_dereference(real_dev->vlan_info);
+	BUG_ON(!vlan_info);
+
+	grp = &vlan_info->grp;
 
 	/* Take it out of our own structures, but be sure to interlock with
 	 * HW accelerating devices or SW vlan input packet processing if
@@ -116,7 +93,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (vlan_id)
 		vlan_vid_del(real_dev, vlan_id);
 
-	grp->nr_vlans--;
+	grp->nr_vlan_devs--;
 
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
@@ -128,16 +105,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	 */
 	unregister_netdevice_queue(dev, head);
 
-	/* If the group is now empty, kill off the group. */
-	if (grp->nr_vlans == 0) {
+	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 
-		RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
-		/* Free the group, after all cpu's are done. */
-		call_rcu(&grp->rcu, vlan_rcu_free);
-	}
-
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
@@ -169,17 +139,23 @@ int register_vlan_dev(struct net_device *dev)
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
 	u16 vlan_id = vlan->vlan_id;
-	struct vlan_group *grp, *ngrp = NULL;
+	struct vlan_info *vlan_info;
+	struct vlan_group *grp;
 	int err;
 
-	grp = rtnl_dereference(real_dev->vlgrp);
-	if (!grp) {
-		ngrp = grp = vlan_group_alloc(real_dev);
-		if (!grp)
-			return -ENOBUFS;
+	err = vlan_vid_add(real_dev, vlan_id);
+	if (err)
+		return err;
+
+	vlan_info = rtnl_dereference(real_dev->vlan_info);
+	/* vlan_info should be there now. vlan_vid_add took care of it */
+	BUG_ON(!vlan_info);
+
+	grp = &vlan_info->grp;
+	if (grp->nr_vlan_devs == 0) {
 		err = vlan_gvrp_init_applicant(real_dev);
 		if (err < 0)
-			goto out_free_group;
+			goto out_vid_del;
 	}
 
 	err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -200,23 +176,15 @@ int register_vlan_dev(struct net_device *dev)
 	 * it into our local structure.
 	 */
 	vlan_group_set_device(grp, vlan_id, dev);
-	grp->nr_vlans++;
-
-	if (ngrp) {
-		rcu_assign_pointer(real_dev->vlgrp, ngrp);
-	}
-	vlan_vid_add(real_dev, vlan_id);
+	grp->nr_vlan_devs++;
 
 	return 0;
 
 out_uninit_applicant:
-	if (ngrp)
+	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
-	if (ngrp) {
-		/* Free the group, after all cpu's are done. */
-		call_rcu(&ngrp->rcu, vlan_rcu_free);
-	}
+out_vid_del:
+	vlan_vid_del(real_dev, vlan_id);
 	return err;
 }
 
@@ -357,6 +325,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 {
 	struct net_device *dev = ptr;
 	struct vlan_group *grp;
+	struct vlan_info *vlan_info;
 	int i, flgs;
 	struct net_device *vlandev;
 	struct vlan_dev_priv *vlan;
@@ -372,9 +341,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		vlan_vid_add(dev, 0);
 	}
 
-	grp = rtnl_dereference(dev->vlgrp);
-	if (!grp)
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
 		goto out;
+	grp = &vlan_info->grp;
 
 	/* It is OK that we do not hold the group lock right now,
 	 * as we run under the RTNL lock.
@@ -478,9 +448,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (!vlandev)
 			continue;
 
-		/* unregistration of last vlan destroys group, abort
+		/* removal of last vid destroys vlan_info, abort
 		 * afterwards */
-		if (grp->nr_vlans == 1)
+		if (vlan_info->nr_vids == 1)
 			i = VLAN_N_VID;
 
 		unregister_vlan_dev(vlandev, &list);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index d3c4ea4a3836..28d8dc20cb6d 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -3,6 +3,7 @@
 
 #include <linux/if_vlan.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/list.h>
 
 
 /**
@@ -74,6 +75,29 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 	return netdev_priv(dev);
 }
 
+/* if this changes, algorithm will have to be reworked because this
+ * depends on completely exhausting the VLAN identifier space.  Thus
+ * it gives constant time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+	unsigned int		nr_vlan_devs;
+	struct hlist_node	hlist;	/* linked list */
+	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+	struct net_device	*real_dev; /* The ethernet(like) device
+					    * the vlan is attached to.
+					    */
+	struct vlan_group	grp;
+	struct list_head	vid_list;
+	unsigned int		nr_vids;
+	struct rcu_head		rcu;
+};
+
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
 						       u16 vlan_id)
 {
@@ -97,10 +121,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
 					       u16 vlan_id)
 {
-	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-	if (grp)
-		return vlan_group_get_device(grp, vlan_id);
+	if (vlan_info)
+		return vlan_group_get_device(&vlan_info->grp, vlan_id);
 
 	return NULL;
 }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 544f9cb9678c..329e0313e01f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
 					u16 vlan_id)
 {
-	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-	if (grp) {
-		return vlan_group_get_device(grp, vlan_id);
+	if (vlan_info) {
+		return vlan_group_get_device(&vlan_info->grp, vlan_id);
 	} else {
 		/*
 		 * Bonding slaves do not have grp assigned to themselves.
@@ -147,25 +147,177 @@ err_free:
 	return NULL;
 }
 
-int vlan_vid_add(struct net_device *dev, unsigned short vid)
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+	int i;
+
+	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+		kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+	vlan_group_free(&vlan_info->grp);
+	kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+	struct vlan_info *vlan_info;
+
+	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+	if (!vlan_info)
+		return NULL;
+
+	vlan_info->real_dev = dev;
+	INIT_LIST_HEAD(&vlan_info->vid_list);
+	return vlan_info;
+}
+
+struct vlan_vid_info {
+	struct list_head list;
+	unsigned short vid;
+	int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+					       unsigned short vid)
+{
+	struct vlan_vid_info *vid_info;
+
+	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+		if (vid_info->vid == vid)
+			return vid_info;
+	}
+	return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+	struct vlan_vid_info *vid_info;
+
+	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+	if (!vid_info)
+		return NULL;
+	vid_info->vid = vid;
+
+	return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+			  struct vlan_vid_info **pvid_info)
 {
+	struct net_device *dev = vlan_info->real_dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	struct vlan_vid_info *vid_info;
+	int err;
+
+	vid_info = vlan_vid_info_alloc(vid);
+	if (!vid_info)
+		return -ENOMEM;
 
 	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    ops->ndo_vlan_rx_add_vid) {
-		return ops->ndo_vlan_rx_add_vid(dev, vid);
+		err = ops->ndo_vlan_rx_add_vid(dev, vid);
+		if (err) {
+			kfree(vid_info);
+			return err;
+		}
 	}
+	list_add(&vid_info->list, &vlan_info->vid_list);
+	vlan_info->nr_vids++;
+	*pvid_info = vid_info;
 	return 0;
 }
 
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+	struct vlan_info *vlan_info;
+	struct vlan_vid_info *vid_info;
+	bool vlan_info_created = false;
+	int err;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info) {
+		vlan_info = vlan_info_alloc(dev);
+		if (!vlan_info)
+			return -ENOMEM;
+		vlan_info_created = true;
+	}
+	vid_info = vlan_vid_info_get(vlan_info, vid);
+	if (!vid_info) {
+		err = __vlan_vid_add(vlan_info, vid, &vid_info);
+		if (err)
+			goto out_free_vlan_info;
+	}
+	vid_info->refcount++;
+
+	if (vlan_info_created)
+		rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+	return 0;
+
+out_free_vlan_info:
+	if (vlan_info_created)
+		kfree(vlan_info);
+	return err;
+}
 EXPORT_SYMBOL(vlan_vid_add);
 
-void vlan_vid_del(struct net_device *dev, unsigned short vid)
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+			   struct vlan_vid_info *vid_info)
 {
+	struct net_device *dev = vlan_info->real_dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	unsigned short vid = vid_info->vid;
+	int err;
 
 	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    ops->ndo_vlan_rx_kill_vid) {
-		ops->ndo_vlan_rx_kill_vid(dev, vid);
+		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+		if (err) {
+			pr_warn("failed to kill vid %d for device %s\n",
+				vid, dev->name);
+		}
+	}
+	list_del(&vid_info->list);
+	kfree(vid_info);
+	vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+	struct vlan_info *vlan_info;
+	struct vlan_vid_info *vid_info;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
+		return;
+
+	vid_info = vlan_vid_info_get(vlan_info, vid);
+	if (!vid_info)
+		return;
+	vid_info->refcount--;
+	if (vid_info->refcount == 0) {
+		__vlan_vid_del(vlan_info, vid_info);
+		if (vlan_info->nr_vids == 0) {
+			RCU_INIT_POINTER(dev->vlan_info, NULL);
+			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+		}
 	}
 }
 EXPORT_SYMBOL(vlan_vid_del);