Diffstat (limited to 'include/linux/if_vlan.h')
-rw-r--r--      include/linux/if_vlan.h      238
1 files changed, 71 insertions, 167 deletions
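At a glance, the patch below retires the skb->cb[] "TX cookie" and the vlan_dev_info/priority-mapping internals from this public header, stores the tag in the new skb->vlan_tci field instead, and switches the helpers from unsigned short tags to u16 TCIs. As a rough, illustrative sketch of what that means on a driver's transmit path (foo_start_xmit and its device are hypothetical and not part of this patch; only the vlan_tx_tag_* accessors come from the header):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical hard_start_xmit: after this patch, vlan_tx_tag_present() and
 * vlan_tx_tag_get() simply read skb->vlan_tci instead of a magic-tagged
 * cookie stored in skb->cb[].
 */
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u16 tci = 0;

        if (vlan_tx_tag_present(skb))
                tci = vlan_tx_tag_get(skb);

        /* ... write 'tci' into the TX descriptor and hand the skb to HW ... */
        return NETDEV_TX_OK;
}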
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 15ace02b7b24..9e7b49b8062d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -14,10 +14,6 @@
 #define _LINUX_IF_VLAN_H_
 
 #ifdef __KERNEL__
-
-/* externally defined structs */
-struct hlist_node;
-
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
@@ -91,7 +87,7 @@ struct vlan_group {
 };
 
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
-						       unsigned int vlan_id)
+						       u16 vlan_id)
 {
 	struct net_device **array;
 	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
@@ -99,7 +95,7 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
 }
 
 static inline void vlan_group_set_device(struct vlan_group *vg,
-					 unsigned int vlan_id,
+					 u16 vlan_id,
 					 struct net_device *dev)
 {
 	struct net_device **array;
@@ -109,164 +105,81 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
 	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
 }
 
-struct vlan_priority_tci_mapping {
-	u32 priority;
-	unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
-				  * at provisioning time.
-				  * ((skb->priority << 13) & 0xE000)
-				  */
-	struct vlan_priority_tci_mapping *next;
-};
+#define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci)
+#define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci)
 
-/* Holds information that makes sense if this device is a VLAN device. */
-struct vlan_dev_info {
-	/** This will be the mapping that correlates skb->priority to
-	 * 3 bits of VLAN QOS tags...
-	 */
-	unsigned int nr_ingress_mappings;
-	u32 ingress_priority_map[8];
-
-	unsigned int nr_egress_mappings;
-	struct vlan_priority_tci_mapping *egress_priority_map[16]; /* hash table */
-
-	unsigned short vlan_id;	/* The VLAN Identifier for this interface. */
-	unsigned short flags;	/* (1 << 0) re_order_header   This option will cause the
-				 * VLAN code to move around the ethernet header on
-				 * ingress to make the skb look **exactly** like it
-				 * came in from an ethernet port. This destroys some of
-				 * the VLAN information in the skb, but it fixes programs
-				 * like DHCP that use packet-filtering and don't understand
-				 * 802.1Q
-				 */
-	struct net_device *real_dev;	/* the underlying device/interface */
-	unsigned char real_dev_addr[ETH_ALEN];
-	struct proc_dir_entry *dent;	/* Holds the proc data */
-	unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */
-	unsigned long cnt_encap_on_xmit;      /* How many times did we have to encapsulate the skb on TX. */
-};
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+			     u16 vlan_tci, int polling);
+#else
+static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-	return netdev_priv(dev);
+	BUG();
+	return NULL;
 }
 
-/* inline functions */
-static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
-					      unsigned short vlan_tag)
+static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
-	struct vlan_dev_info *vip = vlan_dev_info(dev);
-
-	return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
+	BUG();
+	return 0;
 }
 
-/* VLAN tx hw acceleration helpers. */
-struct vlan_skb_tx_cookie {
-	u32 magic;
-	u32 vlan_tag;
-};
-
-#define VLAN_TX_COOKIE_MAGIC	0x564c414e	/* "VLAN" in ascii. */
-#define VLAN_TX_SKB_CB(__skb)	((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
-#define vlan_tx_tag_present(__skb) \
-	(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
-#define vlan_tx_tag_get(__skb)	(VLAN_TX_SKB_CB(__skb)->vlan_tag)
-
-/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
-static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
-				    struct vlan_group *grp,
-				    unsigned short vlan_tag, int polling)
+static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+				    u16 vlan_tci, int polling)
 {
-	struct net_device_stats *stats;
-
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
-
-	skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
-	if (skb->dev == NULL) {
-		dev_kfree_skb_any(skb);
-
-		/* Not NET_RX_DROP, this is not being dropped
-		 * due to congestion.
-		 */
-		return 0;
-	}
-
-	skb->dev->last_rx = jiffies;
-
-	stats = &skb->dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
-
-	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
-	switch (skb->pkt_type) {
-	case PACKET_BROADCAST:
-		break;
-
-	case PACKET_MULTICAST:
-		stats->multicast++;
-		break;
-
-	case PACKET_OTHERHOST:
-		/* Our lower layer thinks this is not local, let's make sure.
-		 * This allows the VLAN to have a different MAC than the underlying
-		 * device, and still route correctly.
-		 */
-		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					skb->dev->dev_addr))
-			skb->pkt_type = PACKET_HOST;
-		break;
-	};
-
-	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+	BUG();
+	return NET_XMIT_SUCCESS;
 }
+#endif
 
+/**
+ * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
+ * @skb: buffer
+ * @grp: vlan group
+ * @vlan_tci: VLAN TCI as received from the card
+ */
 static inline int vlan_hwaccel_rx(struct sk_buff *skb,
 				  struct vlan_group *grp,
-				  unsigned short vlan_tag)
+				  u16 vlan_tci)
 {
-	return __vlan_hwaccel_rx(skb, grp, vlan_tag, 0);
+	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
 }
 
+/**
+ * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
+ * @skb: buffer
+ * @grp: vlan group
+ * @vlan_tci: VLAN TCI as received from the card
+ */
 static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
 					   struct vlan_group *grp,
-					   unsigned short vlan_tag)
+					   u16 vlan_tci)
 {
-	return __vlan_hwaccel_rx(skb, grp, vlan_tag, 1);
+	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
 }
 
 /**
  * __vlan_put_tag - regular VLAN tag inserting
  * @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
  *
  * Inserts the VLAN tag into @skb as part of the payload
  * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  *
  * Following the skb_unshare() example, in case of error, the calling function
  * doesn't have to worry about freeing the original skb.
  */
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
 {
 	struct vlan_ethhdr *veth;
 
-	if (skb_headroom(skb) < VLAN_HLEN) {
-		struct sk_buff *sk_tmp = skb;
-		skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
-		kfree_skb(sk_tmp);
-		if (!skb) {
-			printk(KERN_ERR "vlan: failed to realloc headroom\n");
-			return NULL;
-		}
-	} else {
-		skb = skb_unshare(skb, GFP_ATOMIC);
-		if (!skb) {
-			printk(KERN_ERR "vlan: failed to unshare skbuff\n");
-			return NULL;
-		}
+	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+		kfree_skb(skb);
+		return NULL;
 	}
-
 	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
 
 	/* Move the mac addresses to the beginning of the new header. */
@@ -275,12 +188,10 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short
 	/* first, the ethernet type */
 	veth->h_vlan_proto = htons(ETH_P_8021Q);
 
-	/* now, the tag */
-	veth->h_vlan_TCI = htons(tag);
+	/* now, the TCI */
+	veth->h_vlan_TCI = htons(vlan_tci);
 
 	skb->protocol = htons(ETH_P_8021Q);
-	skb->mac_header -= VLAN_HLEN;
-	skb->network_header -= VLAN_HLEN;
 
 	return skb;
 }
@@ -288,18 +199,14 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short
 /**
  * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
  * @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
  *
- * Puts the VLAN tag in @skb->cb[] and lets the device do the rest
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
  */
-static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
+						     u16 vlan_tci)
 {
-	struct vlan_skb_tx_cookie *cookie;
-
-	cookie = VLAN_TX_SKB_CB(skb);
-	cookie->magic = VLAN_TX_COOKIE_MAGIC;
-	cookie->vlan_tag = tag;
-
+	skb->vlan_tci = vlan_tci;
 	return skb;
 }
 
@@ -308,28 +215,28 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsign
 /**
  * vlan_put_tag - inserts VLAN tag according to device features
  * @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
  *
  * Assumes skb->dev is the target that will xmit this frame.
  * Returns a VLAN tagged skb.
  */
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
 {
 	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
-		return __vlan_hwaccel_put_tag(skb, tag);
+		return __vlan_hwaccel_put_tag(skb, vlan_tci);
 	} else {
-		return __vlan_put_tag(skb, tag);
+		return __vlan_put_tag(skb, vlan_tci);
 	}
 }
 
 /**
  * __vlan_get_tag - get the VLAN ID that is part of the payload
  * @skb: skbuff to query
- * @tag: buffer to store vlaue
+ * @vlan_tci: buffer to store vlaue
  *
  * Returns error if the skb is not of VLAN type
  */
-static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 {
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
 
@@ -337,29 +244,25 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
 		return -EINVAL;
 	}
 
-	*tag = ntohs(veth->h_vlan_TCI);
-
+	*vlan_tci = ntohs(veth->h_vlan_TCI);
 	return 0;
 }
 
 /**
  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  * @skb: skbuff to query
- * @tag: buffer to store vlaue
+ * @vlan_tci: buffer to store vlaue
  *
- * Returns error if @skb->cb[] is not set correctly
+ * Returns error if @skb->vlan_tci is not set correctly
  */
 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
-					 unsigned short *tag)
+					 u16 *vlan_tci)
 {
-	struct vlan_skb_tx_cookie *cookie;
-
-	cookie = VLAN_TX_SKB_CB(skb);
-	if (cookie->magic == VLAN_TX_COOKIE_MAGIC) {
-		*tag = cookie->vlan_tag;
+	if (vlan_tx_tag_present(skb)) {
+		*vlan_tci = skb->vlan_tci;
 		return 0;
 	} else {
-		*tag = 0;
+		*vlan_tci = 0;
 		return -EINVAL;
 	}
 }
@@ -369,16 +272,16 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
- * @tag: buffer to store vlaue
+ * @vlan_tci: buffer to store vlaue
  *
  * Returns error if the skb is not VLAN tagged
  */
-static inline int vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
+static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 {
 	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
-		return __vlan_hwaccel_get_tag(skb, tag);
+		return __vlan_hwaccel_get_tag(skb, vlan_tci);
 	} else {
-		return __vlan_get_tag(skb, tag);
+		return __vlan_get_tag(skb, vlan_tci);
 	}
 }
 
@@ -402,6 +305,7 @@ enum vlan_ioctl_cmds {
 
 enum vlan_flags {
 	VLAN_FLAG_REORDER_HDR	= 0x1,
+	VLAN_FLAG_GVRP		= 0x2,
 };
 
 enum vlan_name_types {
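For completeness, here is a hedged sketch of how the reworked helpers are meant to be used after this change. The foo_* names, the private struct and the 'tagged' flag are illustrative assumptions; only the vlan_* and netif_* calls come from the header patched above.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
        struct vlan_group *vlgrp;       /* vlan_group handed down by the 8021q module */
};

/* Hypothetical NAPI receive fragment: the hardware stripped the 802.1Q header
 * and reported the TCI in the RX descriptor.  vlan_hwaccel_receive_skb() maps
 * the frame onto the right VLAN device via the vlan_group; untagged frames go
 * up the normal path.
 */
static void foo_receive_one(struct foo_priv *priv, struct sk_buff *skb,
                            u16 rx_tci, bool tagged)
{
        if (tagged && priv->vlgrp)
                vlan_hwaccel_receive_skb(skb, priv->vlgrp, rx_tci);
        else
                netif_receive_skb(skb);
}

/* Software tagging on transmit: vlan_put_tag() either records the TCI in
 * skb->vlan_tci (NETIF_F_HW_VLAN_TX) or pushes a real 802.1Q header with
 * __vlan_put_tag(), which may free the skb and return NULL on error.
 */
static struct sk_buff *foo_tag_for_xmit(struct sk_buff *skb, u16 tci)
{
        return vlan_put_tag(skb, tci);
}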