author	Pedro Garcia <pedro.netdev@dondevamos.com>	2010-07-18 18:38:44 -0400
committer	David S. Miller <davem@davemloft.net>	2010-07-18 18:38:44 -0400
commit	ad1afb00393915a51c21b1ae8704562bf036855f (patch)
tree	68ead91b78624a66f4aabb9699f4b414b914a762 /net/8021q
parent	01893c82b4e6949f4e3a453c4faea34970359d76 (diff)
vlan_dev: VLAN 0 should be treated as "no vlan tag" (802.1p packet)
- Without the 8021q module loaded in the kernel, all 802.1p packets
  (VLAN 0 but QoS tagging) are silently discarded (as expected, as
  the protocol is not loaded).

- Without this patch in the 8021q module, these packets are forwarded
  to the module, but they are also discarded if VLAN 0 is not
  configured, which should not be the default behaviour, as VLAN 0 is
  not really a VLAN-tagged packet but an 802.1p packet. Defining
  VLAN 0 makes it almost impossible to communicate with mixed 802.1p
  and non-802.1p devices on the same network due to ARP table issues.

- Changed the logic to skip the VLAN-specific code in vlan_skb_recv
  if the VID is 0 and no VLAN with ID 0 has been defined; we accept
  the packet with the encapsulated proto and pass it later to
  netif_rx (see the sketch after this message).

- In the vlan device event handler, added logic to add VLAN 0 to the
  HW filter on devices that support it (without this, the HW filter
  prevented any VLAN 0 traffic from reaching the stack on e1000e
  under 2.6.35, and probably on other HW-filtered cards as well, so
  we fix it here).

- In the vlan unregister logic, prevent the removal of VLAN 0 on
  devices with HW filter.

- The default behaviour is to ignore the VLAN 0 tag and accept the
  packet as if it were not tagged, but a VLAN 0 device can still be
  defined if desired (so it is backwards compatible).

Signed-off-by: Pedro Garcia <pedro.netdev@dondevamos.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
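For illustration only (not part of this patch): a standalone user-space C sketch of how the 802.1Q TCI splits into priority bits and a VLAN ID, and why VID 0 means "priority tag only, no VLAN". The mask value mirrors the kernel's VLAN_VID_MASK (0x0fff); the helper name and sample TCI values are made up for the example.

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK 0x0fff	/* low 12 bits of the TCI: the VLAN ID */

/* Hypothetical helper: classify a TCI the same way vlan_skb_recv now
 * distinguishes real VLAN frames from 802.1p priority-only frames. */
static void classify_tci(uint16_t vlan_tci)
{
	uint16_t vid = vlan_tci & VLAN_VID_MASK;	/* same mask as in the patch */
	uint8_t  pcp = vlan_tci >> 13;			/* top 3 bits: 802.1p priority */

	if (vid == 0)
		printf("TCI 0x%04x: priority tag only (PCP %u), treat as untagged\n",
		       vlan_tci, pcp);
	else
		printf("TCI 0x%04x: VLAN %u (PCP %u), needs a VLAN device\n",
		       vlan_tci, vid, pcp);
}

int main(void)
{
	classify_tci(0x6000);	/* PCP 3, VID 0   -> 802.1p frame       */
	classify_tci(0x0064);	/* PCP 0, VID 100 -> regular VLAN frame */
	return 0;
}

With the patch applied, the first case is handed to the underlying interface (unless a VLAN 0 device was explicitly configured), while the second is still dropped if no matching VLAN device exists.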
Diffstat (limited to 'net/8021q')
-rw-r--r--	net/8021q/vlan.c	13
-rw-r--r--	net/8021q/vlan_core.c	19
-rw-r--r--	net/8021q/vlan_dev.c	103
3 files changed, 86 insertions(+), 49 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3c1c8c14e929..a2ad15250575 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -155,9 +155,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	BUG_ON(!grp);
 
 	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing.
+	 * HW accelerating devices or SW vlan input packet processing if
+	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
-	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
+	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
 		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
 
 	grp->nr_vlans--;
@@ -419,6 +420,14 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
+	if ((event == NETDEV_UP) &&
+	    (dev->features & NETIF_F_HW_VLAN_FILTER) &&
+	    dev->netdev_ops->ndo_vlan_rx_add_vid) {
+		pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+			dev->name);
+		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+	}
+
 	grp = __vlan_find_group(dev);
 	if (!grp)
 		goto out;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 1b9406a31f0c..01ddb0472f86 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -8,6 +8,9 @@
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
+	struct net_device *vlan_dev;
+	u16 vlan_id;
+
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
@@ -16,9 +19,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	vlan_id = vlan_tci & VLAN_VID_MASK;
+	vlan_dev = vlan_group_get_device(grp, vlan_id);
 
-	if (!skb->dev)
+	if (vlan_dev)
+		skb->dev = vlan_dev;
+	else if (vlan_id)
 		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
@@ -83,15 +89,20 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 		unsigned int vlan_tci, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	struct net_device *vlan_dev;
+	u16 vlan_id;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
 		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	vlan_id = vlan_tci & VLAN_VID_MASK;
+	vlan_dev = vlan_group_get_device(grp, vlan_id);
 
-	if (!skb->dev)
+	if (vlan_dev)
+		skb->dev = vlan_dev;
+	else if (vlan_id)
 		goto drop;
 
 	for (p = napi->gro_list; p; p = p->next) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7cb285f96b99..3d59c9bf8feb 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -142,6 +142,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct vlan_hdr *vhdr;
 	struct vlan_rx_stats *rx_stats;
+	struct net_device *vlan_dev;
 	u16 vlan_id;
 	u16 vlan_tci;
 
@@ -157,55 +158,71 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	vlan_id = vlan_tci & VLAN_VID_MASK;
 
 	rcu_read_lock();
-	skb->dev = __find_vlan_dev(dev, vlan_id);
-	if (!skb->dev) {
-		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-			 __func__, vlan_id, dev->name);
-		goto err_unlock;
-	}
-
-	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
-			       smp_processor_id());
-	u64_stats_update_begin(&rx_stats->syncp);
-	rx_stats->rx_packets++;
-	rx_stats->rx_bytes += skb->len;
-
-	skb_pull_rcsum(skb, VLAN_HLEN);
-
-	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
+	vlan_dev = __find_vlan_dev(dev, vlan_id);
 
-	pr_debug("%s: priority: %u for TCI: %hu\n",
-		 __func__, skb->priority, vlan_tci);
-
-	switch (skb->pkt_type) {
-	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
-		/* stats->broadcast ++; // no such counter :-( */
-		break;
-
-	case PACKET_MULTICAST:
-		rx_stats->rx_multicast++;
-		break;
-
-	case PACKET_OTHERHOST:
-		/* Our lower layer thinks this is not local, let's make sure.
-		 * This allows the VLAN to have a different MAC than the
-		 * underlying device, and still route correctly.
-		 */
-		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					skb->dev->dev_addr))
-			skb->pkt_type = PACKET_HOST;
-		break;
-	default:
-		break;
-	}
-	u64_stats_update_end(&rx_stats->syncp);
+	/* If the VLAN device is defined, we use it.
+	 * If not, and the VID is 0, it is a 802.1p packet (not
+	 * really a VLAN), so we will just netif_rx it later to the
+	 * original interface, but with the skb->proto set to the
+	 * wrapped proto: we do nothing here.
+	 */
+
+	if (!vlan_dev) {
+		if (vlan_id) {
+			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
+				 __func__, vlan_id, dev->name);
+			goto err_unlock;
+		}
+		rx_stats = NULL;
+	} else {
+		skb->dev = vlan_dev;
+
+		rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
+				       smp_processor_id());
+		u64_stats_update_begin(&rx_stats->syncp);
+		rx_stats->rx_packets++;
+		rx_stats->rx_bytes += skb->len;
+
+		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
+
+		pr_debug("%s: priority: %u for TCI: %hu\n",
+			 __func__, skb->priority, vlan_tci);
+
+		switch (skb->pkt_type) {
+		case PACKET_BROADCAST:
+			/* Yeah, stats collect these together.. */
+			/* stats->broadcast ++; // no such counter :-( */
+			break;
+
+		case PACKET_MULTICAST:
+			rx_stats->rx_multicast++;
+			break;
+
+		case PACKET_OTHERHOST:
+			/* Our lower layer thinks this is not local, let's make
+			 * sure.
+			 * This allows the VLAN to have a different MAC than the
+			 * underlying device, and still route correctly.
+			 */
+			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+						skb->dev->dev_addr))
+				skb->pkt_type = PACKET_HOST;
+			break;
+		default:
+			break;
+		}
+		u64_stats_update_end(&rx_stats->syncp);
+	}
 
+	skb_pull_rcsum(skb, VLAN_HLEN);
 	vlan_set_encap_proto(skb, vhdr);
 
-	skb = vlan_check_reorder_header(skb);
-	if (!skb) {
-		rx_stats->rx_errors++;
-		goto err_unlock;
-	}
+	if (vlan_dev) {
+		skb = vlan_check_reorder_header(skb);
+		if (!skb) {
+			rx_stats->rx_errors++;
+			goto err_unlock;
+		}
+	}
 
 	netif_rx(skb);