author     Jesse Gross <jesse@nicira.com>          2010-10-20 09:56:06 -0400
committer  David S. Miller <davem@davemloft.net>   2010-10-21 04:26:53 -0400
commit     3701e51382a026cba10c60b03efabe534fba4ca4 (patch)
tree       9f205d8ad0edf65b4405d9b60cb65f3cd8e44ae4 /net/8021q
parent     65ac6a5fa658b90f1be700c55e7cd72e4611015d (diff)
vlan: Centralize handling of hardware acceleration.
Currently each driver that is capable of vlan hardware acceleration
must be aware of the vlan groups that are configured and then pass the
stripped tag to a specialized receive function. This is different from
other types of hardware offload in that it places a significant amount
of knowledge in the driver itself rather than keeping it in the
networking core.

This makes vlan offloading function more similarly to other forms of
offloading (such as checksum offloading or TSO) by doing the following:
* On receive, stripped vlans are passed directly to the network core,
  without attempting to check for vlan groups or reconstruct the
  header if no group is configured.
* Vlans are made less special by folding the logic into the main
  receive routines.
* On transmit, the device layer will add the vlan header in software
  if the hardware doesn't support it, instead of spreading that logic
  out in upper layers, such as bonding.

There are a number of advantages to this:
* Fixes all bugs with drivers incorrectly dropping vlan headers at once.
* Avoids having to disable VLAN acceleration when in promiscuous mode
  (good for bridging since it always puts devices in promiscuous mode).
* Keeps the VLAN tag separate until given to the ultimate consumer,
  which avoids needing to do header reconstruction as in tg3 unless
  absolutely necessary.
* Consolidates common code in core networking.

Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
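To illustrate the new driver-side contract on receive: a driver with
NETIF_F_HW_VLAN_RX no longer looks up a vlan group; it records the
stripped TCI and hands the skb to the ordinary receive path. A minimal
sketch (the driver-local variable "tci" is hypothetical; the helper
calls are the ones this patch keeps):

	/* hw already stripped the 802.1Q header and reported the TCI */
	__vlan_hwaccel_put_tag(skb, tci);  /* stash tag in skb->vlan_tci */
	napi_gro_receive(napi, skb);       /* no vlan_group lookup needed */

On transmit, the matching software fallback moves into the core. A
rough sketch of the net/core/dev.c half of this patch, which is
outside this diffstat (the exact hunk there is an assumption):

	/* insert the header in software if the device can't do it */
	if (vlan_tx_tag_present(skb) &&
	    !(dev->features & NETIF_F_HW_VLAN_TX)) {
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			goto out;       /* allocation failed; drop */
		skb->vlan_tci = 0;
	}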
Diffstat (limited to 'net/8021q')
-rw-r--r--   net/8021q/vlan.c         9
-rw-r--r--   net/8021q/vlan_core.c  125
2 files changed, 29 insertions, 105 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index f862dccf6bb0..05b867e43757 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -135,7 +135,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 		vlan_gvrp_uninit_applicant(real_dev);
 
 		rcu_assign_pointer(real_dev->vlgrp, NULL);
-		if (real_dev->features & NETIF_F_HW_VLAN_RX)
+		if (ops->ndo_vlan_rx_register)
 			ops->ndo_vlan_rx_register(real_dev, NULL);
 
 		/* Free the group, after all cpu's are done. */
@@ -156,11 +156,6 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 		return -EOPNOTSUPP;
 	}
 
-	if ((real_dev->features & NETIF_F_HW_VLAN_RX) && !ops->ndo_vlan_rx_register) {
-		pr_info("8021q: device %s has buggy VLAN hw accel\n", name);
-		return -EOPNOTSUPP;
-	}
-
 	if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
 		pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
@@ -213,7 +208,7 @@ int register_vlan_dev(struct net_device *dev)
 	grp->nr_vlans++;
 
 	if (ngrp) {
-		if (real_dev->features & NETIF_F_HW_VLAN_RX)
+		if (ops->ndo_vlan_rx_register)
 			ops->ndo_vlan_rx_register(real_dev, ngrp);
 		rcu_assign_pointer(real_dev->vlgrp, ngrp);
 	}
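The rewritten vlan_hwaccel_do_receive() in vlan_core.c below calls
vlan_find_dev(), a lookup helper this commit adds in net/8021q/vlan.h
(outside the hunks shown here). Its shape is approximately as follows,
a sketch reconstructed from how it is used below:

	static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
						       u16 vlan_id)
	{
		struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

		/* fold the "no vlan group configured" check into one place */
		if (grp)
			return vlan_group_get_device(grp, vlan_id);

		return NULL;
	}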
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dee727ce0291..69b2f79800a5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,54 +4,29 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
-int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
-		      u16 vlan_tci, int polling)
+bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 {
+	struct sk_buff *skb = *skbp;
+	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
-	u16 vlan_id;
-
-	if (netpoll_rx(skb))
-		return NET_RX_DROP;
-
-	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		skb->deliver_no_wcard = 1;
+	struct vlan_rx_stats *rx_stats;
 
-	skb->skb_iif = skb->dev->ifindex;
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-	vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-	if (vlan_dev)
-		skb->dev = vlan_dev;
-	else if (vlan_id) {
-		if (!(skb->dev->flags & IFF_PROMISC))
-			goto drop;
-		skb->pkt_type = PACKET_OTHERHOST;
+	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+	if (!vlan_dev) {
+		if (vlan_id)
+			skb->pkt_type = PACKET_OTHERHOST;
+		return false;
 	}
 
-	return polling ? netif_receive_skb(skb) : netif_rx(skb);
+	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return false;
 
-drop:
-	atomic_long_inc(&skb->dev->rx_dropped);
-	dev_kfree_skb_any(skb);
-	return NET_RX_DROP;
-}
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
-
-void vlan_hwaccel_do_receive(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-	struct vlan_rx_stats *rx_stats;
-
-	skb->dev = vlan_dev_real_dev(dev);
-	netif_nit_deliver(skb);
-
-	skb->dev = dev;
-	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
+	skb->dev = vlan_dev;
+	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
+	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
 
 	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
@@ -68,11 +43,13 @@ void vlan_hwaccel_do_receive(struct sk_buff *skb)
 		 * This allows the VLAN to have a different MAC than the
 		 * underlying device, and still route correctly. */
 		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					dev->dev_addr))
+					vlan_dev->dev_addr))
 			skb->pkt_type = PACKET_HOST;
 		break;
 	}
 	u64_stats_update_end(&rx_stats->syncp);
+
+	return true;
 }
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -87,75 +64,27 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
-static gro_result_t
-vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
-		unsigned int vlan_tci, struct sk_buff *skb)
+/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
+int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+		      u16 vlan_tci, int polling)
 {
-	struct sk_buff *p;
-	struct net_device *vlan_dev;
-	u16 vlan_id;
-
-	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		skb->deliver_no_wcard = 1;
-
-	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-	vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-	if (vlan_dev)
-		skb->dev = vlan_dev;
-	else if (vlan_id) {
-		if (!(skb->dev->flags & IFF_PROMISC))
-			goto drop;
-		skb->pkt_type = PACKET_OTHERHOST;
-	}
-
-	for (p = napi->gro_list; p; p = p->next) {
-		unsigned long diffs;
-
-		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
-		NAPI_GRO_CB(p)->same_flow = !diffs;
-		NAPI_GRO_CB(p)->flush = 0;
-	}
-
-	return dev_gro_receive(napi, skb);
-
-drop:
-	atomic_long_inc(&skb->dev->rx_dropped);
-	return GRO_DROP;
+	return polling ? netif_receive_skb(skb) : netif_rx(skb);
 }
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 			      unsigned int vlan_tci, struct sk_buff *skb)
 {
-	if (netpoll_rx_on(skb))
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-			? GRO_DROP : GRO_NORMAL;
-
-	skb_gro_reset_offset(skb);
-
-	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
+	return napi_gro_receive(napi, skb);
 }
 EXPORT_SYMBOL(vlan_gro_receive);
 
 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 			    unsigned int vlan_tci)
 {
-	struct sk_buff *skb = napi_frags_skb(napi);
-
-	if (!skb)
-		return GRO_DROP;
-
-	if (netpoll_rx_on(skb)) {
-		skb->protocol = eth_type_trans(skb, skb->dev);
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-			? GRO_DROP : GRO_NORMAL;
-	}
-
-	return napi_frags_finish(napi, skb,
-				 vlan_gro_common(napi, grp, vlan_tci, skb));
+	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
+	return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
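For context on the new calling convention: vlan_hwaccel_do_receive()
now takes a struct sk_buff ** and returns bool, so the caller can both
observe a retargeted skb and detect a failed skb_share_check(). A
sketch of the consuming side (the real caller is __netif_receive_skb()
in net/core/dev.c, outside this diffstat; the exact control flow there
is an assumption):

	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(&skb)) {
		/* skb->dev now points at the vlan device and the tag is
		 * cleared; restart delivery against the vlan device. */
		ret = __netif_receive_skb(skb);
		goto out;
	} else if (unlikely(!skb)) {
		/* skb_share_check() inside the helper dropped the skb */
		goto out;
	}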