author     Jesse Gross <jesse@nicira.com>         2010-10-20 09:56:06 -0400
committer  David S. Miller <davem@davemloft.net>  2010-10-21 04:26:53 -0400
commit     3701e51382a026cba10c60b03efabe534fba4ca4 (patch)
tree       9f205d8ad0edf65b4405d9b60cb65f3cd8e44ae4 /net/8021q/vlan_core.c
parent     65ac6a5fa658b90f1be700c55e7cd72e4611015d (diff)
vlan: Centralize handling of hardware acceleration.
Currently each driver that is capable of vlan hardware acceleration
must be aware of the vlan groups that are configured and then pass
the stripped tag to a specialized receive function. This is
different from other types of hardware offload in that it places a
significant amount of knowledge in the driver itself rather than keeping
it in the networking core.
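Concretely, under the old model a hardware-accelerated driver had to cache the configured vlan_group and call a vlan-specific receive entry point. A minimal sketch of that old-style handoff (the adapter fields and flags here are hypothetical, not taken from this patch; assumes <linux/netdevice.h> and <linux/if_vlan.h>):

    /* Old model: the driver tracks the vlan group itself (hypothetical
     * adapter->vlgrp) and calls the vlan-specific receive entry point
     * with the tag the hardware stripped.
     */
    if (adapter->vlgrp && tag_stripped)
        vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_tci);
    else
        netif_receive_skb(skb);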
This makes vlan offloading work more like other forms
of offloading (such as checksum offloading or TSO) by doing the
following:
* On receive, stripped vlans are passed directly to the network
core, without checking for vlan groups or reconstructing the
header if no group is configured (see the receive and transmit
sketches after this list)
* vlans are made less special by folding the logic into the main
receive routines
* On transmit, the device layer will add the vlan header in software
if the hardware doesn't support it, instead of spreading that logic
out in upper layers, such as bonding.
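To illustrate the new receive model, here is a minimal sketch, not code from this patch, of a driver receive path once the core owns the vlan logic; the function and parameter names are hypothetical:

    /* New model: record the tag the NIC stripped, then hand the skb to
     * the stock receive entry point.  No vlan_group lookup and no header
     * reconstruction in the driver.
     */
    static void example_rx(struct napi_struct *napi, struct sk_buff *skb,
                           u16 vlan_tci, bool tag_stripped)
    {
        if (tag_stripped)
            __vlan_hwaccel_put_tag(skb, vlan_tci); /* tag rides in skb->vlan_tci */

        napi_gro_receive(napi, skb); /* the core decides what the tag means */
    }

On the transmit side, the software fallback amounts to something like the following (a sketch of the idea, not the exact net/core/dev.c hunk, which is outside this diffstat-limited view):

    /* If the device cannot insert the tag in hardware, push it into the
     * packet data in software before handing the skb to the driver.
     */
    if (vlan_tx_tag_present(skb) && !(dev->features & NETIF_F_HW_VLAN_TX)) {
        skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); /* frees skb on failure */
        if (unlikely(!skb))
            return NETDEV_TX_OK;
        skb->vlan_tci = 0;
    }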
There are a number of advantages to this:
* Fixes, all at once, the bugs caused by drivers incorrectly dropping vlan headers.
* Avoids having to disable VLAN acceleration when in promiscuous mode
(good for bridging since it always puts devices in promiscuous mode).
* Keeps the VLAN tag separate until it reaches the ultimate consumer
(see the tag-access sketch after this list), which avoids header
reconstruction as in tg3 unless absolutely necessary.
* Consolidates common code in core networking.
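Because the tag stays in skb->vlan_tci instead of being folded back into the packet data, a consumer can read it with the existing helpers rather than re-parsing the header; a minimal sketch:

    u16 vid = 0;

    if (vlan_tx_tag_present(skb)) /* hardware-accelerated tag present? */
        vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;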
Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/8021q/vlan_core.c')
-rw-r--r--  net/8021q/vlan_core.c  125
1 file changed, 27 insertions(+), 98 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dee727ce0291..69b2f79800a5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,54 +4,29 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
-int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
-		      u16 vlan_tci, int polling)
+bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 {
+	struct sk_buff *skb = *skbp;
+	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
-	u16 vlan_id;
-
-	if (netpoll_rx(skb))
-		return NET_RX_DROP;
-
-	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		skb->deliver_no_wcard = 1;
+	struct vlan_rx_stats *rx_stats;
 
-	skb->skb_iif = skb->dev->ifindex;
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-	vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-	if (vlan_dev)
-		skb->dev = vlan_dev;
-	else if (vlan_id) {
-		if (!(skb->dev->flags & IFF_PROMISC))
-			goto drop;
-		skb->pkt_type = PACKET_OTHERHOST;
+	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+	if (!vlan_dev) {
+		if (vlan_id)
+			skb->pkt_type = PACKET_OTHERHOST;
+		return false;
 	}
 
-	return polling ? netif_receive_skb(skb) : netif_rx(skb);
+	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return false;
 
-drop:
-	atomic_long_inc(&skb->dev->rx_dropped);
-	dev_kfree_skb_any(skb);
-	return NET_RX_DROP;
-}
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
-
-void vlan_hwaccel_do_receive(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-	struct vlan_rx_stats *rx_stats;
-
-	skb->dev = vlan_dev_real_dev(dev);
-	netif_nit_deliver(skb);
-
-	skb->dev = dev;
-	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
+	skb->dev = vlan_dev;
+	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
+	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
 
 	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
@@ -68,11 +43,13 @@ void vlan_hwaccel_do_receive(struct sk_buff *skb)
 		 * This allows the VLAN to have a different MAC than the
 		 * underlying device, and still route correctly. */
 		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					dev->dev_addr))
+					vlan_dev->dev_addr))
 			skb->pkt_type = PACKET_HOST;
 		break;
 	}
 	u64_stats_update_end(&rx_stats->syncp);
+
+	return true;
 }
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -87,75 +64,27 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
-static gro_result_t
-vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
-		unsigned int vlan_tci, struct sk_buff *skb)
+/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
+int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+		      u16 vlan_tci, int polling)
 {
-	struct sk_buff *p;
-	struct net_device *vlan_dev;
-	u16 vlan_id;
-
-	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		skb->deliver_no_wcard = 1;
-
-	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-	vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-	if (vlan_dev)
-		skb->dev = vlan_dev;
-	else if (vlan_id) {
-		if (!(skb->dev->flags & IFF_PROMISC))
-			goto drop;
-		skb->pkt_type = PACKET_OTHERHOST;
-	}
-
-	for (p = napi->gro_list; p; p = p->next) {
-		unsigned long diffs;
-
-		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
-		NAPI_GRO_CB(p)->same_flow = !diffs;
-		NAPI_GRO_CB(p)->flush = 0;
-	}
-
-	return dev_gro_receive(napi, skb);
-
-drop:
-	atomic_long_inc(&skb->dev->rx_dropped);
-	return GRO_DROP;
+	return polling ? netif_receive_skb(skb) : netif_rx(skb);
 }
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
 			      unsigned int vlan_tci, struct sk_buff *skb)
 {
-	if (netpoll_rx_on(skb))
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-			? GRO_DROP : GRO_NORMAL;
-
-	skb_gro_reset_offset(skb);
-
-	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
+	return napi_gro_receive(napi, skb);
 }
 EXPORT_SYMBOL(vlan_gro_receive);
 
 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 			    unsigned int vlan_tci)
 {
-	struct sk_buff *skb = napi_frags_skb(napi);
-
-	if (!skb)
-		return GRO_DROP;
-
-	if (netpoll_rx_on(skb)) {
-		skb->protocol = eth_type_trans(skb, skb->dev);
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-			? GRO_DROP : GRO_NORMAL;
-	}
-
-	return napi_frags_finish(napi, skb,
-				 vlan_gro_common(napi, grp, vlan_tci, skb));
+	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
+	return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
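For context, the caller of the new vlan_hwaccel_do_receive() lives in the core receive path in net/core/dev.c, which is outside this diffstat-limited view. A sketch of that call site, reconstructed from the function's contract above rather than quoted from the patch: a true return means a vlan device claimed the packet (and *skbp may have been replaced by skb_share_check()), while a NULL *skbp signals an allocation failure.

    if (vlan_tx_tag_present(skb)) {
        if (vlan_hwaccel_do_receive(&skb)) {
            /* skb->dev now points at the vlan device; re-run delivery. */
            ret = __netif_receive_skb(skb);
            goto out;
        } else if (unlikely(!skb))
            goto out; /* skb_share_check() failed; the packet is gone */
    }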