author    Jesse Gross <jesse@nicira.com>          2010-10-20 09:56:06 -0400
committer David S. Miller <davem@davemloft.net>   2010-10-21 04:26:53 -0400
commit    3701e51382a026cba10c60b03efabe534fba4ca4
tree      9f205d8ad0edf65b4405d9b60cb65f3cd8e44ae4 /net
parent    65ac6a5fa658b90f1be700c55e7cd72e4611015d
vlan: Centralize handling of hardware acceleration.
Currently each driver that is capable of vlan hardware acceleration must be aware of the vlan groups that are configured and then pass the stripped tag to a specialized receive function. This is different from other types of hardware offload in that it places a significant amount of knowledge in the driver itself rather than keeping it in the networking core.

This makes vlan offloading function more similarly to other forms of offloading (such as checksum offloading or TSO) by doing the following:

* On receive, stripped vlan tags are passed directly to the network core, without checking for vlan groups or reconstructing the header if no group is configured (a driver-side sketch follows this message).
* vlans are made less special by folding the logic into the main receive routines.
* On transmit, the device layer will add the vlan header in software if the hardware doesn't support it, instead of spreading that logic out in upper layers such as bonding.

There are a number of advantages to this:

* Fixes all bugs with drivers incorrectly dropping vlan headers at once.
* Avoids having to disable VLAN acceleration when in promiscuous mode (good for bridging, since it always puts devices in promiscuous mode).
* Keeps the VLAN tag separate until it is given to the ultimate consumer, which avoids needing to do header reconstruction (as in tg3) unless absolutely necessary.
* Consolidates common code in the networking core.

Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
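[ Illustration, not part of the patch: a minimal sketch of the driver-side receive path under this model. The names adapter, rx_desc, status and RX_VLAN_TAG_PRESENT are hypothetical placeholders; __vlan_hwaccel_put_tag(), vlan_gro_receive() and napi_gro_receive() are the real interfaces of this era. ]

        /* Before: the driver has to know the configured vlan_group and
         * branch into a vlan-specific receive function for tagged frames. */
        if (adapter->vlgrp && (status & RX_VLAN_TAG_PRESENT))
                vlan_gro_receive(&adapter->napi, adapter->vlgrp,
                                 le16_to_cpu(rx_desc->vlan_tag), skb);
        else
                napi_gro_receive(&adapter->napi, skb);

        /* After: the driver only records the tag that the hardware
         * stripped; the core resolves the vlan device and delivers. */
        if (status & RX_VLAN_TAG_PRESENT)
                __vlan_hwaccel_put_tag(skb, le16_to_cpu(rx_desc->vlan_tag));
        napi_gro_receive(&adapter->napi, skb);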
Diffstat (limited to 'net')
-rw-r--r--   net/8021q/vlan.c        |   9
-rw-r--r--   net/8021q/vlan_core.c   | 125
-rw-r--r--   net/core/dev.c          |  47
3 files changed, 44 insertions(+), 137 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index f862dccf6bb0..05b867e43757 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -135,7 +135,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_uninit_applicant(real_dev);

                rcu_assign_pointer(real_dev->vlgrp, NULL);
-               if (real_dev->features & NETIF_F_HW_VLAN_RX)
+               if (ops->ndo_vlan_rx_register)
                        ops->ndo_vlan_rx_register(real_dev, NULL);

                /* Free the group, after all cpu's are done. */
@@ -156,11 +156,6 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
                return -EOPNOTSUPP;
        }

-       if ((real_dev->features & NETIF_F_HW_VLAN_RX) && !ops->ndo_vlan_rx_register) {
-               pr_info("8021q: device %s has buggy VLAN hw accel\n", name);
-               return -EOPNOTSUPP;
-       }
-
        if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
            (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
                pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
@@ -213,7 +208,7 @@ int register_vlan_dev(struct net_device *dev)
        grp->nr_vlans++;

        if (ngrp) {
-               if (real_dev->features & NETIF_F_HW_VLAN_RX)
+               if (ops->ndo_vlan_rx_register)
                        ops->ndo_vlan_rx_register(real_dev, ngrp);
                rcu_assign_pointer(real_dev->vlgrp, ngrp);
        }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dee727ce0291..69b2f79800a5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,54 +4,29 @@
 #include <linux/netpoll.h>
 #include "vlan.h"

-/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
-int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
-                     u16 vlan_tci, int polling)
+bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 {
+       struct sk_buff *skb = *skbp;
+       u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
        struct net_device *vlan_dev;
-       u16 vlan_id;
-
-       if (netpoll_rx(skb))
-               return NET_RX_DROP;
-
-       if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-               skb->deliver_no_wcard = 1;
+       struct vlan_rx_stats *rx_stats;

-       skb->skb_iif = skb->dev->ifindex;
-       __vlan_hwaccel_put_tag(skb, vlan_tci);
-       vlan_id = vlan_tci & VLAN_VID_MASK;
-       vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-       if (vlan_dev)
-               skb->dev = vlan_dev;
-       else if (vlan_id) {
-               if (!(skb->dev->flags & IFF_PROMISC))
-                       goto drop;
-               skb->pkt_type = PACKET_OTHERHOST;
+       vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+       if (!vlan_dev) {
+               if (vlan_id)
+                       skb->pkt_type = PACKET_OTHERHOST;
+               return false;
        }

-       return polling ? netif_receive_skb(skb) : netif_rx(skb);
+       skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return false;

-drop:
-       atomic_long_inc(&skb->dev->rx_dropped);
-       dev_kfree_skb_any(skb);
-       return NET_RX_DROP;
-}
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
-
-void vlan_hwaccel_do_receive(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       struct vlan_rx_stats *rx_stats;
-
-       skb->dev = vlan_dev_real_dev(dev);
-       netif_nit_deliver(skb);
-
-       skb->dev = dev;
-       skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
+       skb->dev = vlan_dev;
+       skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;

-       rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
@@ -68,11 +43,13 @@ void vlan_hwaccel_do_receive(struct sk_buff *skb)
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-                                       dev->dev_addr))
+                                       vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
                break;
        }
        u64_stats_update_end(&rx_stats->syncp);
+
+       return true;
 }

 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -87,75 +64,27 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);

-static gro_result_t
-vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
-               unsigned int vlan_tci, struct sk_buff *skb)
+/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
+int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+                     u16 vlan_tci, int polling)
 {
-       struct sk_buff *p;
-       struct net_device *vlan_dev;
-       u16 vlan_id;
-
-       if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-               skb->deliver_no_wcard = 1;
-
-       skb->skb_iif = skb->dev->ifindex;
        __vlan_hwaccel_put_tag(skb, vlan_tci);
-       vlan_id = vlan_tci & VLAN_VID_MASK;
-       vlan_dev = vlan_group_get_device(grp, vlan_id);
-
-       if (vlan_dev)
-               skb->dev = vlan_dev;
-       else if (vlan_id) {
-               if (!(skb->dev->flags & IFF_PROMISC))
-                       goto drop;
-               skb->pkt_type = PACKET_OTHERHOST;
-       }
-
-       for (p = napi->gro_list; p; p = p->next) {
-               unsigned long diffs;
-
-               diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
-               diffs |= compare_ether_header(skb_mac_header(p),
-                                             skb_gro_mac_header(skb));
-               NAPI_GRO_CB(p)->same_flow = !diffs;
-               NAPI_GRO_CB(p)->flush = 0;
-       }
-
-       return dev_gro_receive(napi, skb);
-
-drop:
-       atomic_long_inc(&skb->dev->rx_dropped);
-       return GRO_DROP;
+       return polling ? netif_receive_skb(skb) : netif_rx(skb);
 }
+EXPORT_SYMBOL(__vlan_hwaccel_rx);

 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
                              unsigned int vlan_tci, struct sk_buff *skb)
 {
-       if (netpoll_rx_on(skb))
-               return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-                       ? GRO_DROP : GRO_NORMAL;
-
-       skb_gro_reset_offset(skb);
-
-       return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
+       __vlan_hwaccel_put_tag(skb, vlan_tci);
+       return napi_gro_receive(napi, skb);
 }
 EXPORT_SYMBOL(vlan_gro_receive);

 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
                            unsigned int vlan_tci)
 {
-       struct sk_buff *skb = napi_frags_skb(napi);
-
-       if (!skb)
-               return GRO_DROP;
-
-       if (netpoll_rx_on(skb)) {
-               skb->protocol = eth_type_trans(skb, skb->dev);
-               return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
-                       ? GRO_DROP : GRO_NORMAL;
-       }
-
-       return napi_frags_finish(napi, skb,
-                                vlan_gro_common(napi, grp, vlan_tci, skb));
+       __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
+       return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1bfd96b1fbd4..97fd6bc2004c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2789,33 +2789,6 @@ out:
 }
 #endif

-/*
- * netif_nit_deliver - deliver received packets to network taps
- * @skb: buffer
- *
- * This function is used to deliver incoming packets to network
- * taps. It should be used when the normal netif_receive_skb path
- * is bypassed, for example because of VLAN acceleration.
- */
-void netif_nit_deliver(struct sk_buff *skb)
-{
-       struct packet_type *ptype;
-
-       if (list_empty(&ptype_all))
-               return;
-
-       skb_reset_network_header(skb);
-       skb_reset_transport_header(skb);
-       skb->mac_len = skb->network_header - skb->mac_header;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (!ptype->dev || ptype->dev == skb->dev)
-                       deliver_skb(skb, ptype, skb->dev);
-       }
-       rcu_read_unlock();
-}
-
 /**
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
@@ -2925,9 +2898,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);

-       if (vlan_tx_tag_present(skb))
-               vlan_hwaccel_do_receive(skb);
-
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
                return NET_RX_DROP;
@@ -2940,8 +2910,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
         * be delivered to pkt handlers that are exact matches.  Also
         * the deliver_no_wcard flag will be set.  If packet handlers
         * are sensitive to duplicate packets these skbs will need to
-        * be dropped at the handler.  The vlan accel path may have
-        * already set the deliver_no_wcard flag.
+        * be dropped at the handler.
         */
        null_or_orig = NULL;
        orig_dev = skb->dev;
@@ -3000,6 +2969,18 @@ ncls:
                goto out;
        }

+       if (vlan_tx_tag_present(skb)) {
+               if (pt_prev) {
+                       ret = deliver_skb(skb, pt_prev, orig_dev);
+                       pt_prev = NULL;
+               }
+               if (vlan_hwaccel_do_receive(&skb)) {
+                       ret = __netif_receive_skb(skb);
+                       goto out;
+               } else if (unlikely(!skb))
+                       goto out;
+       }
+
        /*
         * Make sure frames received on VLAN interfaces stacked on
         * bonding interfaces still make their way to any base bonding
@@ -3264,6 +3245,7 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
                unsigned long diffs;

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+               diffs |= p->vlan_tci ^ skb->vlan_tci;
                diffs |= compare_ether_header(skb_mac_header(p),
                                              skb_gro_mac_header(skb));
                NAPI_GRO_CB(p)->same_flow = !diffs;
@@ -3323,6 +3305,7 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
        __skb_pull(skb, skb_headlen(skb));
        skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+       skb->vlan_tci = 0;

        napi->skb = skb;
 }