author	Herbert Xu <herbert@gondor.apana.org.au>	2009-01-06 13:50:09 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-06 13:50:09 -0500
commit	e1c096e251e52773afeffbbcb74d0a072be47ea3 (patch)
tree	f49f4a34015ba7a4bee16c15942b8f1d47f8c20a /net
parent	96e93eab20337d063c70d537bd7bc70d90e04fa3 (diff)
vlan: Add GRO interfaces
This patch adds GRO interfaces for hardware-assisted VLAN reception.
With this in place we're now at parity with LRO as far as the interface
is concerned.  That is, you can now take any LRO driver and convert it
over to GRO.

As the CB memory clashes with GRO's use of CB, I've removed it entirely
by storing dev in skb->dev.  This is OK because VLAN gets called first
thing in netif_receive_skb and skb->dev is not used in between us
calling netif_rx and netif_receive_skb getting called.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/8021q/vlan_core.c | 111 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 88 insertions(+), 23 deletions(-)
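As the commit message notes, a driver that previously handed VLAN-tagged frames to the stack with the LRO-era vlan_hwaccel_receive_skb() helper can now feed them to GRO instead. The sketch below is illustrative only and not part of this patch: foo_priv, foo_receive_tagged() and its fields are hypothetical driver names, and the snippet assumes the driver already keeps its napi_struct and vlan_group in its private data and that the new prototypes are visible via linux/if_vlan.h.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical driver private data; foo_priv, napi and vlgrp are
 * illustrative names, not part of this patch. */
struct foo_priv {
	struct napi_struct napi;
	struct vlan_group *vlgrp;
};

/* Hand a received, VLAN-tagged frame to the stack from the NAPI poll
 * loop.  The old LRO-era call is kept in a comment for comparison. */
static void foo_receive_tagged(struct foo_priv *priv, struct sk_buff *skb,
			       u16 vlan_tci)
{
	if (priv->vlgrp) {
		/* Previously: vlan_hwaccel_receive_skb(skb, priv->vlgrp, vlan_tci); */
		vlan_gro_receive(&priv->napi, priv->vlgrp, vlan_tci, skb);
	} else {
		netif_receive_skb(skb);
	}
}

As the diff below shows, vlan_gro_receive() falls back to netif_receive_skb() itself when dev_gro_receive() declines to aggregate the frame, so a converted driver keeps the ordinary receive path without extra code.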
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
-struct vlan_hwaccel_cb {
-	struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
-	return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
+	if (skb_bond_should_drop(skb))
+		goto drop;
 
 	skb->vlan_tci = vlan_tci;
-	cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
 }
 EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-	struct net_device *dev = cb->dev;
+	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats;
 
+	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
 
-	if (dev == NULL) {
-		kfree_skb(skb);
-		return -1;
-	}
-
 	skb->dev = dev;
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 	return vlan_dev_info(dev)->vlan_id;
 }
 EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+			   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	if (skb_bond_should_drop(skb))
+		goto drop;
+
+	skb->vlan_tci = vlan_tci;
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+
+drop:
+	return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		     unsigned int vlan_tci, struct sk_buff *skb)
+{
+	int err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		napi_reuse_skb(napi, skb);
+		break;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);