author:    Jesse Gross <jesse@nicira.com>        2010-10-20 09:56:06 -0400
committer: David S. Miller <davem@davemloft.net> 2010-10-21 04:26:53 -0400
commit:    3701e51382a026cba10c60b03efabe534fba4ca4
tree:      9f205d8ad0edf65b4405d9b60cb65f3cd8e44ae4 /net/core
parent:    65ac6a5fa658b90f1be700c55e7cd72e4611015d
vlan: Centralize handling of hardware acceleration.
Currently each driver that is capable of vlan hardware acceleration must be
aware of the vlan groups that are configured and then pass the stripped tag
to a specialized receive function. This is different from other types of
hardware offload in that it places a significant amount of knowledge in the
driver itself rather than keeping it in the networking core.

This makes vlan offloading function more similarly to other forms of
offloading (such as checksum offloading or TSO) by doing the following:
* On receive, stripped vlans are passed directly to the network core,
  without attempting to check for vlan groups or reconstructing the
  header if no group is configured (sketched just below).
* vlans are made less special by folding the logic into the main receive
  routines.
* On transmit, the device layer will add the vlan header in software if
  the hardware doesn't support it, instead of spreading that logic out in
  upper layers, such as bonding (see the transmit-side sketch after the
  diff).

There are a number of advantages to this:
* Fixes all bugs with drivers incorrectly dropping vlan headers at once.
* Avoids having to disable VLAN acceleration when in promiscuous mode
  (good for bridging since it always puts devices in promiscuous mode).
* Keeps the VLAN tag separate until given to the ultimate consumer, which
  avoids needing to do header reconstruction as in tg3 unless absolutely
  necessary.
* Consolidates common code in core networking.

Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
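To make the receive side concrete, here is a minimal driver-side sketch
(illustrative only, not part of this commit). The example_* ring and
descriptor helpers are invented for the example; __vlan_hwaccel_put_tag()
and napi_gro_receive() are the real in-tree helpers of this era, and the
point is that no vlan group lookup remains in the driver:

/*
 * Hypothetical driver RX path under the centralized model.  All
 * example_* helpers are made up for illustration.
 */
static void example_rx(struct example_ring *ring, struct napi_struct *napi)
{
	struct sk_buff *skb = example_build_skb(ring);	/* hypothetical */

	/*
	 * If the NIC stripped an 802.1Q tag, just record it in the skb.
	 * The core (see the __netif_receive_skb() hunk below) now finds
	 * the vlan device; the driver needs no vlan group knowledge.
	 */
	if (example_desc_vlan_stripped(ring))		/* hypothetical */
		skb = __vlan_hwaccel_put_tag(skb, example_desc_vlan_tci(ring));

	napi_gro_receive(napi, skb);
}

Under the old model the same driver would have had to track the configured
vlan groups and call the specialized vlan_hwaccel_receive_skb() entry
point, which is exactly the per-driver knowledge this commit removes.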
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c | 47
1 file changed, 15 insertions(+), 32 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1bfd96b1fbd4..97fd6bc2004c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2789,33 +2789,6 @@ out:
 }
 #endif
 
-/*
- * netif_nit_deliver - deliver received packets to network taps
- * @skb: buffer
- *
- * This function is used to deliver incoming packets to network
- * taps. It should be used when the normal netif_receive_skb path
- * is bypassed, for example because of VLAN acceleration.
- */
-void netif_nit_deliver(struct sk_buff *skb)
-{
-	struct packet_type *ptype;
-
-	if (list_empty(&ptype_all))
-		return;
-
-	skb_reset_network_header(skb);
-	skb_reset_transport_header(skb);
-	skb->mac_len = skb->network_header - skb->mac_header;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev)
-			deliver_skb(skb, ptype, skb->dev);
-	}
-	rcu_read_unlock();
-}
-
 /**
  * netdev_rx_handler_register - register receive handler
  * @dev: device to register a handler for
@@ -2925,9 +2898,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!netdev_tstamp_prequeue)
 		net_timestamp_check(skb);
 
-	if (vlan_tx_tag_present(skb))
-		vlan_hwaccel_do_receive(skb);
-
 	/* if we've gotten here through NAPI, check netpoll */
 	if (netpoll_receive_skb(skb))
 		return NET_RX_DROP;
@@ -2940,8 +2910,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	 * be delivered to pkt handlers that are exact matches.  Also
 	 * the deliver_no_wcard flag will be set.  If packet handlers
 	 * are sensitive to duplicate packets these skbs will need to
-	 * be dropped at the handler.  The vlan accel path may have
-	 * already set the deliver_no_wcard flag.
+	 * be dropped at the handler.
 	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
@@ -3000,6 +2969,18 @@ ncls:
 		goto out;
 	}
 
+	if (vlan_tx_tag_present(skb)) {
+		if (pt_prev) {
+			ret = deliver_skb(skb, pt_prev, orig_dev);
+			pt_prev = NULL;
+		}
+		if (vlan_hwaccel_do_receive(&skb)) {
+			ret = __netif_receive_skb(skb);
+			goto out;
+		} else if (unlikely(!skb))
+			goto out;
+	}
+
 	/*
 	 * Make sure frames received on VLAN interfaces stacked on
 	 * bonding interfaces still make their way to any base bonding
@@ -3264,6 +3245,7 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	unsigned long diffs;
 
 	diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+	diffs |= p->vlan_tci ^ skb->vlan_tci;
 	diffs |= compare_ether_header(skb_mac_header(p),
 				      skb_gro_mac_header(skb));
 	NAPI_GRO_CB(p)->same_flow = !diffs;
@@ -3323,6 +3305,7 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+	skb->vlan_tci = 0;
 
 	napi->skb = skb;
 }
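A note on the receive hunk above: any pending exact-match handler
(pt_prev) is flushed before vlan_hwaccel_do_receive(&skb) runs; when that
call returns true the skb has been retargeted to the matching vlan device
and takes a second pass through __netif_receive_skb(), while a NULL skb
means the frame was consumed or dropped.

The transmit half described in the commit message (software insertion of
the header when the hardware cannot do it) is not part of this net/core
hunk set. A minimal sketch of the idea, assuming it sits in the device
layer just before the skb is handed to the driver, and using the in-tree
__vlan_put_tag() helper:

	/*
	 * Sketch of the software-tagging fallback on transmit; the exact
	 * hook point is an assumption, not shown by this diff.
	 * __vlan_put_tag() copies skb->vlan_tci into the packet data and
	 * returns NULL on allocation failure.
	 */
	if (vlan_tx_tag_present(skb) &&
	    !(dev->features & NETIF_F_HW_VLAN_TX)) {
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return NETDEV_TX_OK;	/* dropped; nothing to send */
		skb->vlan_tci = 0;	/* the tag now lives in the payload */
	}

With this in place, upper layers such as bonding can always set
skb->vlan_tci and let the device layer decide whether the tag stays
out-of-band for the hardware or is folded into the frame in software.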