about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorJesse Gross <jesse@nicira.com>2011-01-09 01:23:34 -0500
committerDavid S. Miller <davem@davemloft.net>2011-01-10 02:35:35 -0500
commit02932ce9e2c136e6fab2571c8e0dd69ae8ec9853 (patch)
treefd79af457060855375d9297755c48f5105912638 /net/core/dev.c
parent91ecb63c074d802f8cf103f1dafb4aed24d0f24c (diff)
net offloading: Convert skb_need_linearize() to use precomputed features.
This switches skb_need_linearize() to use the features that have been centrally computed. In doing so, this fixes a problem where scatter/gather should not be used because the card does not support checksum offloading on that type of packet. On device registration we only check that some form of checksum offloading is available if scatter/gatther is enabled but we must also check at transmission time. Examples of this include IPv6 or vlan packets on a NIC that only supports IPv4 offloading. Signed-off-by: Jesse Gross <jesse@nicira.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4cd3e84e1294..2f838f1d222c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2059,22 +2059,13 @@ EXPORT_SYMBOL(netif_skb_features);
  * support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
-			(skb_shinfo(skb)->nr_frags &&
-			(!(features & NETIF_F_SG) ||
-			 illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+	return skb_is_nonlinear(skb) &&
+	       ((skb_has_frag_list(skb) &&
+		 !(features & NETIF_F_FRAGLIST)) ||
+		(skb_shinfo(skb)->nr_frags &&
+		 !(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2115,7 +2106,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		if (skb->next)
 			goto gso;
 	} else {
-		if (skb_needs_linearize(skb, dev) &&
+		if (skb_needs_linearize(skb, features) &&
 		    __skb_linearize(skb))
 			goto out_kfree_skb;
 