about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorJesse Gross <jesse@nicira.com>2014-12-24 01:37:26 -0500
committerDavid S. Miller <davem@davemloft.net>2014-12-26 17:20:56 -0500
commit5f35227ea34bb616c436d9da47fc325866c428f3 (patch)
treee6f997cb2f1f1bce1d2b5be613e1be667b9f927f /net
parent6e4ab361b6f3eb41ffe63224a8b5770fc68ef710 (diff)
net: Generalize ndo_gso_check to ndo_features_check
GSO isn't the only offload feature with restrictions that potentially can't be expressed with the current features mechanism. Checksum is another although it's a general issue that could in theory apply to anything. Even if it may be possible to implement these restrictions in other ways, it can result in duplicate code or inefficient per-packet behavior. This generalizes ndo_gso_check so that drivers can remove any features that don't make sense for a given packet, similar to netif_skb_features(). It also converts existing driver restrictions to the new format, completing the work that was done to support tunnel protocols since the issues apply to checksums as well. By actually removing features from the set that are used to do offloading, it solves another problem with the existing interface. In these cases, GSO would run with the original set of features and not do anything because it appears that segmentation is not required. CC: Tom Herbert <therbert@google.com> CC: Joe Stringer <joestringer@nicira.com> CC: Eric Dumazet <edumazet@google.com> CC: Hayes Wang <hayeswang@realtek.com> Signed-off-by: Jesse Gross <jesse@nicira.com> Acked-by: Tom Herbert <therbert@google.com> Fixes: 04ffcb255f22 ("net: Add ndo_gso_check") Tested-by: Hayes Wang <hayeswang@realtek.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/dev.c23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0094562b732a..683d493aa1bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2563,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
-	const struct net_device *dev = skb->dev;
+	struct net_device *dev = skb->dev;
 	netdev_features_t features = dev->features;
 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
 	__be16 protocol = skb->protocol;
@@ -2571,13 +2571,20 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
+	 * features for the netdev
+	 */
+	if (skb->encapsulation)
+		features &= dev->hw_enc_features;
+
 	if (!vlan_tx_tag_present(skb)) {
 		if (unlikely(protocol == htons(ETH_P_8021Q) ||
 			     protocol == htons(ETH_P_8021AD))) {
 			struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 			protocol = veh->h_vlan_encapsulated_proto;
 		} else {
-			return harmonize_features(skb, features);
+			goto finalize;
 		}
 	}
 
@@ -2595,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 					 NETIF_F_HW_VLAN_CTAG_TX |
 					 NETIF_F_HW_VLAN_STAG_TX);
 
+finalize:
+	if (dev->netdev_ops->ndo_features_check)
+		features &= dev->netdev_ops->ndo_features_check(skb, dev,
+								features);
+
 	return harmonize_features(skb, features);
 }
 EXPORT_SYMBOL(netif_skb_features);
@@ -2665,13 +2677,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (unlikely(!skb))
 		goto out_null;
 
-	/* If encapsulation offload request, verify we are testing
-	 * hardware encapsulation features instead of standard
-	 * features for the netdev
-	 */
-	if (skb->encapsulation)
-		features &= dev->hw_enc_features;
-
 	if (netif_needs_gso(dev, skb, features)) {
 		struct sk_buff *segs;
 