author    David S. Miller <davem@davemloft.net>    2014-08-30 18:17:13 -0400
committer David S. Miller <davem@davemloft.net>    2014-09-01 20:39:55 -0400
commit    eae3f88ee44251bcca3a085f9565257c6f9f9e69
tree      391cbf69b702d81b637dfd03a8aa239eaf24535d /net/core
parent    95f6b3dda2a4a052f7dabe9998e4ffac491b7bc2
net: Separate out SKB validation logic from transmit path.
dev_hard_start_xmit() does two things: it first validates and canonicalizes the SKB, then it actually sends it. Make a set of helper functions for doing the first part.

Signed-off-by: David S. Miller <davem@davemloft.net>
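For illustration, a minimal sketch of the contract the new helpers establish. This is not part of the patch; example_xmit() is an invented name, and since validate_xmit_skb() is static it stands in for a caller inside net/core/dev.c:

/* Illustration only: validate_xmit_skb() hands back a canonicalized
 * skb (with any GSO segments chained on skb->next), or frees the skb
 * and returns NULL on failure.
 */
static int example_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	skb = validate_xmit_skb(skb, dev);
	if (!skb)
		return NETDEV_TX_OK;	/* helper already freed the skb */

	if (likely(!skb->next))		/* common, non-GSO case */
		return xmit_one(skb, dev, txq, false);

	/* GSO: walk the segment list. Handing the skb back to
	 * dev_hard_start_xmit() is safe here because validate_xmit_skb()
	 * returns immediately when skb->next is already set.
	 */
	return dev_hard_start_xmit(skb, dev, txq);
}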
Diffstat (limited to 'net/core')
-rw-r--r--    net/core/dev.c    125
1 file changed, 71 insertions(+), 54 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6d82194e414b..704a5434f77d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2644,80 +2644,97 @@ out:
 	return skb;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
 {
-	int rc = NETDEV_TX_OK;
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (skb)
+			skb->vlan_tci = 0;
+	}
+	return skb;
+}
 
-	if (likely(!skb->next)) {
-		netdev_features_t features;
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_features_t features;
 
-		/*
-		 * If device doesn't need skb->dst, release it right now while
-		 * its hot in this cpu cache
-		 */
-		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-			skb_dst_drop(skb);
+	if (skb->next)
+		return skb;
 
-		features = netif_skb_features(skb);
+	/* If device doesn't need skb->dst, release it right now while
+	 * its hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
 
-		if (vlan_tx_tag_present(skb) &&
-		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-			skb = __vlan_put_tag(skb, skb->vlan_proto,
-					     vlan_tx_tag_get(skb));
-			if (unlikely(!skb))
-				goto out;
+	features = netif_skb_features(skb);
+	skb = validate_xmit_vlan(skb, features);
+	if (unlikely(!skb))
+		goto out_null;
 
-			skb->vlan_tci = 0;
-		}
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
+	 * features for the netdev
+	 */
+	if (skb->encapsulation)
+		features &= dev->hw_enc_features;
 
-		/* If encapsulation offload request, verify we are testing
-		 * hardware encapsulation features instead of standard
-		 * features for the netdev
-		 */
-		if (skb->encapsulation)
-			features &= dev->hw_enc_features;
+	if (netif_needs_gso(skb, features)) {
+		if (unlikely(dev_gso_segment(skb, features)))
+			goto out_kfree_skb;
+	} else {
+		if (skb_needs_linearize(skb, features) &&
+		    __skb_linearize(skb))
+			goto out_kfree_skb;
 
-		if (netif_needs_gso(skb, features)) {
-			if (unlikely(dev_gso_segment(skb, features)))
-				goto out_kfree_skb;
-			if (skb->next)
-				goto gso;
-		} else {
-			if (skb_needs_linearize(skb, features) &&
-			    __skb_linearize(skb))
+		/* If packet is not checksummed and device does not
+		 * support checksumming for this protocol, complete
+		 * checksumming here.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->encapsulation)
+				skb_set_inner_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			else
+				skb_set_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			if (!(features & NETIF_F_ALL_CSUM) &&
+			    skb_checksum_help(skb))
 				goto out_kfree_skb;
-
-			/* If packet is not checksummed and device does not
-			 * support checksumming for this protocol, complete
-			 * checksumming here.
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (skb->encapsulation)
-					skb_set_inner_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				else
-					skb_set_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				if (!(features & NETIF_F_ALL_CSUM) &&
-				    skb_checksum_help(skb))
-					goto out_kfree_skb;
-			}
 		}
+	}
+
+	return skb;
+
+out_kfree_skb:
+	kfree_skb(skb);
+out_null:
+	return NULL;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
+{
+	int rc = NETDEV_TX_OK;
+
+	skb = validate_xmit_skb(skb, dev);
+	if (!skb)
+		return rc;
 
+	if (likely(!skb->next))
 		return xmit_one(skb, dev, txq, false);
-	}
 
-gso:
 	skb->next = xmit_list(skb->next, dev, txq, &rc);
 	if (likely(skb->next == NULL)) {
 		skb->destructor = DEV_GSO_CB(skb)->destructor;
 		consume_skb(skb);
 		return rc;
 	}
-out_kfree_skb:
-	kfree_skb(skb);
-out:
+
+	kfree_skb(skb);
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
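The refactor also changes the error-handling contract: the out_kfree_skb/out_null labels now live inside validate_xmit_skb(), so a failed skb is always freed by the helper and signalled with NULL, and dev_hard_start_xmit() loses its old gso:/out: labels. A caller that only needs the VLAN step can use the non-static validate_xmit_vlan() on its own; a minimal sketch (illustration only, invented function name):

/* Illustration only: software-untag a VLAN frame when the device
 * cannot hardware-offload skb->vlan_proto. On success the tag is
 * pushed into the packet data and vlan_tci is cleared; on allocation
 * failure the skb has already been freed and NULL comes back.
 */
static struct sk_buff *example_vlan_canonicalize(struct sk_buff *skb,
						 struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	return validate_xmit_vlan(skb, features);
}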