author     David S. Miller <davem@davemloft.net>	2014-08-30 22:22:20 -0400
committer  David S. Miller <davem@davemloft.net>	2014-09-01 20:39:56 -0400
commit     ce93718fb7cdbc064c3000ff59e4d3200bdfa744 (patch)
tree       e3bfb4777857095c58395e697c06a3e4ecf61c72 /net/core
parent     50cbe9ab5f8d92d2d4a327b56e96559d8f63a1fa (diff)
net: Don't keep around original SKB when we software segment GSO frames.

Just maintain the list properly by returning the head of the remaining
SKB list from dev_hard_start_xmit().

Signed-off-by: David S. Miller <davem@davemloft.net>
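A minimal sketch of the resulting calling convention (the helper below is hypothetical, distilled from the __dev_queue_xmit hunk in this patch; it is not code from the patch itself):

/* dev_hard_start_xmit() now reports the NETDEV_TX_* status through
 * *ret and returns the head of the remaining SKB list, which the
 * caller only needs to look at when the transmit did not complete.
 */
static int xmit_and_reclaim(struct sk_buff *skb, struct net_device *dev,
			    struct netdev_queue *txq)
{
	int rc;

	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	if (!dev_xmit_complete(rc) && skb) {
		/* Unsent segments are still the caller's to handle;
		 * a real caller might requeue them, here we just free.
		 */
		kfree_skb_list(skb);
	}
	return rc;
}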
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	79
1 file changed, 14 insertions, 65 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 75bc5b068a13..c89da4f306b1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2485,52 +2485,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 	return 0;
 }
 
-struct dev_gso_cb {
-	void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
-	struct dev_gso_cb *cb;
-
-	kfree_skb_list(skb->next);
-	skb->next = NULL;
-
-	cb = DEV_GSO_CB(skb);
-	if (cb->destructor)
-		cb->destructor(skb);
-}
-
-/**
- *	dev_gso_segment - Perform emulated hardware segmentation on skb.
- *	@skb: buffer to segment
- *	@features: device features as applicable to this skb
- *
- *	This function segments the given skb and stores the list of segments
- *	in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
-	struct sk_buff *segs;
-
-	segs = skb_gso_segment(skb, features);
-
-	/* Verifying header integrity only. */
-	if (!segs)
-		return 0;
-
-	if (IS_ERR(segs))
-		return PTR_ERR(segs);
-
-	skb->next = segs;
-	DEV_GSO_CB(skb)->destructor = skb->destructor;
-	skb->destructor = dev_gso_skb_destructor;
-
-	return 0;
-}
-
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
@@ -2682,8 +2636,13 @@ struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
 		features &= dev->hw_enc_features;
 
 	if (netif_needs_gso(skb, features)) {
-		if (unlikely(dev_gso_segment(skb, features)))
-			goto out_kfree_skb;
+		struct sk_buff *segs;
+
+		segs = skb_gso_segment(skb, features);
+		kfree_skb(skb);
+		if (IS_ERR(segs))
+			segs = NULL;
+		skb = segs;
 	} else {
 		if (skb_needs_linearize(skb, features) &&
 		    __skb_linearize(skb))
@@ -2714,26 +2673,16 @@ out_null:
 	return NULL;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+				    struct netdev_queue *txq, int *ret)
 {
-	int rc = NETDEV_TX_OK;
-
-	if (likely(!skb->next))
-		return xmit_one(skb, dev, txq, false);
-
-	skb->next = xmit_list(skb->next, dev, txq, &rc);
-	if (likely(skb->next == NULL)) {
-		skb->destructor = DEV_GSO_CB(skb)->destructor;
-		consume_skb(skb);
-		return rc;
+	if (likely(!skb->next)) {
+		*ret = xmit_one(skb, dev, txq, false);
+		return skb;
 	}
 
-	kfree_skb(skb);
-
-	return rc;
+	return xmit_list(skb, dev, txq, ret);
 }
-EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -2945,7 +2894,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
 	if (!netif_xmit_stopped(txq)) {
 		__this_cpu_inc(xmit_recursion);
-		rc = dev_hard_start_xmit(skb, dev, txq);
+		skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 		__this_cpu_dec(xmit_recursion);
 		if (dev_xmit_complete(rc)) {
 			HARD_TX_UNLOCK(dev, txq);