author	David S. Miller <davem@davemloft.net>	2014-08-30 00:19:14 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-01 20:39:55 -0400
commit	7f2e870f2a48a0524a3b03b04fa019311d16a7f7 (patch)
tree	8a0e0049081df9f280628d6c6418f3214f28271f
parent	2ea255137555052655c6a646c4e48ea7481494c7 (diff)
net: Move main gso loop out of dev_hard_start_xmit() into helper.
There is a slight policy change happening here as well.
The previous code would drop the entire rest of the GSO segment list
if any segment got, for example, a congestion notification.
That makes no sense: anything NET_XMIT_MASK and below is something
like congestion or policing. And in the congestion case it doesn't
even mean the packet was actually dropped.
Just continue until dev_xmit_complete() evaluates to false.
Signed-off-by: David S. Miller <davem@davemloft.net>
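For context, the dev_xmit_complete() test mentioned above lives in
include/linux/netdevice.h. The following is a paraphrased sketch of its
logic around this commit, not a verbatim quote from the tree:

static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

Every return code below NET_XMIT_MASK (including NET_XMIT_CN, the
congestion notification) means the driver consumed the skb, which is why
the new loop keeps going in cases where the old rc != NETDEV_TX_OK check
would have bailed out.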
-rw-r--r--	net/core/dev.c	48
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0fde7d2153db..ab7bb809711e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2616,6 +2616,34 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
 	return rc;
 }
 
+static struct sk_buff *xmit_list(struct sk_buff *first, struct net_device *dev,
+				 struct netdev_queue *txq, int *ret)
+{
+	struct sk_buff *skb = first;
+	int rc = NETDEV_TX_OK;
+
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		skb->next = NULL;
+		rc = xmit_one(skb, dev, txq);
+		if (unlikely(!dev_xmit_complete(rc))) {
+			skb->next = next;
+			goto out;
+		}
+
+		skb = next;
+		if (netif_xmit_stopped(txq) && skb) {
+			rc = NETDEV_TX_BUSY;
+			break;
+		}
+	}
+
+out:
+	*ret = rc;
+	return skb;
+}
+
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
@@ -2681,25 +2709,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	}
 
 gso:
-	do {
-		struct sk_buff *nskb = skb->next;
-
-		skb->next = nskb->next;
-		nskb->next = NULL;
-
-		rc = xmit_one(nskb, dev, txq);
-		if (unlikely(rc != NETDEV_TX_OK)) {
-			if (rc & ~NETDEV_TX_MASK)
-				goto out_kfree_gso_skb;
-			nskb->next = skb->next;
-			skb->next = nskb;
-			return rc;
-		}
-		if (unlikely(netif_xmit_stopped(txq) && skb->next))
-			return NETDEV_TX_BUSY;
-	} while (skb->next);
-
-out_kfree_gso_skb:
+	skb->next = xmit_list(skb->next, dev, txq, &rc);
 	if (likely(skb->next == NULL)) {
 		skb->destructor = DEV_GSO_CB(skb)->destructor;
 		consume_skb(skb);
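To illustrate the pattern xmit_list() implements, here is a minimal
self-contained userspace sketch of the same resumable list walk. The pkt
type, fake_xmit_one(), the FAKE_TX_* codes, and the injected failure are
illustrative stand-ins, not kernel code, and the netif_xmit_stopped()
check is omitted for brevity:

#include <stdio.h>

/* Illustrative stand-ins for the kernel return codes. */
#define FAKE_TX_OK   0
#define FAKE_TX_BUSY 0x10

struct pkt {
	int id;
	struct pkt *next;
};

/* Pretend driver hook: refuse packet 3 to demonstrate resumption. */
static int fake_xmit_one(struct pkt *p)
{
	if (p->id == 3)
		return FAKE_TX_BUSY;
	printf("sent packet %d\n", p->id);
	return FAKE_TX_OK;
}

/* Same shape as xmit_list(): send until a packet is not consumed,
 * then re-link the unsent remainder and hand it back to the caller. */
static struct pkt *xmit_list(struct pkt *first, int *ret)
{
	struct pkt *p = first;
	int rc = FAKE_TX_OK;

	while (p) {
		struct pkt *next = p->next;

		p->next = NULL;		/* detach before transmitting */
		rc = fake_xmit_one(p);
		if (rc != FAKE_TX_OK) {	/* stand-in for !dev_xmit_complete(rc) */
			p->next = next;	/* keep the remainder chained */
			break;
		}
		p = next;
	}

	*ret = rc;
	return p;	/* NULL if everything was sent */
}

int main(void)
{
	struct pkt pkts[5];
	struct pkt *head = &pkts[0];
	int rc, i;

	for (i = 0; i < 5; i++) {
		pkts[i].id = i + 1;
		pkts[i].next = (i < 4) ? &pkts[i + 1] : NULL;
	}

	head = xmit_list(head, &rc);
	if (head)
		printf("stopped at packet %d (rc=0x%x); remainder can be requeued\n",
		       head->id, rc);
	return 0;
}

Running this prints "sent packet 1" and "sent packet 2", then reports
that the walk stopped at packet 3 with the unsent tail still chained.
That is the property dev_hard_start_xmit() relies on after this change:
whatever is left on skb->next when xmit_list() returns can be requeued
by the caller.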