author     David S. Miller <davem@davemloft.net>    2014-01-14 17:37:09 -0500
committer  David S. Miller <davem@davemloft.net>    2014-01-14 17:42:42 -0500
commit     0a379e21c503b2ff66b44d588df9f231e9b0b9ca (patch)
tree       22b875fcf4b67fcd007726f00c5fc1748ce985d0 /net/core
parent     a49da8811e71c5355b52c65ee32976741d5834cd (diff)
parent     fdc3452cd2c7b2bfe0f378f92123f4f9a98fa2bd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net/core')

 -rw-r--r--  net/core/dev.c            | 29
 -rw-r--r--  net/core/flow_dissector.c | 10
 -rw-r--r--  net/core/netpoll.c        |  2

3 files changed, 25 insertions, 16 deletions
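This merge resolves the conflict between 'net' and 'net-next' over how the macvlan L2 forwarding offload context ("accel_priv") reaches the transmit path: rather than being threaded through dev_hard_start_xmit(), it now enters via a new dev_queue_xmit_accel() entry point and is consumed during queue selection in netdev_pick_tx(). Below is a minimal sketch of how a forwarding driver might drive the merged interface, loosely modeled on macvlan (the interface's first consumer); the example_vlan structure and the stats handling are hypothetical simplifications, not code from this commit:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state; macvlan keeps a comparable fwd_priv
 * handle, obtained from the lower device's ndo_dfwd_add_station(). */
struct example_vlan {
	struct net_device *lowerdev;
	void *fwd_priv;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_vlan *vlan = netdev_priv(dev);
	int ret;

	if (vlan->fwd_priv) {
		/* Offloaded path: hand the skb to the lower device; the
		 * accel_priv handle flows into queue selection through
		 * __dev_queue_xmit() -> netdev_pick_tx(). */
		skb->dev = vlan->lowerdev;
		ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
	} else {
		/* Ordinary path, equivalent to __dev_queue_xmit(skb, NULL). */
		ret = dev_queue_xmit(skb);
	}

	/* Simplified accounting; a real driver keeps percpu tx stats. */
	if (unlikely(ret != NET_XMIT_SUCCESS && ret != NET_XMIT_CN))
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
```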
```diff
diff --git a/net/core/dev.c b/net/core/dev.c
index 87312dcf0aa8..2bee80591f9a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2530,7 +2530,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2596,13 +2596,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK && txq)
+		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2618,10 +2615,7 @@ gso:
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2802,7 +2796,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2818,7 +2812,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2854,7 +2848,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 		if (!netif_xmit_stopped(txq)) {
 			__this_cpu_inc(xmit_recursion);
-			rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+			rc = dev_hard_start_xmit(skb, dev, txq);
 			__this_cpu_dec(xmit_recursion);
 			if (dev_xmit_complete(rc)) {
 				HARD_TX_UNLOCK(dev, txq);
@@ -2883,8 +2877,19 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
 			Receiver routines
```
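A consequence visible in these hunks: with accel_priv gone from dev_hard_start_xmit(), the offloaded and ordinary paths converge on the driver's regular ndo_start_xmit(), and the `&& txq` guard can be dropped because no caller passes a NULL queue any more. Under the post-merge interface, a driver offering the offload would wire its ops roughly as below; this is a reading of the diff, and all example_* names are hypothetical:

```c
static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit       = example_start_xmit,   /* also sees offloaded skbs */
	.ndo_select_queue     = example_select_queue, /* 3-arg form, gets accel_priv */
	.ndo_dfwd_add_station = example_fwd_add,      /* hands back the accel_priv handle */
	.ndo_dfwd_del_station = example_fwd_del,
	/* no ndo_dfwd_start_xmit: the separate transmit hook is gone */
};
```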
```diff
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b324bfa3485c..87577d447554 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
```
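Two things change here: ndo_select_queue() grows a third accel_priv argument, and dev_cap_txqueue() is applied only when no offload context is present, so the driver's queue choice for forwarded traffic is left unclamped. A sketch of a driver-side ndo_select_queue under the new signature follows; the example_* names are hypothetical, and only __netdev_pick_tx() (exported just above) is real:

```c
/* Hypothetical handle a driver might return from ndo_dfwd_add_station()
 * and later receive back here as accel_priv. */
struct example_fwd_ring {
	u16 base_queue;	/* first hw queue reserved for this forwarded station */
};

static u16 example_select_queue(struct net_device *dev,
				struct sk_buff *skb,
				void *accel_priv)
{
	struct example_fwd_ring *ring = accel_priv;

	/* Forwarded traffic: steer to the reserved ring; netdev_pick_tx()
	 * will not clamp this index via dev_cap_txqueue(). */
	if (ring)
		return ring->base_queue;

	/* Everything else: fall back to the stack's default policy. */
	return __netdev_pick_tx(dev, skb);
}
```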
```diff
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 303097874633..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb);
+		txq = netdev_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
```