author		Alexander Duyck <alexander.h.duyck@intel.com>	2013-01-10 03:56:51 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-11 01:47:03 -0500
commit		416186fbf8c5b4e4465a10c6ac7a45b6c47144b2
tree		8670e015c8e6ba3e41c7373671bcfe1b2703b653 /net/core
parent		c10d73671ad30f54692f7f69f0e09e75d3a8926a
net: Split core bits of netdev_pick_tx into __netdev_pick_tx
This change splits the core bits of netdev_pick_tx out into a separate function, __netdev_pick_tx. The main idea behind this is to make the standard queue-selection code accessible to drivers' select queue functions when they decide to process the standard path instead of their own custom path in their select queue routine.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
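As an illustration only (no driver is converted by this commit), a driver that wants custom steering for a subset of its traffic could now defer everything else to the shared helper. A minimal sketch against the two-argument ndo_select_queue signature of this era; foo_select_queue and the priority test are hypothetical:

/* Hypothetical driver sketch, not part of this commit: special-case
 * some traffic, then fall back to the standard selection path that
 * this change factors out into __netdev_pick_tx().
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* custom path: pin control-priority frames to the last queue */
	if (skb->priority == TC_PRIO_CONTROL)
		return dev->real_num_tx_queues - 1;

	/* standard path: socket cache, then XPS, then skb_tx_hash() */
	return __netdev_pick_tx(dev, skb);
}

Note that netdev_pick_tx() still applies dev_cap_txqueue() to whatever ndo_select_queue returns, so an out-of-range index from a custom path is clamped as before.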
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4794cae84939..81ff67149f62 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2495,37 +2495,44 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
-	int queue_index;
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	if (dev->real_num_tx_queues == 1)
-		queue_index = 0;
-	else if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
-	} else {
-		struct sock *sk = skb->sk;
-		queue_index = sk_tx_queue_get(sk);
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
 
-		if (queue_index < 0 || skb->ooo_okay ||
-		    queue_index >= dev->real_num_tx_queues) {
-			int old_index = queue_index;
+	if (queue_index < 0 || skb->ooo_okay ||
+	    queue_index >= dev->real_num_tx_queues) {
+		int new_index = get_xps_queue(dev, skb);
+		if (new_index < 0)
+			new_index = skb_tx_hash(dev, skb);
 
-			queue_index = get_xps_queue(dev, skb);
-			if (queue_index < 0)
-				queue_index = skb_tx_hash(dev, skb);
-
-			if (queue_index != old_index && sk) {
-				struct dst_entry *dst =
+		if (queue_index != new_index && sk) {
+			struct dst_entry *dst =
 			    rcu_dereference_check(sk->sk_dst_cache, 1);
 
-				if (dst && skb_dst(skb) == dst)
-					sk_tx_queue_set(sk, queue_index);
-			}
+			if (dst && skb_dst(skb) == dst)
+				sk_tx_queue_set(sk, queue_index);
+
 		}
+
+		queue_index = new_index;
+	}
+
+	return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb)
+{
+	int queue_index = 0;
+
+	if (dev->real_num_tx_queues != 1) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		if (ops->ndo_select_queue)
+			queue_index = ops->ndo_select_queue(dev, skb);
+		else
+			queue_index = __netdev_pick_tx(dev, skb);
+		queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
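For context (not part of the diff above), callers are unaffected by the split: the transmit path in this era resolves the queue through the unchanged entry point, roughly as in this simplified sketch from the dev_queue_xmit() side:

	/* dev_queue_xmit(), simplified: map the skb to a tx queue; the
	 * selection internally goes through ndo_select_queue or the
	 * newly split-out __netdev_pick_tx() standard path.
	 */
	struct netdev_queue *txq = netdev_pick_tx(dev, skb);

Only the internal factoring of the selection logic changes, which is what makes the standard path reusable from driver select queue routines.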