about | summary | refs | log | tree | commit | diff | stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2010-11-21 08:17:29 -0500
committerDavid S. Miller <davem@davemloft.net>2010-11-24 14:44:19 -0500
commit3853b5841c01a3f492fe137afaad9c209e5162c6 (patch)
tree6781db9ec592d9798129cd4715ce00dc9007b78c /net/core/dev.c
parent22f4fbd9bd283ef85126e511171932a4af703776 (diff)
xps: Improvements in TX queue selection
In dev_pick_tx, don't do work in calculating the queue index or setting the index in the sock unless the device has more than one queue. This allows the sock to be set only with a queue index of a multi-queue device, which is desirable if devices are stacked like in a tunnel. We also allow the mapping of a socket to queue to be changed. To maintain in-order packet transmission, a flag (ooo_okay) has been added to the sk_buff structure. If a transport layer sets this flag on a packet, the transmit queue can be changed for the socket. Presumably, the transport would set this if there was no possibility of creating OOO packets (for instance, there are no packets in flight for the socket). This patch includes the modification in TCP output for setting this flag. Signed-off-by: Tom Herbert <therbert@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 381b8e280162..7b17674a29ec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2148,20 +2148,24 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	int queue_index;
 	const struct net_device_ops *ops = dev->netdev_ops;
 
-	if (ops->ndo_select_queue) {
+	if (dev->real_num_tx_queues == 1)
+		queue_index = 0;
+	else if (ops->ndo_select_queue) {
 		queue_index = ops->ndo_select_queue(dev, skb);
 		queue_index = dev_cap_txqueue(dev, queue_index);
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
-			queue_index = 0;
-			if (dev->real_num_tx_queues > 1)
-				queue_index = skb_tx_hash(dev, skb);
+		if (queue_index < 0 || skb->ooo_okay ||
+		    queue_index >= dev->real_num_tx_queues) {
+			int old_index = queue_index;
 
-			if (sk) {
-				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+			queue_index = skb_tx_hash(dev, skb);
+
+			if (queue_index != old_index && sk) {
+				struct dst_entry *dst =
+					rcu_dereference_check(sk->sk_dst_cache, 1);
 
 				if (dst && skb_dst(skb) == dst)
 					sk_tx_queue_set(sk, queue_index);