about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c53
1 files changed, 50 insertions, 3 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 7b17674a29e..c852f0038a0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1557,12 +1557,16 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+	int rc;
+
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
 	if (dev->reg_state == NETREG_REGISTERED) {
 		ASSERT_RTNL();
 
+		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
+						  txq);
 		if (txq < dev->real_num_tx_queues)
 			qdisc_reset_all_tx_gt(dev, txq);
 	}
@@ -2142,6 +2146,44 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 	return queue_index;
 }
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_RPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		map = rcu_dereference(
+		    dev_maps->cpu_map[raw_smp_processor_id()]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else {
+				u32 hash;
+				if (skb->sk && skb->sk->sk_hash)
+					hash = skb->sk->sk_hash;
+				else
+					hash = (__force u16) skb->protocol ^
+					    skb->rxhash;
+				hash = jhash_1word(hash, hashrnd);
+				queue_index = map->queues[
+				    ((u64)hash * map->len) >> 32];
+			}
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
@@ -2161,7 +2203,9 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 		    queue_index >= dev->real_num_tx_queues) {
 		int old_index = queue_index;
 
-		queue_index = skb_tx_hash(dev, skb);
+		queue_index = get_xps_queue(dev, skb);
+		if (queue_index < 0)
+			queue_index = skb_tx_hash(dev, skb);
 
 		if (queue_index != old_index && sk) {
 			struct dst_entry *dst =
@@ -5066,6 +5110,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 {
 	unsigned int count = dev->num_tx_queues;
 	struct netdev_queue *tx;
+	int i;
 
 	BUG_ON(count < 1);
 
@@ -5076,6 +5121,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 		return -ENOMEM;
 	}
 	dev->_tx = tx;
+
+	for (i = 0; i < count; i++)
+		tx[i].dev = dev;
+
 	return 0;
 }
 
@@ -5083,8 +5132,6 @@ static void netdev_init_one_queue(struct net_device *dev,
 				    struct netdev_queue *queue,
 				    void *_unused)
 {
-	queue->dev = dev;
-
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);