author:    Jason Wang <jasowang@redhat.com>    2013-11-05 05:19:45 -0500
committer: David S. Miller <davem@davemloft.net>    2013-11-05 22:20:29 -0500
commit:    9bb8ca86075f37d3c169b9c46f8e7c6d3165e18f
tree:      22cce2f2704f6673d37c720c7ebbfa1a98a4b7a2 /drivers/net/virtio_net.c
parent:    249a3630c48e5df8f8706d4cdf90bddf2b737c5d
virtio-net: switch to use XPS to choose txq
We used to use a per-cpu structure, vq_index, to record the CPU-to-queue mapping. This is suboptimal since it duplicates the work of XPS and loses all other XPS functionality, such as allowing users to configure their own transmission steering strategy.

So this patch switches to using XPS and suggests a default mapping when the number of CPUs is equal to the number of queues. With XPS support, there is no need to keep the per-cpu vq_index and .ndo_select_queue(), so they are removed as well.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
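For readers unfamiliar with XPS, the sketch below illustrates the general pattern the commit message describes: suggesting a default one-CPU-per-tx-queue mapping with netif_set_xps_queue(). It is an editor's illustration rather than code from this patch; example_suggest_xps and queue_pairs are placeholder names, and the IRQ affinity-hint handling that virtnet_set_affinity() also performs is omitted.

/* Illustrative sketch only (placeholder names): suggest a default XPS
 * mapping of one online CPU per tx queue. The mapping is only suggested
 * when the queue count matches the CPU count, because each call binds
 * queue i to exactly the CPUs in the given mask.
 */
#include <linux/netdevice.h>
#include <linux/cpumask.h>

static void example_suggest_xps(struct net_device *dev, int queue_pairs)
{
	int cpu, i = 0;

	if (queue_pairs != num_online_cpus())
		return;

	for_each_online_cpu(cpu) {
		netif_set_xps_queue(dev, cpumask_of(cpu), i);
		i++;
	}
}

With such a default in place, users can still override the steering policy at runtime by writing a CPU mask to /sys/class/net/<iface>/queues/tx-<N>/xps_cpus, which is the flexibility the per-cpu vq_index approach could not offer.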
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 48
1 file changed, 2 insertions(+), 46 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a7e9ad9b213a..01f4eb5c8b78 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -132,9 +132,6 @@ struct virtnet_info {
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
 
-	/* Per-cpu variable to show the mapping from CPU to virtqueue */
-	int __percpu *vq_index;
-
 	/* CPU hot plug notifier */
 	struct notifier_block nb;
 };
@@ -1114,7 +1111,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 {
 	int i;
-	int cpu;
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1124,16 +1120,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 		vi->affinity_hint_set = false;
 	}
-
-	i = 0;
-	for_each_online_cpu(cpu) {
-		if (cpu == hcpu) {
-			*per_cpu_ptr(vi->vq_index, cpu) = -1;
-		} else {
-			*per_cpu_ptr(vi->vq_index, cpu) =
-				++i % vi->curr_queue_pairs;
-		}
-	}
 }
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1155,7 +1141,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
 	for_each_online_cpu(cpu) {
 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		*per_cpu_ptr(vi->vq_index, cpu) = i;
+		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
 		i++;
 	}
 
@@ -1269,28 +1255,6 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	int txq;
-	struct virtnet_info *vi = netdev_priv(dev);
-
-	if (skb_rx_queue_recorded(skb)) {
-		txq = skb_get_rx_queue(skb);
-	} else {
-		txq = *__this_cpu_ptr(vi->vq_index);
-		if (txq == -1)
-			txq = 0;
-	}
-
-	while (unlikely(txq >= dev->real_num_tx_queues))
-		txq -= dev->real_num_tx_queues;
-
-	return txq;
-}
-
 static const struct net_device_ops virtnet_netdev = {
 	.ndo_open = virtnet_open,
 	.ndo_stop = virtnet_close,
@@ -1302,7 +1266,6 @@ static const struct net_device_ops virtnet_netdev = {
 	.ndo_get_stats64 = virtnet_stats,
 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-	.ndo_select_queue = virtnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
@@ -1613,10 +1576,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->stats == NULL)
 		goto free;
 
-	vi->vq_index = alloc_percpu(int);
-	if (vi->vq_index == NULL)
-		goto free_stats;
-
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1643,7 +1602,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_index;
+		goto free_stats;
 
 	netif_set_real_num_tx_queues(dev, 1);
 	netif_set_real_num_rx_queues(dev, 1);
@@ -1696,8 +1655,6 @@ free_vqs:
 	virtnet_del_vqs(vi);
 	if (vi->alloc_frag.page)
 		put_page(vi->alloc_frag.page);
-free_index:
-	free_percpu(vi->vq_index);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1736,7 +1693,6 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	flush_work(&vi->config_work);
 
-	free_percpu(vi->vq_index);
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }