author	Wanlong Gao <gaowanlong@cn.fujitsu.com>	2013-01-24 18:51:29 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-27 01:22:51 -0500
commit	47be24796c13e7d9f087005c2bedc68ee0709f7b (patch)
tree	d1f4e4c652876e1121f97cb8f5ccf4e1ef1946bf /drivers/net
parent	ee50e135aeb048b90fab662e661c58b67341830b (diff)
virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As Michael mentioned, setting the affinity and selecting the queue will not
work very well when CPU IDs are not consecutive, which can happen with CPU
hot-unplug. Fix this bug by traversing the online CPUs and creating a per-cpu
variable that records the mapping from each CPU to its preferable virtqueue.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Eric Dumazet <erdnetdev@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: virtualization@lists.linux-foundation.org
Cc: netdev@vger.kernel.org
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
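[Editor's illustration, not part of the patch: a minimal userspace C sketch of the mapping the fix builds. The online CPU set, the queue-pair count, and the plain vq_index array below are hypothetical stand-ins for for_each_online_cpu() and the per-cpu vi->vq_index variable in the driver.]

#include <stdio.h>

int main(void)
{
	/* Hypothetical online CPU set after hot-unplug: IDs 2, 4, 5, 7 are gone. */
	int online_cpus[] = { 0, 1, 3, 6 };
	int nr_online = sizeof(online_cpus) / sizeof(online_cpus[0]);
	int curr_queue_pairs = 2;
	int vq_index[8];	/* stand-in for the per-cpu vi->vq_index */
	int i = 0;
	int k;

	/* Mirrors the patch: walk only the online CPUs instead of assuming
	 * consecutive IDs 0..n-1, assigning queues round-robin. */
	for (k = 0; k < nr_online; k++)
		vq_index[online_cpus[k]] = ++i % curr_queue_pairs;

	for (k = 0; k < nr_online; k++)
		printf("cpu %d -> vq %d\n", online_cpus[k], vq_index[online_cpus[k]]);
	return 0;
}

The old code bound queue i to CPU i, which breaks once CPU i is offline; walking the online set keeps every present CPU mapped to a valid queue in 0..curr_queue_pairs-1.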
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/virtio_net.c	67
1 file changed, 54 insertions(+), 13 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a6fcf15adc4f..fda214a5426a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -123,6 +123,9 @@ struct virtnet_info {
 
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
+
+	/* Per-cpu variable to show the mapping from CPU to virtqueue */
+	int __percpu *vq_index;
 };
 
 struct skb_vnet_hdr {
@@ -1016,6 +1019,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
 {
 	int i;
+	int cpu;
 
 	/* In multiqueue mode, when the number of cpu is equal to the number of
 	 * queue pairs, we let the queue pairs to be private to one cpu by
@@ -1023,22 +1027,40 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
 	 */
 	if ((vi->curr_queue_pairs == 1 ||
 	     vi->max_queue_pairs != num_online_cpus()) && set) {
-		if (vi->affinity_hint_set)
+		if (vi->affinity_hint_set) {
 			set = false;
-		else
+		} else {
+			i = 0;
+			for_each_online_cpu(cpu)
+				*per_cpu_ptr(vi->vq_index, cpu) =
+					++i % vi->curr_queue_pairs;
 			return;
+		}
 	}
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		int cpu = set ? i : -1;
-		virtqueue_set_affinity(vi->rq[i].vq, cpu);
-		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-	}
+	if (set) {
+		i = 0;
+		for_each_online_cpu(cpu) {
+			virtqueue_set_affinity(vi->rq[i].vq, cpu);
+			virtqueue_set_affinity(vi->sq[i].vq, cpu);
+			*per_cpu_ptr(vi->vq_index, cpu) = i;
+			i++;
+		}
 
-	if (set)
 		vi->affinity_hint_set = true;
-	else
+	} else {
+		for(i = 0; i < vi->max_queue_pairs; i++) {
+			virtqueue_set_affinity(vi->rq[i].vq, -1);
+			virtqueue_set_affinity(vi->sq[i].vq, -1);
+		}
+
+		i = 0;
+		for_each_online_cpu(cpu)
+			*per_cpu_ptr(vi->vq_index, cpu) =
+				++i % vi->curr_queue_pairs;
+
 		vi->affinity_hint_set = false;
+	}
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -1082,6 +1104,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (queue_pairs > vi->max_queue_pairs)
 		return -EINVAL;
 
+	get_online_cpus();
 	err = virtnet_set_queues(vi, queue_pairs);
 	if (!err) {
 		netif_set_real_num_tx_queues(dev, queue_pairs);
@@ -1089,6 +1112,7 @@ static int virtnet_set_channels(struct net_device *dev,
 
 		virtnet_set_affinity(vi, true);
 	}
+	put_online_cpus();
 
 	return err;
 }
@@ -1127,12 +1151,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
 
 /* To avoid contending a lock hold by a vcpu who would exit to host, select the
  * txq based on the processor id.
- * TODO: handle cpu hotplug.
  */
 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
-		  smp_processor_id();
+	int txq;
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	if (skb_rx_queue_recorded(skb)) {
+		txq = skb_get_rx_queue(skb);
+	} else {
+		txq = *__this_cpu_ptr(vi->vq_index);
+		if (txq == -1)
+			txq = 0;
+	}
 
 	while (unlikely(txq >= dev->real_num_tx_queues))
 		txq -= dev->real_num_tx_queues;
@@ -1371,7 +1402,10 @@ static int init_vqs(struct virtnet_info *vi)
 	if (ret)
 		goto err_free;
 
+	get_online_cpus();
 	virtnet_set_affinity(vi, true);
+	put_online_cpus();
+
 	return 0;
 
 err_free:
@@ -1453,6 +1487,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->stats == NULL)
 		goto free;
 
+	vi->vq_index = alloc_percpu(int);
+	if (vi->vq_index == NULL)
+		goto free_stats;
+
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1476,7 +1514,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_stats;
+		goto free_index;
 
 	netif_set_real_num_tx_queues(dev, 1);
 	netif_set_real_num_rx_queues(dev, 1);
@@ -1520,6 +1558,8 @@ free_recv_bufs:
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	virtnet_del_vqs(vi);
+free_index:
+	free_percpu(vi->vq_index);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1554,6 +1594,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	flush_work(&vi->config_work);
 
+	free_percpu(vi->vq_index);
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
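[Editor's illustration: an assumed, simplified userspace translation of the new virtnet_select_queue() logic above. The names select_txq, recorded_rxq, and per_cpu_vq are hypothetical; plain ints stand in for the skb and the per-cpu lookup.]

#include <stdio.h>

/* recorded_rxq < 0 models !skb_rx_queue_recorded(); per_cpu_vq == -1
 * exercises the defensive txq == -1 fallback added by the patch. */
static int select_txq(int recorded_rxq, int per_cpu_vq, int real_num_tx_queues)
{
	int txq;

	if (recorded_rxq >= 0)
		txq = recorded_rxq;
	else {
		txq = per_cpu_vq;
		if (txq == -1)
			txq = 0;
	}

	/* Same wraparound the driver keeps for txq >= real_num_tx_queues. */
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;
}

int main(void)
{
	printf("%d\n", select_txq(-1, 3, 2));	/* per-cpu vq 3 wraps to txq 1 */
	printf("%d\n", select_txq(-1, -1, 2));	/* unset mapping falls back to txq 0 */
	return 0;
}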