-rw-r--r--   drivers/vhost/net.c          | 41
-rw-r--r--   drivers/vhost/scsi.c         | 21
-rw-r--r--   drivers/vhost/vhost.c        | 20
-rw-r--r--   drivers/vhost/vhost.h        |  5
-rw-r--r--   drivers/vhost/vsock.c        | 28
-rw-r--r--   drivers/virtio/Kconfig       |  8
-rw-r--r--   tools/virtio/linux/kernel.h  |  2
7 files changed, 77 insertions(+), 48 deletions(-)
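
The changes below give every vhost device a common back-pressure mechanism: each device passes a packet weight and a byte weight to vhost_dev_init(), and each request-handling loop becomes a do { ... } while (!vhost_exceeds_weight(...)) loop, so the core can requeue the work item once either quota is exceeded instead of letting one virtqueue run unbounded. A minimal sketch of that handler shape, assuming a hypothetical device (my_handle_kick() and process_one_request() are illustrative; only vhost_exceeds_weight() and its calling convention come from these changes):

	#include "vhost.h"	/* struct vhost_virtqueue, vhost_exceeds_weight() */

	/* Hypothetical per-request helper, standing in for the device's real
	 * descriptor processing; returns bytes consumed, or <= 0 to stop. */
	static int process_one_request(struct vhost_virtqueue *vq);

	static void my_handle_kick(struct vhost_virtqueue *vq)
	{
		int pkts = 0, total_len = 0;

		do {
			int len = process_one_request(vq);

			if (len <= 0)
				break;
			total_len += len;
			/* vhost_exceeds_weight() calls vhost_poll_queue() itself
			 * when a limit is hit, so the handler only has to stop
			 * iterating; the work item runs again later and other
			 * virtqueues get a turn in between. */
		} while (!vhost_exceeds_weight(vq, ++pkts, total_len));
	}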
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..2d9df786a9d3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
 	return iov_iter_count(iter);
 }
 
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
-	return total_len >= VHOST_NET_WEIGHT ||
-	       pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
 static int get_tx_bufs(struct vhost_net *net,
 		       struct vhost_net_virtqueue *nvq,
 		       struct msghdr *msg,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 	int sent_pkts = 0;
 	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
 
-	for (;;) {
+	do {
 		bool busyloop_intr = false;
 
 		if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +839,7 @@ done:
 		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
 		vq->heads[nvq->done_idx].len = 0;
 		++nvq->done_idx;
-		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 
 	vhost_tx_batch(net, nvq, sock, &msg);
 }
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 	bool zcopy_used;
 	int sent_pkts = 0;
 
-	for (;;) {
+	do {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 		else
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
-		if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-						      &busyloop_intr))) {
+	do {
+		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+						      &busyloop_intr);
+		if (!sock_len)
+			break;
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
 			vhost_log_write(vq, vq_log, log, vhost_len,
 					vq->iov, in);
 		total_len += vhost_len;
-		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			goto out;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
 	if (unlikely(busyloop_intr))
 		vhost_poll_queue(&vq->poll);
-	else
+	else if (!sock_len)
 		vhost_net_enable_vq(net, vq);
 out:
 	vhost_net_signal_used(nvq);
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
-		       UIO_MAXIOV + VHOST_NET_BATCH);
+		       UIO_MAXIOV + VHOST_NET_BATCH,
+		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c090d177bd75..a9caf1bc3c3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,6 +57,12 @@
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
 
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * request.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
 struct vhost_scsi_inflight {
 	/* Wait for the flush operation to finish */
 	struct completion comp;
@@ -912,7 +918,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct iov_iter in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	int ret, prot_bytes;
+	int ret, prot_bytes, c = 0;
 	u16 lun;
 	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -932,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	vhost_disable_notify(&vs->dev, vq);
 
-	for (;;) {
+	do {
 		ret = vhost_scsi_get_desc(vs, vq, &vc);
 		if (ret)
 			goto err;
@@ -1112,7 +1118,7 @@ err:
 			break;
 		else if (ret == -EIO)
 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -1171,7 +1177,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	} v_req;
 	struct vhost_scsi_ctx vc;
 	size_t typ_size;
-	int ret;
+	int ret, c = 0;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -1185,7 +1191,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	vhost_disable_notify(&vs->dev, vq);
 
-	for (;;) {
+	do {
 		ret = vhost_scsi_get_desc(vs, vq, &vc);
 		if (ret)
 			goto err;
@@ -1264,7 +1270,7 @@ err:
 			break;
 		else if (ret == -EIO)
 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -1621,7 +1627,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+		       VHOST_SCSI_WEIGHT, 0);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1e3ed41ae1f3..3f3eac4bcc58 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 		vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+			  int pkts, int total_len)
+{
+	struct vhost_dev *dev = vq->dev;
+
+	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+	    pkts >= dev->weight) {
+		vhost_poll_queue(&vq->poll);
+		return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
 void vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
+		    struct vhost_virtqueue **vqs, int nvqs,
+		    int iov_limit, int weight, int byte_weight)
 {
 	struct vhost_virtqueue *vq;
 	int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->mm = NULL;
 	dev->worker = NULL;
 	dev->iov_limit = iov_limit;
+	dev->weight = weight;
+	dev->byte_weight = byte_weight;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9490e7ddb340..27a78a9b8cc7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -171,10 +171,13 @@ struct vhost_dev {
 	struct list_head pending_list;
 	wait_queue_head_t wait;
 	int iov_limit;
+	int weight;
+	int byte_weight;
 };
 
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
-		    int nvqs, int iov_limit);
+		    int nvqs, int iov_limit, int weight, int byte_weight);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bb5fc0e9fbc2..814bed72d793 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
 #include "vhost.h"
 
 #define VHOST_VSOCK_DEFAULT_HOST_CID	2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
 
 enum {
 	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			    struct vhost_virtqueue *vq)
 {
 	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+	int pkts = 0, total_len = 0;
 	bool added = false;
 	bool restart_tx = false;
 
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 	/* Avoid further vmexits, we're already processing the virtqueue */
 	vhost_disable_notify(&vsock->dev, vq);
 
-	for (;;) {
+	do {
 		struct virtio_vsock_pkt *pkt;
 		struct iov_iter iov_iter;
 		unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		 */
 		virtio_transport_deliver_tap_pkt(pkt);
 
+		total_len += pkt->len;
 		virtio_transport_free_pkt(pkt);
-	}
+	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 	if (added)
 		vhost_signal(&vsock->dev, vq);
 
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
 						 dev);
 	struct virtio_vsock_pkt *pkt;
-	int head;
+	int head, pkts = 0, total_len = 0;
 	unsigned int out, in;
 	bool added = false;
 
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 		goto out;
 
 	vhost_disable_notify(&vsock->dev, vq);
-	for (;;) {
+	do {
 		u32 len;
 
 		if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 		else
 			virtio_transport_free_pkt(pkt);
 
-		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+		len += sizeof(pkt->hdr);
+		vhost_add_used(vq, head, len);
+		total_len += len;
 		added = true;
-	}
+	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
 no_more_replies:
 	if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+		       VHOST_VSOCK_WEIGHT);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 9aea44ed54c7..023fc3bc01c6 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -63,12 +63,12 @@ config VIRTIO_INPUT
 
 	  If unsure, say M.
 
- config VIRTIO_MMIO
- 	tristate "Platform bus driver for memory mapped virtio devices"
- 	depends on HAS_IOMEM && HAS_DMA
- 	select VIRTIO
+config VIRTIO_MMIO
+	tristate "Platform bus driver for memory mapped virtio devices"
+	depends on HAS_IOMEM && HAS_DMA
+	select VIRTIO
 	---help---
 	 This drivers provides support for memory mapped virtio
 	 platform device driver.
 
 	 If unsure, say N.
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 7ef45a4a3cba..6683b4a70b05 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -127,7 +127,7 @@ static inline void free_page(unsigned long addr)
 #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 
-#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
+#define WARN_ON_ONCE(cond) (unlikely(cond) ? fprintf (stderr, "WARNING\n") : 0)
 
 #define min(x, y) ({				\
 	typeof(x) _min1 = (x);			\