path: root/drivers/vhost
author    Jason Wang <jasowang@redhat.com>    2016-03-04 06:24:53 -0500
committer Michael S. Tsirkin <mst@redhat.com>    2016-03-10 19:18:53 -0500
commit    0308813724606549436d30efd877a80c8e00790e (patch)
tree      9ce96f57528d75f21df24831ad35fca9fdc7a4d4 /drivers/vhost
parent    d4a60603fa0b42012decfa058dfa44cffde7a10c (diff)
vhost_net: basic polling support
This patch polls for a newly added tx buffer or for the socket receive queue for a while at the end of tx/rx processing. The maximum time spent polling is specified through a new kind of vring ioctl.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
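For reference, a minimal userspace sketch (not part of this patch) of how a vhost backend might program the new per-vring timeout. The VHOST_SET_VRING_BUSYLOOP_TIMEOUT number and struct vhost_vring_state come from the vhost uapi header, which is outside this diffstat (limited to drivers/vhost); the device path, queue index, and timeout value are illustrative only. Since busy_clock() is local_clock() >> 10, the timeout unit is roughly a microsecond.

/* Hedged usage sketch, not part of this patch: set the busy-loop
 * timeout for one virtqueue of /dev/vhost-net from userspace.
 * Queue index 0 and a ~50us timeout are arbitrary example values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	struct vhost_vring_state s = {
		.index = 0,	/* first virtqueue of the device */
		.num = 50,	/* ~50us: busy_clock() is local_clock() >> 10 */
	};
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-net");
		return 1;
	}
	/* Vring ioctls require ownership of the device first. */
	if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0) {
		perror("VHOST_SET_OWNER");
		return 1;
	}
	if (ioctl(fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &s) < 0) {
		perror("VHOST_SET_VRING_BUSYLOOP_TIMEOUT");
		return 1;
	}
	return 0;
}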
Diffstat (limited to 'drivers/vhost')
-rw-r--r--    drivers/vhost/net.c      78
-rw-r--r--    drivers/vhost/vhost.c    14
-rw-r--r--    drivers/vhost/vhost.h     1
3 files changed, 88 insertions(+), 5 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7bd75ff8be26..f744eeb3e2b4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -287,6 +287,43 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 	rcu_read_unlock_bh();
 }
 
+static inline unsigned long busy_clock(void)
+{
+	return local_clock() >> 10;
+}
+
+static bool vhost_can_busy_poll(struct vhost_dev *dev,
+				unsigned long endtime)
+{
+	return likely(!need_resched()) &&
+	       likely(!time_after(busy_clock(), endtime)) &&
+	       likely(!signal_pending(current)) &&
+	       !vhost_has_work(dev);
+}
+
+static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+				    struct vhost_virtqueue *vq,
+				    struct iovec iov[], unsigned int iov_size,
+				    unsigned int *out_num, unsigned int *in_num)
+{
+	unsigned long uninitialized_var(endtime);
+	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+				  out_num, in_num, NULL, NULL);
+
+	if (r == vq->num && vq->busyloop_timeout) {
+		preempt_disable();
+		endtime = busy_clock() + vq->busyloop_timeout;
+		while (vhost_can_busy_poll(vq->dev, endtime) &&
+		       vhost_vq_avail_empty(vq->dev, vq))
+			cpu_relax_lowlatency();
+		preempt_enable();
+		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+				      out_num, in_num, NULL, NULL);
+	}
+
+	return r;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
@@ -331,10 +368,9 @@ static void handle_tx(struct vhost_net *net)
 			      % UIO_MAXIOV == nvq->done_idx))
 			break;
 
-		head = vhost_get_vq_desc(vq, vq->iov,
-					 ARRAY_SIZE(vq->iov),
-					 &out, &in,
-					 NULL, NULL);
+		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
+						ARRAY_SIZE(vq->iov),
+						&out, &in);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(head < 0))
 			break;
@@ -435,6 +471,38 @@ static int peek_head_len(struct sock *sk)
 	return len;
 }
 
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+	unsigned long uninitialized_var(endtime);
+	int len = peek_head_len(sk);
+
+	if (!len && vq->busyloop_timeout) {
+		/* Both tx vq and rx socket were polled here */
+		mutex_lock(&vq->mutex);
+		vhost_disable_notify(&net->dev, vq);
+
+		preempt_disable();
+		endtime = busy_clock() + vq->busyloop_timeout;
+
+		while (vhost_can_busy_poll(&net->dev, endtime) &&
+		       skb_queue_empty(&sk->sk_receive_queue) &&
+		       vhost_vq_avail_empty(&net->dev, vq))
+			cpu_relax_lowlatency();
+
+		preempt_enable();
+
+		if (vhost_enable_notify(&net->dev, vq))
+			vhost_poll_queue(&vq->poll);
+		mutex_unlock(&vq->mutex);
+
+		len = peek_head_len(sk);
+	}
+
+	return len;
+}
+
 /* This is a multi-buffer version of vhost_get_desc, that works if
  *	vq has read descriptors only.
  * @vq		- the relevant virtqueue
@@ -553,7 +621,7 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = peek_head_len(sock->sk))) {
+	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 97f26f0aab40..669fef1e2bb6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -303,6 +303,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->memory = NULL;
 	vhost_reset_is_le(vq);
 	vhost_disable_cross_endian(vq);
+	vq->busyloop_timeout = 0;
 }
 
 static int vhost_worker(void *data)
@@ -937,6 +938,19 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 	case VHOST_GET_VRING_ENDIAN:
 		r = vhost_get_vring_endian(vq, idx, argp);
 		break;
+	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
+		if (copy_from_user(&s, argp, sizeof(s))) {
+			r = -EFAULT;
+			break;
+		}
+		vq->busyloop_timeout = s.num;
+		break;
+	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
+		s.index = idx;
+		s.num = vq->busyloop_timeout;
+		if (copy_to_user(argp, &s, sizeof(s)))
+			r = -EFAULT;
+		break;
 	default:
 		r = -ENOIOCTLCMD;
 	}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index af5af773bf7a..d36d8beb3351 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -115,6 +115,7 @@ struct vhost_virtqueue {
 	/* Ring endianness requested by userspace for cross-endian support. */
 	bool user_be;
 #endif
+	u32 busyloop_timeout;
 };
 
 struct vhost_dev {