path: root/drivers/vhost
author	Jason Wang <jasowang@redhat.com>	2013-09-02 04:40:59 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-03 22:46:57 -0400
commit	ce21a02913dc79205485637b6e0927a4c800c4a4 (patch)
tree	c1a9325b2a7579c65a5accf8f05d3a9cb2f048ad /drivers/vhost
parent	c49e4e573be86acd36c747511ea5dc76be122206 (diff)
vhost_net: determine whether or not to use zerocopy at one time
Currently, even when the packet length is smaller than VHOST_GOODCOPY_LEN, we still set zcopy_used to true whenever upend_idx != done_idx, and then roll back that choice later. Avoid this by checking all the conditions at once and deciding whether to use zerocopy up front.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
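For reference, a minimal user-space sketch of the decision change (this is not kernel code: the constant values, the tx_select_zcopy() stub, and the sample indices are assumptions for illustration only):

/* Minimal user-space sketch of the decision change; stand-in constants
 * and the tx_select_zcopy() stub are assumptions, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define UIO_MAXIOV         1024
#define VHOST_GOODCOPY_LEN 256	/* illustrative value */

/* Stands in for vhost_net_tx_select_zcopy(), which rates recent
 * zerocopy completions; assume it approves here. */
static bool tx_select_zcopy(void)
{
	return true;
}

/* Before: any in-flight DMA (upend_idx != done_idx) forces zcopy_used
 * true even for short packets, and the choice is rolled back later. */
static bool zcopy_used_before(bool zcopy, size_t len,
			      int upend_idx, int done_idx)
{
	return zcopy && (len >= VHOST_GOODCOPY_LEN || upend_idx != done_idx);
}

/* After: every condition is checked once, up front, so a short packet
 * simply takes the copy path and nothing needs rolling back. */
static bool zcopy_used_after(bool zcopy, size_t len,
			     int upend_idx, int done_idx)
{
	return zcopy && len >= VHOST_GOODCOPY_LEN
		     && (upend_idx + 1) % UIO_MAXIOV != done_idx
		     && tx_select_zcopy();
}

int main(void)
{
	/* 64-byte packet with one DMA still in flight: the old test
	 * picks zerocopy (then rolls back); the new test picks copy. */
	printf("before: %d, after: %d\n",
	       zcopy_used_before(true, 64, 5, 4),
	       zcopy_used_after(true, 64, 5, 4));
	return 0;
}

Note that the new test also leaves one slot free in the ubuf ring ((upend_idx + 1) % UIO_MAXIOV != done_idx) rather than merely checking for any in-flight DMA, and it consults vhost_net_tx_select_zcopy() before committing to the zerocopy path.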
Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/net.c	47
1 file changed, 20 insertions(+), 27 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8a6dd0d5667c..3f89dea297a3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -404,43 +404,36 @@ static void handle_tx(struct vhost_net *net)
 			       iov_length(nvq->hdr, s), hdr_size);
 			break;
 		}
-		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
-				       nvq->upend_idx != nvq->done_idx);
+
+		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
+				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
+				      nvq->done_idx
+				   && vhost_net_tx_select_zcopy(net);
 
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
 		if (zcopy_used) {
+			struct ubuf_info *ubuf;
+			ubuf = nvq->ubuf_info + nvq->upend_idx;
+
 			vq->heads[nvq->upend_idx].id = head;
-			if (!vhost_net_tx_select_zcopy(net) ||
-			    len < VHOST_GOODCOPY_LEN) {
-				/* copy don't need to wait for DMA done */
-				vq->heads[nvq->upend_idx].len =
-							VHOST_DMA_DONE_LEN;
-				msg.msg_control = NULL;
-				msg.msg_controllen = 0;
-				ubufs = NULL;
-			} else {
-				struct ubuf_info *ubuf;
-				ubuf = nvq->ubuf_info + nvq->upend_idx;
-
-				vq->heads[nvq->upend_idx].len =
-					VHOST_DMA_IN_PROGRESS;
-				ubuf->callback = vhost_zerocopy_callback;
-				ubuf->ctx = nvq->ubufs;
-				ubuf->desc = nvq->upend_idx;
-				msg.msg_control = ubuf;
-				msg.msg_controllen = sizeof(ubuf);
-				ubufs = nvq->ubufs;
-				kref_get(&ubufs->kref);
-			}
+			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+			ubuf->callback = vhost_zerocopy_callback;
+			ubuf->ctx = nvq->ubufs;
+			ubuf->desc = nvq->upend_idx;
+			msg.msg_control = ubuf;
+			msg.msg_controllen = sizeof(ubuf);
+			ubufs = nvq->ubufs;
+			kref_get(&ubufs->kref);
 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
-		} else
+		} else {
 			msg.msg_control = NULL;
+			ubufs = NULL;
+		}
 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
 		err = sock->ops->sendmsg(NULL, sock, &msg, len);
 		if (unlikely(err < 0)) {
 			if (zcopy_used) {
-				if (ubufs)
-					vhost_net_ubuf_put(ubufs);
+				vhost_net_ubuf_put(ubufs);
 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
 					% UIO_MAXIOV;
 			}