author    Michael S. Tsirkin <mst@redhat.com>  2012-12-06 10:00:18 -0500
committer Michael S. Tsirkin <mst@redhat.com>  2012-12-06 10:09:18 -0500
commit    cedb9bdce099206290a2bdd02ce47a7b253b6a84 (patch)
tree      e30b69a7b94c6fe30272549f3c3ad6faf10bd37f /drivers
parent    1280c27f8e29acf4af2da914e80ec27c3dbd5c01 (diff)
vhost-net: skip head management if no outstanding
For short packets, zerocopy mode adds the overhead of managing heads, which isn't necessary: we could simply update the used ring directly, the same as with zerocopy disabled. Things seem to run a bit faster if we detect this and bypass head management when zcopy isn't actually used.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
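As a rough, standalone illustration of the condition this patch introduces (a sketch only, not the kernel code; the 256-byte threshold is just an illustrative stand-in for VHOST_GOODCOPY_LEN):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-in for VHOST_GOODCOPY_LEN in drivers/vhost/net.c. */
#define GOODCOPY_LEN 256

/*
 * Mirrors the new zcopy_used test: take the zerocopy path only when
 * zerocopy is enabled and either the packet is large enough to be
 * worth it, or earlier zerocopy sends are still outstanding
 * (upend_idx != done_idx), so the heads array must keep being tracked.
 */
bool tx_uses_zerocopy(bool zcopy, size_t len,
                      unsigned int upend_idx, unsigned int done_idx)
{
        return zcopy && (len >= GOODCOPY_LEN || upend_idx != done_idx);
}

When this test is false, a short-packet send falls through to the plain vhost_add_used_and_signal() path in the hunks below instead of going through vq->heads and vhost_zerocopy_signal_used().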
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/vhost/net.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 6a86deb39a72..aa76ca72606a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -241,7 +241,7 @@ static void handle_tx(struct vhost_net *net)
 	size_t hdr_size;
 	struct socket *sock;
 	struct vhost_ubuf_ref *uninitialized_var(ubufs);
-	bool zcopy;
+	bool zcopy, zcopy_used;
 
 	/* TODO: check that we are running from vhost_worker? */
 	sock = rcu_dereference_check(vq->private_data, 1);
@@ -319,8 +319,11 @@ static void handle_tx(struct vhost_net *net)
 				       iov_length(vq->hdr, s), hdr_size);
 			break;
 		}
+		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
+				       vq->upend_idx != vq->done_idx);
+
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
-		if (zcopy) {
+		if (zcopy_used) {
 			vq->heads[vq->upend_idx].id = head;
 			if (!vhost_net_tx_select_zcopy(net) ||
 			    len < VHOST_GOODCOPY_LEN) {
@@ -348,7 +351,7 @@ static void handle_tx(struct vhost_net *net)
 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
 		err = sock->ops->sendmsg(NULL, sock, &msg, len);
 		if (unlikely(err < 0)) {
-			if (zcopy) {
+			if (zcopy_used) {
 				if (ubufs)
 					vhost_ubuf_put(ubufs);
 				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
@@ -362,7 +365,7 @@ static void handle_tx(struct vhost_net *net)
 		if (err != len)
 			pr_debug("Truncated TX packet: "
 				 " len %d != %zd\n", err, len);
-		if (!zcopy)
+		if (!zcopy_used)
 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
 		else
 			vhost_zerocopy_signal_used(net, vq);