aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vhost/net.c
diff options
context:
space:
mode:
authorJason Wang <jasowang@redhat.com>2013-09-02 04:41:01 -0400
committerDavid S. Miller <davem@davemloft.net>2013-09-03 22:46:58 -0400
commitf7c6be404d8fa52c54ff931390aab01e5c7654d6 (patch)
treef2ff95e8a480a9d0a4442a2f8c1851432614a5fe /drivers/vhost/net.c
parent19c73b3e08d16ee923f3962df4abf6205127896a (diff)
vhost_net: correctly limit the max pending buffers
As Michael pointed out, we used to limit the max pending DMAs to get better cache utilization. But it was not done correctly, since the check was only performed when there were no new buffers submitted from the guest. A guest can easily exceed the limit by continuously sending packets. So this patch moves the check into the main loop. Tests show about a 5%-10% improvement in per-CPU throughput for guest tx. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--drivers/vhost/net.c18
1 files changed, 7 insertions, 11 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8e9dc554b1ef..831eb4fd197d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -363,6 +363,13 @@ static void handle_tx(struct vhost_net *net)
363 if (zcopy) 363 if (zcopy)
364 vhost_zerocopy_signal_used(net, vq); 364 vhost_zerocopy_signal_used(net, vq);
365 365
366 /* If more outstanding DMAs, queue the work.
367 * Handle upend_idx wrap around
368 */
369 if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
370 % UIO_MAXIOV == nvq->done_idx))
371 break;
372
366 head = vhost_get_vq_desc(&net->dev, vq, vq->iov, 373 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
367 ARRAY_SIZE(vq->iov), 374 ARRAY_SIZE(vq->iov),
368 &out, &in, 375 &out, &in,
@@ -372,17 +379,6 @@ static void handle_tx(struct vhost_net *net)
372 break; 379 break;
373 /* Nothing new? Wait for eventfd to tell us they refilled. */ 380 /* Nothing new? Wait for eventfd to tell us they refilled. */
374 if (head == vq->num) { 381 if (head == vq->num) {
375 int num_pends;
376
377 /* If more outstanding DMAs, queue the work.
378 * Handle upend_idx wrap around
379 */
380 num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
381 (nvq->upend_idx - nvq->done_idx) :
382 (nvq->upend_idx + UIO_MAXIOV -
383 nvq->done_idx);
384 if (unlikely(num_pends > VHOST_MAX_PEND))
385 break;
386 if (unlikely(vhost_enable_notify(&net->dev, vq))) { 382 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
387 vhost_disable_notify(&net->dev, vq); 383 vhost_disable_notify(&net->dev, vq);
388 continue; 384 continue;