Diffstat (limited to 'drivers/vhost/net.c')
 drivers/vhost/net.c | 41 ++++++++++++++---------------------------
 1 file changed, 14 insertions(+), 27 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..2d9df786a9d3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
 	return iov_iter_count(iter);
 }
 
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
-	return total_len >= VHOST_NET_WEIGHT ||
-	       pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
 static int get_tx_bufs(struct vhost_net *net,
 			struct vhost_net_virtqueue *nvq,
 			struct msghdr *msg,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 	int sent_pkts = 0;
 	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
 
-	for (;;) {
+	do {
 		bool busyloop_intr = false;
 
 		if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +839,7 @@ done:
 		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
 		vq->heads[nvq->done_idx].len = 0;
 		++nvq->done_idx;
-		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 
 	vhost_tx_batch(net, nvq, sock, &msg);
 }
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 	bool zcopy_used;
 	int sent_pkts = 0;
 
-	for (;;) {
+	do {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 		else
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
-		if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-						      &busyloop_intr))) {
+	do {
+		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+						      &busyloop_intr);
+		if (!sock_len)
+			break;
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
 			vhost_log_write(vq, vq_log, log, vhost_len,
 					vq->iov, in);
 		total_len += vhost_len;
-		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			goto out;
-		}
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
 	if (unlikely(busyloop_intr))
 		vhost_poll_queue(&vq->poll);
-	else
+	else if (!sock_len)
 		vhost_net_enable_vq(net, vq);
 out:
 	vhost_net_signal_used(nvq);
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
-		       UIO_MAXIOV + VHOST_NET_BATCH);
+		       UIO_MAXIOV + VHOST_NET_BATCH,
+		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
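
Because the diffstat is limited to drivers/vhost/net.c, the core-side half of this change is not visible here. Judging from the new callers, vhost_exceeds_weight() now lives in the vhost core, takes the virtqueue, and queues the poll itself once the packet or byte budget handed to vhost_dev_init() is exhausted, which is why each per-device loop collapses into a do { } while (likely(!vhost_exceeds_weight(...))) condition. Below is a minimal sketch of what that helper and the updated vhost_dev_init() prototype could look like; the struct vhost_dev field names (weight, byte_weight) and the exact parameter names are assumptions, not taken from this diff.

/*
 * Hypothetical sketch of the core-side change in drivers/vhost/vhost.c,
 * not shown in this diffstat-limited view. Field names weight and
 * byte_weight are assumed to match the vhost_dev_init() call above,
 * which passes VHOST_NET_PKT_WEIGHT first and VHOST_NET_WEIGHT second.
 */
#include <linux/module.h>
#include "vhost.h"

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	/*
	 * Budget exhausted: requeue the work so other virtqueues and
	 * devices get a turn, just as the old open-coded callers did
	 * with vhost_poll_queue() before breaking out of their loops.
	 */
	if (total_len >= dev->byte_weight || pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);

/*
 * Assumed matching prototype in drivers/vhost/vhost.h: the iov limit first,
 * then the packet weight and byte weight in the order net.c passes them.
 */
void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight);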