author		David S. Miller <davem@davemloft.net>	2018-06-03 09:31:58 -0400
committer	David S. Miller <davem@davemloft.net>	2018-06-03 09:31:58 -0400
commit		9c54aeb03a6d65a5834bd92376e921cbac6dfb8f
tree		70441095d58678711d68cfef4934765251425d1f /drivers/vhost
parent		eaf47b17a77fda841a1102d76c15161ee438b347
parent		918fe1b3157978ada4267468008c5f89ef101e7d
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Filling in the padding slot in the bpf structure as a bug fix in 'net'
overlapped with actually using that padding area for something in
'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
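The conflict described above follows a common UAPI pattern: the fix in 'net' zero-fills a structure's padding hole (so uninitialized kernel memory never reaches userspace), while 'net-next' starts using those same bits as a real field. Below is a purely hypothetical C sketch of that pattern; the struct and field names are invented for illustration and are not the actual bpf structures involved:

	#include <linux/types.h>

	/* Hypothetical illustration only; not the real bpf UAPI structs. */

	/* In 'net' (bug fix): the alignment hole is made explicit and
	 * zeroed before the struct is copied to userspace, closing an
	 * infoleak. */
	struct example_info_fix {
		__u32	id;
		__u32	flags;
		__u32	pad;		/* was an implicit hole; now zeroed */
	};

	/* In 'net-next' (feature): the same bits become a real field. */
	struct example_info_feature {
		__u32	id;
		__u32	flags;
		__u32	new_flag:1;	/* carved out of the old padding */
		__u32	:31;		/* remaining bits still reserved */
	};

A merge resolution for this kind of overlap typically keeps the new field while preserving the fix's zeroing of whatever bits remain reserved.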
Diffstat (limited to 'drivers/vhost')
 drivers/vhost/net.c | 37 ++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c4b49fca4871..e7cf7d21cfb5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -108,7 +108,9 @@ struct vhost_net_virtqueue {
 	/* vhost zerocopy support fields below: */
 	/* last used idx for outstanding DMA zerocopy buffers */
 	int upend_idx;
-	/* first used idx for DMA done zerocopy buffers */
+	/* For TX, first used idx for DMA done zerocopy buffers
+	 * For RX, number of batched heads
+	 */
 	int done_idx;
 	/* an array of userspace buffers info */
 	struct ubuf_info *ubuf_info;
@@ -629,6 +631,18 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_dev *dev = vq->dev;
+
+	if (!nvq->done_idx)
+		return;
+
+	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+	nvq->done_idx = 0;
+}
+
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 {
 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -638,6 +652,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 	int len = peek_head_len(rvq, sk);
 
 	if (!len && vq->busyloop_timeout) {
+		/* Flush batched heads first */
+		vhost_rx_signal_used(rvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
@@ -765,7 +781,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount, nheads = 0;
+	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -794,8 +810,8 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
-					&in, vq_log, &log,
+		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+					vhost_len, &in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
@@ -866,12 +882,9 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		nheads += headcount;
-		if (nheads > VHOST_RX_BATCH) {
-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-						    nheads);
-			nheads = 0;
-		}
+		nvq->done_idx += headcount;
+		if (nvq->done_idx > VHOST_RX_BATCH)
+			vhost_rx_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -883,9 +896,7 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
-	if (nheads)
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    nheads);
+	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
 
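The net effect of the vhost change: on RX, done_idx now counts batched used heads, get_rx_bufs() writes new entries past the already-batched ones at vq->heads + done_idx, and vhost_rx_signal_used() flushes the whole batch with a single vhost_add_used_and_signal_n() call, once per VHOST_RX_BATCH heads, before busy-polling, and again at exit. A minimal user-space sketch of that batch-and-flush pattern follows; names mirror the diff, but this is standalone illustrative C, not kernel code:

	#include <stdio.h>

	#define VHOST_RX_BATCH 64		/* flush threshold, as in the diff */

	struct fake_nvq {
		int heads[VHOST_RX_BATCH * 2];	/* stand-in for vq->heads */
		int done_idx;			/* number of batched used heads */
	};

	/* Analogue of vhost_rx_signal_used(): report all batched heads at
	 * once (the real code calls vhost_add_used_and_signal_n() here). */
	static void signal_used(struct fake_nvq *nvq)
	{
		if (!nvq->done_idx)
			return;
		printf("signal guest: %d heads used\n", nvq->done_idx);
		nvq->done_idx = 0;
	}

	int main(void)
	{
		struct fake_nvq nvq = { .done_idx = 0 };
		int headcount = 1;		/* the real headcount varies */

		for (int i = 0; i < 200; i++) {
			/* get_rx_bufs() fills entries after the batched ones */
			nvq.heads[nvq.done_idx] = i;
			nvq.done_idx += headcount;
			if (nvq.done_idx > VHOST_RX_BATCH)
				signal_used(&nvq);	/* one signal per batch */
		}
		signal_used(&nvq);			/* final flush, as at out: */
		return 0;
	}

Batching the used-ring updates this way amortizes the cost of notifying the guest over many received packets instead of paying it per descriptor.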