author    David S. Miller <davem@davemloft.net>  2010-12-14 14:33:23 -0500
committer David S. Miller <davem@davemloft.net>  2010-12-14 14:33:23 -0500
commit    9fe146aef44afe5ec677d8150b6ae94e09b773f7
tree      25342064d136f582e57c6c2ebf4ec62dc8c71576 /drivers/vhost
parent    6389aa73ab8c15084fce18307a8e198eaff818da
parent    4e53f78e5b06c073a5c10814c72e98c1ca8a9f10
Merge branch 'vhost-net-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c     9
-rw-r--r--  drivers/vhost/test.c  320
-rw-r--r--  drivers/vhost/test.h    7
-rw-r--r--  drivers/vhost/vhost.c  44
-rw-r--r--  drivers/vhost/vhost.h   2
5 files changed, 352 insertions(+), 30 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f442668a1e52..9b3ca103135f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -10,7 +10,6 @@
 #include <linux/eventfd.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -143,7 +142,6 @@ static void handle_tx(struct vhost_net *net)
 		return;
 	}
 
-	use_mm(net->dev.mm);
 	mutex_lock(&vq->mutex);
 	vhost_disable_notify(vq);
 
@@ -208,7 +206,6 @@ static void handle_tx(struct vhost_net *net)
 	}
 
 	mutex_unlock(&vq->mutex);
-	unuse_mm(net->dev.mm);
 }
 
 static int peek_head_len(struct sock *sk)
@@ -313,7 +310,6 @@ static void handle_rx_big(struct vhost_net *net)
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
-	use_mm(net->dev.mm);
 	mutex_lock(&vq->mutex);
 	vhost_disable_notify(vq);
 	hdr_size = vq->vhost_hlen;
@@ -392,7 +388,6 @@ static void handle_rx_big(struct vhost_net *net)
 	}
 
 	mutex_unlock(&vq->mutex);
-	unuse_mm(net->dev.mm);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -424,7 +419,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
-	use_mm(net->dev.mm);
 	mutex_lock(&vq->mutex);
 	vhost_disable_notify(vq);
 	vhost_hlen = vq->vhost_hlen;
@@ -459,7 +453,7 @@ static void handle_rx_mergeable(struct vhost_net *net)
 			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
 		else
 			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
-			 * needed because sendmsg can modify msg_iov. */
+			 * needed because recvmsg can modify msg_iov. */
 			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
 		msg.msg_iovlen = in;
 		err = sock->ops->recvmsg(NULL, sock, &msg,
@@ -501,7 +495,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
 	}
 
 	mutex_unlock(&vq->mutex);
-	unuse_mm(net->dev.mm);
 }
 
 static void handle_rx(struct vhost_net *net)
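The use_mm()/unuse_mm() pairs removed above are not simply dropped: the vhost.c hunks below move them into vhost_worker(), so the worker thread borrows the owner's mm once for its whole lifetime rather than around every handler invocation. A minimal sketch of that pattern, assuming only the stock kthread and mmu_context APIs (worker_fn and its wiring are illustrative, not the actual vhost code):

#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

/* Service loop that runs on behalf of a userspace process.  Borrowing
 * the mm once up front lets copy_{to,from}_user() inside the loop
 * resolve against that process's address space without per-item
 * use_mm()/unuse_mm() pairs. */
static int worker_fn(void *data)
{
	struct mm_struct *mm = data;	/* owner's mm, pinned by the creator */

	use_mm(mm);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* ... pop and run queued work items here ... */
		schedule();
	}
	unuse_mm(mm);			/* release before the thread exits */
	return 0;
}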
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
new file mode 100644
index 000000000000..099f30230d06
--- /dev/null
+++ b/drivers/vhost/test.c
@@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x80000
+
+enum {
+	VHOST_TEST_VQ = 0,
+	VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+	struct vhost_dev dev;
+	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+	unsigned out, in;
+	int head;
+	size_t len, total_len = 0;
+	void *private;
+
+	private = rcu_dereference_check(vq->private_data, 1);
+	if (!private)
+		return;
+
+	mutex_lock(&vq->mutex);
+	vhost_disable_notify(vq);
+
+	for (;;) {
+		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
+					 ARRAY_SIZE(vq->iov),
+					 &out, &in,
+					 NULL, NULL);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
+		/* Nothing new?  Wait for eventfd to tell us they refilled. */
+		if (head == vq->num) {
+			if (unlikely(vhost_enable_notify(vq))) {
+				vhost_disable_notify(vq);
+				continue;
+			}
+			break;
+		}
+		if (in) {
+			vq_err(vq, "Unexpected descriptor format for TX: "
72 "out %d, int %d\n", out, in);
+			break;
+		}
+		len = iov_length(vq->iov, out);
+		/* Sanity check */
+		if (!len) {
+			vq_err(vq, "Unexpected 0 len for TX\n");
+			break;
+		}
+		vhost_add_used_and_signal(&n->dev, vq, head, 0);
+		total_len += len;
+		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
+			vhost_poll_queue(&vq->poll);
+			break;
+		}
+	}
+
+	mutex_unlock(&vq->mutex);
+}
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
+
+	handle_vq(n);
+}
+
+static int vhost_test_open(struct inode *inode, struct file *f)
+{
+	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_dev *dev;
+	int r;
+
+	if (!n)
+		return -ENOMEM;
+
+	dev = &n->dev;
+	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
+	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+	if (r < 0) {
+		kfree(n);
+		return r;
+	}
+
+	f->private_data = n;
+
+	return 0;
+}
+
+static void *vhost_test_stop_vq(struct vhost_test *n,
+				struct vhost_virtqueue *vq)
+{
+	void *private;
+
+	mutex_lock(&vq->mutex);
+	private = rcu_dereference_protected(vq->private_data,
+					    lockdep_is_held(&vq->mutex));
+	rcu_assign_pointer(vq->private_data, NULL);
+	mutex_unlock(&vq->mutex);
+	return private;
+}
+
+static void vhost_test_stop(struct vhost_test *n, void **privatep)
+{
+	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
+}
+
+static void vhost_test_flush_vq(struct vhost_test *n, int index)
+{
+	vhost_poll_flush(&n->dev.vqs[index].poll);
+}
+
+static void vhost_test_flush(struct vhost_test *n)
+{
+	vhost_test_flush_vq(n, VHOST_TEST_VQ);
+}
+
+static int vhost_test_release(struct inode *inode, struct file *f)
+{
+	struct vhost_test *n = f->private_data;
+	void *private;
+
+	vhost_test_stop(n, &private);
+	vhost_test_flush(n);
+	vhost_dev_cleanup(&n->dev);
+	/* We do an extra flush before freeing memory,
+	 * since jobs can re-queue themselves. */
+	vhost_test_flush(n);
+	kfree(n);
+	return 0;
+}
+
+static long vhost_test_run(struct vhost_test *n, int test)
+{
+	void *priv, *oldpriv;
+	struct vhost_virtqueue *vq;
+	int r, index;
+
+	if (test < 0 || test > 1)
+		return -EINVAL;
+
+	mutex_lock(&n->dev.mutex);
+	r = vhost_dev_check_owner(&n->dev);
+	if (r)
+		goto err;
+
+	for (index = 0; index < n->dev.nvqs; ++index) {
+		/* Verify that ring has been setup correctly. */
+		if (!vhost_vq_access_ok(&n->vqs[index])) {
+			r = -EFAULT;
+			goto err;
+		}
+	}
+
+	for (index = 0; index < n->dev.nvqs; ++index) {
+		vq = n->vqs + index;
+		mutex_lock(&vq->mutex);
+		priv = test ? n : NULL;
+
+		/* start polling new socket */
+		oldpriv = rcu_dereference_protected(vq->private_data,
+						    lockdep_is_held(&vq->mutex));
+		rcu_assign_pointer(vq->private_data, priv);
+
+		mutex_unlock(&vq->mutex);
+
+		if (oldpriv) {
+			vhost_test_flush_vq(n, index);
+		}
+	}
+
+	mutex_unlock(&n->dev.mutex);
+	return 0;
+
+err:
+	mutex_unlock(&n->dev.mutex);
+	return r;
+}
+
+static long vhost_test_reset_owner(struct vhost_test *n)
+{
+	void *priv = NULL;
+	long err;
+	mutex_lock(&n->dev.mutex);
+	err = vhost_dev_check_owner(&n->dev);
+	if (err)
+		goto done;
+	vhost_test_stop(n, &priv);
+	vhost_test_flush(n);
+	err = vhost_dev_reset_owner(&n->dev);
+done:
+	mutex_unlock(&n->dev.mutex);
+	return err;
+}
+
+static int vhost_test_set_features(struct vhost_test *n, u64 features)
+{
+	mutex_lock(&n->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&n->dev)) {
+		mutex_unlock(&n->dev.mutex);
+		return -EFAULT;
+	}
+	n->dev.acked_features = features;
+	smp_wmb();
+	vhost_test_flush(n);
+	mutex_unlock(&n->dev.mutex);
+	return 0;
+}
+
+static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
+			     unsigned long arg)
+{
+	struct vhost_test *n = f->private_data;
+	void __user *argp = (void __user *)arg;
+	u64 __user *featurep = argp;
+	int test;
+	u64 features;
+	int r;
+	switch (ioctl) {
+	case VHOST_TEST_RUN:
+		if (copy_from_user(&test, argp, sizeof test))
+			return -EFAULT;
+		return vhost_test_run(n, test);
+	case VHOST_GET_FEATURES:
+		features = VHOST_FEATURES;
+		if (copy_to_user(featurep, &features, sizeof features))
+			return -EFAULT;
+		return 0;
+	case VHOST_SET_FEATURES:
+		if (copy_from_user(&features, featurep, sizeof features))
+			return -EFAULT;
+		if (features & ~VHOST_FEATURES)
+			return -EOPNOTSUPP;
+		return vhost_test_set_features(n, features);
+	case VHOST_RESET_OWNER:
+		return vhost_test_reset_owner(n);
+	default:
+		mutex_lock(&n->dev.mutex);
+		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+		vhost_test_flush(n);
+		mutex_unlock(&n->dev.mutex);
+		return r;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
+				    unsigned long arg)
+{
+	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vhost_test_fops = {
+	.owner          = THIS_MODULE,
+	.release        = vhost_test_release,
+	.unlocked_ioctl = vhost_test_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = vhost_test_compat_ioctl,
+#endif
+	.open           = vhost_test_open,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice vhost_test_misc = {
+	MISC_DYNAMIC_MINOR,
+	"vhost-test",
+	&vhost_test_fops,
+};
+
+static int vhost_test_init(void)
+{
+	return misc_register(&vhost_test_misc);
+}
+module_init(vhost_test_init);
+
+static void vhost_test_exit(void)
+{
+	misc_deregister(&vhost_test_misc);
+}
+module_exit(vhost_test_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel side for virtio simulator");
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
new file mode 100644
index 000000000000..1fef5df82153
--- /dev/null
+++ b/drivers/vhost/test.h
@@ -0,0 +1,7 @@
+#ifndef LINUX_VHOST_TEST_H
+#define LINUX_VHOST_TEST_H
+
+/* Start a given test on the virtio null device. 0 stops all tests. */
+#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+
+#endif
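For context, a userspace exercise of this device would look roughly as follows; a hedged sketch, assuming the standard setup ioctls from linux/vhost.h plus the VHOST_TEST_RUN ioctl defined above. Real use also needs the vring setup calls elided in the comment, otherwise vhost_test_run() fails its vhost_vq_access_ok() check:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "test.h"		/* VHOST_TEST_RUN, from the header above */

int main(void)
{
	int test = 1;		/* 1 starts the null test, 0 stops it */
	int fd = open("/dev/vhost-test", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-test");
		return 1;
	}
	/* Bind the device to this process; its worker will use_mm() us. */
	if (ioctl(fd, VHOST_SET_OWNER, NULL))
		perror("VHOST_SET_OWNER");
	/* VHOST_SET_VRING_NUM/ADDR/BASE and the kick/call eventfds
	 * would be configured here before the device can actually run. */
	if (ioctl(fd, VHOST_TEST_RUN, &test))
		perror("VHOST_TEST_RUN");
	return 0;
}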
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 94701ff3a23a..38244f59cdd9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -15,6 +15,7 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -29,8 +30,6 @@
 #include <linux/if_packet.h>
 #include <linux/if_arp.h>
 
-#include <net/sock.h>
-
 #include "vhost.h"
 
 enum {
@@ -157,7 +156,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->used_flags = 0;
-	vq->used_flags = 0;
 	vq->log_used = false;
 	vq->log_addr = -1ull;
 	vq->vhost_hlen = 0;
@@ -178,6 +176,8 @@ static int vhost_worker(void *data)
 	struct vhost_work *work = NULL;
 	unsigned uninitialized_var(seq);
 
+	use_mm(dev->mm);
+
 	for (;;) {
 		/* mb paired w/ kthread_stop */
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -192,7 +192,7 @@ static int vhost_worker(void *data)
 		if (kthread_should_stop()) {
 			spin_unlock_irq(&dev->work_lock);
 			__set_current_state(TASK_RUNNING);
-			return 0;
+			break;
 		}
 		if (!list_empty(&dev->work_list)) {
 			work = list_first_entry(&dev->work_list,
@@ -210,6 +210,8 @@ static int vhost_worker(void *data)
 			schedule();
 
 	}
+	unuse_mm(dev->mm);
+	return 0;
 }
 
 /* Helper to allocate iovec buffers for all vqs. */
@@ -402,15 +404,14 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 	kfree(rcu_dereference_protected(dev->memory,
 					lockdep_is_held(&dev->mutex)));
 	RCU_INIT_POINTER(dev->memory, NULL);
-	if (dev->mm)
-		mmput(dev->mm);
-	dev->mm = NULL;
-
 	WARN_ON(!list_empty(&dev->work_list));
 	if (dev->worker) {
 		kthread_stop(dev->worker);
 		dev->worker = NULL;
 	}
+	if (dev->mm)
+		mmput(dev->mm);
+	dev->mm = NULL;
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -881,14 +882,15 @@ static int set_bit_to_user(int nr, void __user *addr)
 static int log_write(void __user *log_base,
 		     u64 write_address, u64 write_length)
 {
+	u64 write_page = write_address / VHOST_PAGE_SIZE;
 	int r;
 	if (!write_length)
 		return 0;
-	write_address /= VHOST_PAGE_SIZE;
+	write_length += write_address % VHOST_PAGE_SIZE;
 	for (;;) {
 		u64 base = (u64)(unsigned long)log_base;
-		u64 log = base + write_address / 8;
-		int bit = write_address % 8;
+		u64 log = base + write_page / 8;
+		int bit = write_page % 8;
 		if ((u64)(unsigned long)log != log)
 			return -EFAULT;
 		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
@@ -897,7 +899,7 @@ static int log_write(void __user *log_base,
 		if (write_length <= VHOST_PAGE_SIZE)
 			break;
 		write_length -= VHOST_PAGE_SIZE;
-		write_address += VHOST_PAGE_SIZE;
+		write_page += 1;
 	}
 	return r;
 }
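The two log_write() hunks above fix dirty-page accounting for writes that straddle a page boundary: the old code rounded write_address down to a page index but never folded the in-page offset into write_length (and then advanced that page index by VHOST_PAGE_SIZE rather than by one). A worked example, assuming VHOST_PAGE_SIZE == 4096:

/* A 2-byte write at guest address 4095 touches pages 0 and 1.
 *
 * Old code: write_address /= 4096 -> 0; write_length == 2 <= 4096,
 * so only page 0 is logged and page 1's dirty bit is lost.
 *
 * New code: write_page   = 4095 / 4096 = 0;
 *           write_length = 2 + 4095 % 4096 = 4097;
 * pass 1 logs page 0; 4097 > 4096, so write_length -> 1, write_page -> 1;
 * pass 2 logs page 1; 1 <= 4096, loop ends.  Both pages are marked. */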
@@ -1092,7 +1094,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	/* Check it isn't doing very strange things with descriptor numbers. */
 	last_avail_idx = vq->last_avail_idx;
-	if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
+	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
 		vq_err(vq, "Failed to access avail idx at %p\n",
 		       &vq->avail->idx);
 		return -EFAULT;
@@ -1113,8 +1115,8 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (unlikely(get_user(head,
-			      &vq->avail->ring[last_avail_idx % vq->num]))) {
+	if (unlikely(__get_user(head,
+				&vq->avail->ring[last_avail_idx % vq->num]))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 		       last_avail_idx,
 		       &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1213,17 +1215,17 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
 	 * next entry in that used ring. */
 	used = &vq->used->ring[vq->last_used_idx % vq->num];
-	if (put_user(head, &used->id)) {
+	if (__put_user(head, &used->id)) {
 		vq_err(vq, "Failed to write used id");
 		return -EFAULT;
 	}
-	if (put_user(len, &used->len)) {
+	if (__put_user(len, &used->len)) {
 		vq_err(vq, "Failed to write used len");
 		return -EFAULT;
 	}
 	/* Make sure buffer is written before we update index. */
 	smp_wmb();
-	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
+	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
@@ -1255,7 +1257,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 
 	start = vq->last_used_idx % vq->num;
 	used = vq->used->ring + start;
-	if (copy_to_user(used, heads, count * sizeof *used)) {
+	if (__copy_to_user(used, heads, count * sizeof *used)) {
 		vq_err(vq, "Failed to write used");
 		return -EFAULT;
 	}
@@ -1316,7 +1318,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	 * interrupts. */
 	smp_mb();
 
-	if (get_user(flags, &vq->avail->flags)) {
+	if (__get_user(flags, &vq->avail->flags)) {
 		vq_err(vq, "Failed to get flags");
 		return;
 	}
@@ -1367,7 +1369,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
 	smp_mb();
-	r = get_user(avail_idx, &vq->avail->idx);
+	r = __get_user(avail_idx, &vq->avail->idx);
 	if (r) {
 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
 		       &vq->avail->idx, r);
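The get_user()/put_user() to __get_user()/__put_user() conversions above drop the per-access access_ok() range check on the hot path. That is presumably safe because vhost range-checks the ring pointers once, when userspace configures them (vq_access_ok(), reached from the vring setup ioctls and from the vhost_vq_access_ok() check seen in test.c). A sketch of that precondition, not the exact vhost code:

#include <linux/uaccess.h>
#include <linux/virtio_ring.h>

/* Validate the rings once at setup time; afterwards the fast path may
 * use __get_user()/__put_user(), which skip the repeated access_ok(). */
static bool ring_access_ok(struct vring_avail __user *avail,
			   struct vring_used __user *used,
			   unsigned int num)
{
	return access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring);
}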
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 073d06ae091f..2af44b7b1f3f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -102,7 +102,7 @@ struct vhost_virtqueue {
 	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
 	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
 	 * vhost_work execution acts instead of rcu_read_lock() and the end of
-	 * vhost_work execution acts instead of rcu_read_lock().
+	 * vhost_work execution acts instead of rcu_read_unlock().
 	 * Writers use virtqueue mutex. */
 	void __rcu *private_data;
 	/* Log write descriptors */
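The writer-side counterpart of the scheme this comment describes appears in test.c above (vhost_test_stop_vq() plus vhost_test_flush_vq()): publish the new pointer under the vq mutex, then flush the work instead of calling synchronize_rcu(). A condensed sketch of that pattern:

/* Swap private_data and wait for readers.  Flushing the vhost work is
 * the grace period here, because vhost_work execution is the read-side
 * critical section, as the comment above explains. */
static void *swap_private_data(struct vhost_virtqueue *vq, void *new)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = rcu_dereference_protected(vq->private_data,
					lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->private_data, new);
	mutex_unlock(&vq->mutex);

	vhost_poll_flush(&vq->poll);	/* wait out any in-flight work item */
	return old;
}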