author		Michael S. Tsirkin <mst@redhat.com>	2014-12-11 18:10:49 -0500
committer	Michael S. Tsirkin <mst@redhat.com>	2014-12-15 16:49:28 -0500
commit		b9f7ac8c72894c19bf258a54ecaa708df4ffbe80
tree		74d25a95bf300aed5f58c6a08a8faf561db13179 /drivers/vhost
parent		b97a8a90067896f99f0d636dbc2b89a953123fad
vringh: update for virtio 1.0 APIs
When switching everything over to virtio 1.0 memory access APIs, I
missed converting vringh.  Fortunately, it's straight-forward.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/vringh.c	121
1 file changed, 74 insertions(+), 47 deletions(-)
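
For context: the vringh16_to_cpu()/cpu_to_vringh16() (and 32/64-bit) helpers this
patch relies on live in include/linux/vringh.h, which is filtered out of this
drivers/vhost-limited view. A minimal sketch of the 16-bit pair, assuming they
follow the usual virtio_byteorder.h pattern keyed off the new vrh->little_endian
field (not the verbatim header):

	static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
	{
		/* virtio 1.0 rings are always little-endian; legacy rings
		 * use the ring's native byte order. */
		return __virtio16_to_cpu(vrh->little_endian, val);
	}

	static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
	{
		return __cpu_to_virtio16(vrh->little_endian, val);
	}
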
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index ac3fe2757961..3bb02c60a2f5 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -11,6 +11,7 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <uapi/linux/virtio_config.h>
 
 static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
 {
@@ -28,13 +29,14 @@ static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
 
 /* Returns vring->num if empty, -ve on error. */
 static inline int __vringh_get_head(const struct vringh *vrh,
-				    int (*getu16)(u16 *val, const u16 *p),
+				    int (*getu16)(const struct vringh *vrh,
+						  u16 *val, const __virtio16 *p),
 				    u16 *last_avail_idx)
 {
 	u16 avail_idx, i, head;
 	int err;
 
-	err = getu16(&avail_idx, &vrh->vring.avail->idx);
+	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
 	if (err) {
 		vringh_bad("Failed to access avail idx at %p",
 			   &vrh->vring.avail->idx);
@@ -49,7 +51,7 @@ static inline int __vringh_get_head(const struct vringh *vrh,
 
 	i = *last_avail_idx & (vrh->vring.num - 1);
 
-	err = getu16(&head, &vrh->vring.avail->ring[i]);
+	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
 	if (err) {
 		vringh_bad("Failed to read head: idx %d address %p",
 			   *last_avail_idx, &vrh->vring.avail->ring[i]);
@@ -144,28 +146,32 @@ static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
 }
 
 /* No reason for this code to be inline. */
-static int move_to_indirect(int *up_next, u16 *i, void *addr,
+static int move_to_indirect(const struct vringh *vrh,
+			    int *up_next, u16 *i, void *addr,
 			    const struct vring_desc *desc,
 			    struct vring_desc **descs, int *desc_max)
 {
+	u32 len;
+
 	/* Indirect tables can't have indirect. */
 	if (*up_next != -1) {
 		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
 		return -EINVAL;
 	}
 
-	if (unlikely(desc->len % sizeof(struct vring_desc))) {
+	len = vringh32_to_cpu(vrh, desc->len);
+	if (unlikely(len % sizeof(struct vring_desc))) {
 		vringh_bad("Strange indirect len %u", desc->len);
 		return -EINVAL;
 	}
 
 	/* We will check this when we follow it! */
-	if (desc->flags & VRING_DESC_F_NEXT)
-		*up_next = desc->next;
+	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
+		*up_next = vringh16_to_cpu(vrh, desc->next);
 	else
 		*up_next = -2;
 	*descs = addr;
-	*desc_max = desc->len / sizeof(struct vring_desc);
+	*desc_max = len / sizeof(struct vring_desc);
 
 	/* Now, start at the first indirect. */
 	*i = 0;
@@ -287,22 +293,25 @@ __vringh_iov(struct vringh *vrh, u16 i,
 		if (unlikely(err))
 			goto fail;
 
-		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+		if (unlikely(desc.flags &
+			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
+			u64 a = vringh64_to_cpu(vrh, desc.addr);
+
 			/* Make sure it's OK, and get offset. */
-			len = desc.len;
-			if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+			len = vringh32_to_cpu(vrh, desc.len);
+			if (!rcheck(vrh, a, &len, &range, getrange)) {
 				err = -EINVAL;
 				goto fail;
 			}
 
-			if (unlikely(len != desc.len)) {
+			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
 				slow = true;
 				/* We need to save this range to use offset */
 				slowrange = range;
 			}
 
-			addr = (void *)(long)(desc.addr + range.offset);
-			err = move_to_indirect(&up_next, &i, addr, &desc,
+			addr = (void *)(long)(a + range.offset);
+			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
 					       &descs, &desc_max);
 			if (err)
 				goto fail;
@@ -315,7 +324,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
 			goto fail;
 		}
 
-		if (desc.flags & VRING_DESC_F_WRITE)
+		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
 			iov = wiov;
 		else {
 			iov = riov;
@@ -336,12 +345,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
 
 	again:
 		/* Make sure it's OK, and get offset. */
-		len = desc.len;
-		if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+		len = vringh32_to_cpu(vrh, desc.len);
+		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
+			    getrange)) {
 			err = -EINVAL;
 			goto fail;
 		}
-		addr = (void *)(unsigned long)(desc.addr + range.offset);
+		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
+					       range.offset);
 
 		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
 			err = resize_iovec(iov, gfp);
@@ -353,14 +364,16 @@ __vringh_iov(struct vringh *vrh, u16 i,
 		iov->iov[iov->used].iov_len = len;
 		iov->used++;
 
-		if (unlikely(len != desc.len)) {
-			desc.len -= len;
-			desc.addr += len;
+		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+			desc.len = cpu_to_vringh32(vrh,
+						   vringh32_to_cpu(vrh, desc.len) - len);
+			desc.addr = cpu_to_vringh64(vrh,
+						    vringh64_to_cpu(vrh, desc.addr) + len);
 			goto again;
 		}
 
-		if (desc.flags & VRING_DESC_F_NEXT) {
-			i = desc.next;
+		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
+			i = vringh16_to_cpu(vrh, desc.next);
 		} else {
 			/* Just in case we need to finish traversing above. */
 			if (unlikely(up_next > 0)) {
@@ -387,7 +400,8 @@ fail:
 static inline int __vringh_complete(struct vringh *vrh,
 				    const struct vring_used_elem *used,
 				    unsigned int num_used,
-				    int (*putu16)(u16 *p, u16 val),
+				    int (*putu16)(const struct vringh *vrh,
+						  __virtio16 *p, u16 val),
 				    int (*putused)(struct vring_used_elem *dst,
 						   const struct vring_used_elem
 						   *src, unsigned num))
@@ -420,7 +434,7 @@ static inline int __vringh_complete(struct vringh *vrh,
 	/* Make sure buffer is written before we update index. */
 	virtio_wmb(vrh->weak_barriers);
 
-	err = putu16(&vrh->vring.used->idx, used_idx + num_used);
+	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
 	if (err) {
 		vringh_bad("Failed to update used index at %p",
 			   &vrh->vring.used->idx);
@@ -433,7 +447,9 @@ static inline int __vringh_complete(struct vringh *vrh,
 
 
 static inline int __vringh_need_notify(struct vringh *vrh,
-					int (*getu16)(u16 *val, const u16 *p))
+					int (*getu16)(const struct vringh *vrh,
+						      u16 *val,
+						      const __virtio16 *p))
 {
 	bool notify;
 	u16 used_event;
@@ -447,7 +463,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
 	/* Old-style, without event indices. */
 	if (!vrh->event_indices) {
 		u16 flags;
-		err = getu16(&flags, &vrh->vring.avail->flags);
+		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
 		if (err) {
 			vringh_bad("Failed to get flags at %p",
 				   &vrh->vring.avail->flags);
@@ -457,7 +473,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
 	}
 
 	/* Modern: we know when other side wants to know. */
-	err = getu16(&used_event, &vring_used_event(&vrh->vring));
+	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
 	if (err) {
 		vringh_bad("Failed to get used event idx at %p",
 			   &vring_used_event(&vrh->vring));
@@ -478,20 +494,22 @@ static inline int __vringh_need_notify(struct vringh *vrh,
 }
 
 static inline bool __vringh_notify_enable(struct vringh *vrh,
-					  int (*getu16)(u16 *val, const u16 *p),
-					  int (*putu16)(u16 *p, u16 val))
+					  int (*getu16)(const struct vringh *vrh,
+							u16 *val, const __virtio16 *p),
+					  int (*putu16)(const struct vringh *vrh,
+							__virtio16 *p, u16 val))
 {
 	u16 avail;
 
 	if (!vrh->event_indices) {
 		/* Old-school; update flags. */
-		if (putu16(&vrh->vring.used->flags, 0) != 0) {
+		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
 			vringh_bad("Clearing used flags %p",
 				   &vrh->vring.used->flags);
 			return true;
 		}
 	} else {
-		if (putu16(&vring_avail_event(&vrh->vring),
+		if (putu16(vrh, &vring_avail_event(&vrh->vring),
 			   vrh->last_avail_idx) != 0) {
 			vringh_bad("Updating avail event index %p",
 				   &vring_avail_event(&vrh->vring));
@@ -503,7 +521,7 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
 	 * sure it's written, then check again. */
 	virtio_mb(vrh->weak_barriers);
 
-	if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
+	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
 		vringh_bad("Failed to check avail idx at %p",
 			   &vrh->vring.avail->idx);
 		return true;
@@ -516,11 +534,13 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
 }
 
 static inline void __vringh_notify_disable(struct vringh *vrh,
-					   int (*putu16)(u16 *p, u16 val))
+					   int (*putu16)(const struct vringh *vrh,
+							 __virtio16 *p, u16 val))
 {
 	if (!vrh->event_indices) {
 		/* Old-school; update flags. */
-		if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
+		if (putu16(vrh, &vrh->vring.used->flags,
+			   VRING_USED_F_NO_NOTIFY)) {
 			vringh_bad("Setting used flags %p",
 				   &vrh->vring.used->flags);
 		}
@@ -528,14 +548,18 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
 }
 
 /* Userspace access helpers: in this case, addresses are really userspace. */
-static inline int getu16_user(u16 *val, const u16 *p)
+static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
 {
-	return get_user(*val, (__force u16 __user *)p);
+	__virtio16 v = 0;
+	int rc = get_user(v, (__force __virtio16 __user *)p);
+	*val = vringh16_to_cpu(vrh, v);
+	return rc;
 }
 
-static inline int putu16_user(u16 *p, u16 val)
+static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-	return put_user(val, (__force u16 __user *)p);
+	__virtio16 v = cpu_to_vringh16(vrh, val);
+	return put_user(v, (__force __virtio16 __user *)p);
 }
 
 static inline int copydesc_user(void *dst, const void *src, size_t len)
@@ -589,6 +613,7 @@ int vringh_init_user(struct vringh *vrh, u64 features,
 		return -EINVAL;
 	}
 
+	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 	vrh->weak_barriers = weak_barriers;
 	vrh->completed = 0;
@@ -729,8 +754,8 @@ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
 {
 	struct vring_used_elem used;
 
-	used.id = head;
-	used.len = len;
+	used.id = cpu_to_vringh32(vrh, head);
+	used.len = cpu_to_vringh32(vrh, len);
 	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
 }
 EXPORT_SYMBOL(vringh_complete_user);
@@ -792,15 +817,16 @@ int vringh_need_notify_user(struct vringh *vrh)
 EXPORT_SYMBOL(vringh_need_notify_user);
 
 /* Kernelspace access helpers. */
-static inline int getu16_kern(u16 *val, const u16 *p)
+static inline int getu16_kern(const struct vringh *vrh,
+			      u16 *val, const __virtio16 *p)
 {
-	*val = ACCESS_ONCE(*p);
+	*val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
 	return 0;
 }
 
-static inline int putu16_kern(u16 *p, u16 val)
+static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-	ACCESS_ONCE(*p) = val;
+	ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
 	return 0;
 }
 
@@ -848,6 +874,7 @@ int vringh_init_kern(struct vringh *vrh, u64 features,
 		return -EINVAL;
 	}
 
+	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
 	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
 	vrh->weak_barriers = weak_barriers;
 	vrh->completed = 0;
@@ -962,8 +989,8 @@ int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
 {
 	struct vring_used_elem used;
 
-	used.id = head;
-	used.len = len;
+	used.id = cpu_to_vringh32(vrh, head);
+	used.len = cpu_to_vringh32(vrh, len);
 
 	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
 }