author      Jason Wang <jasowang@redhat.com>        2016-06-23 02:04:31 -0400
committer   Michael S. Tsirkin <mst@redhat.com>     2016-08-01 19:57:31 -0400
commit      a9709d6874d55130663567577a9b05c35138cc6b (patch)
tree        33319aae722be870f30402b40cf2c22ac8f6caaa
parent      bfe2bc512884d0b1c5297a15350f940ca80e439b (diff)
vhost: convert pre sorted vhost memory array to interval tree
The current pre-sorted memory region array has some limitations for the future device IOTLB conversion:

1) Adding or removing a single region needs extra work and is expected to be slow because of sorting or memory re-allocation.
2) Removing a large range that may intersect several regions of different sizes needs extra work.
3) A replacement policy such as LRU is tricky to implement.

To overcome these shortcomings, this patch converts the array to an interval tree, which addresses the issues above with almost no extra work. The patch could later be used to:

- extend the current API and let userspace send only diffs of the memory table;
- simplify the device IOTLB implementation.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--  drivers/vhost/net.c   |   8
-rw-r--r--  drivers/vhost/vhost.c | 182
-rw-r--r--  drivers/vhost/vhost.h |  27
3 files changed, 128 insertions, 89 deletions
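Editor's note: for readers unfamiliar with <linux/interval_tree_generic.h>, the macro generates a small augmented-rbtree API (prefix_insert, prefix_remove, prefix_iter_first, prefix_iter_next) for any node type exposing an inclusive [start, last] range. Below is a minimal kernel-side sketch of that pattern under the assumptions of this era (struct rb_root roots; later kernels moved to rb_root_cached); the names demo_node, DEMO_START/DEMO_LAST and demo_it are illustrative only and do not appear in the patch, which instantiates the same shape for struct vhost_umem_node further down.

#include <linux/rbtree.h>
#include <linux/interval_tree_generic.h>

struct demo_node {
        struct rb_node rb;
        u64 start;              /* first address covered, inclusive */
        u64 last;               /* last address covered, inclusive */
        u64 __subtree_last;     /* maintained by the generated helpers */
};

#define DEMO_START(n) ((n)->start)
#define DEMO_LAST(n)  ((n)->last)

/* Generates demo_it_insert/remove/iter_first/iter_next for struct demo_node. */
INTERVAL_TREE_DEFINE(struct demo_node, rb, u64, __subtree_last,
                     DEMO_START, DEMO_LAST, static, demo_it)

/* Stabbing query: return a node whose range contains addr, or NULL. */
static struct demo_node *demo_lookup(struct rb_root *root, u64 addr)
{
        return demo_it_iter_first(root, addr, addr);
}

The conversion below follows exactly this shape: START()/LAST() map to vhost_umem_node->start/last, insertion replaces the sort() over a flat array, and the lookup in translate_desc() becomes a single iter_first() call instead of a binary search.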
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f744eeb3e2b4..a6b270aff9ef 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1036,20 +1036,20 @@ static long vhost_net_reset_owner(struct vhost_net *n)
         struct socket *tx_sock = NULL;
         struct socket *rx_sock = NULL;
         long err;
-        struct vhost_memory *memory;
+        struct vhost_umem *umem;
 
         mutex_lock(&n->dev.mutex);
         err = vhost_dev_check_owner(&n->dev);
         if (err)
                 goto done;
-        memory = vhost_dev_reset_owner_prepare();
-        if (!memory) {
+        umem = vhost_dev_reset_owner_prepare();
+        if (!umem) {
                 err = -ENOMEM;
                 goto done;
         }
         vhost_net_stop(n, &tx_sock, &rx_sock);
         vhost_net_flush(n);
-        vhost_dev_reset_owner(&n->dev, memory);
+        vhost_dev_reset_owner(&n->dev, umem);
         vhost_net_vq_reset(n);
 done:
         mutex_unlock(&n->dev.mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e1b5047d8f19..8071f3638db9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -27,6 +27,7 @@
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sort.h>
+#include <linux/interval_tree_generic.h>
 
 #include "vhost.h"
 
@@ -42,6 +43,10 @@ enum {
 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
 
+INTERVAL_TREE_DEFINE(struct vhost_umem_node,
+                     rb, __u64, __subtree_last,
+                     START, LAST, , vhost_umem_interval_tree);
+
 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 {
@@ -297,10 +302,10 @@ static void vhost_vq_reset(struct vhost_dev *dev,
         vq->call_ctx = NULL;
         vq->call = NULL;
         vq->log_ctx = NULL;
-        vq->memory = NULL;
         vhost_reset_is_le(vq);
         vhost_disable_cross_endian(vq);
         vq->busyloop_timeout = 0;
+        vq->umem = NULL;
 }
 
 static int vhost_worker(void *data)
@@ -394,7 +399,7 @@ void vhost_dev_init(struct vhost_dev *dev,
         mutex_init(&dev->mutex);
         dev->log_ctx = NULL;
         dev->log_file = NULL;
-        dev->memory = NULL;
+        dev->umem = NULL;
         dev->mm = NULL;
         dev->worker = NULL;
         init_llist_head(&dev->work_list);
@@ -499,27 +504,36 @@ err_mm:
 }
 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 
-struct vhost_memory *vhost_dev_reset_owner_prepare(void)
+static void *vhost_kvzalloc(unsigned long size)
 {
-        return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+        void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+        if (!n)
+                n = vzalloc(size);
+        return n;
+}
+
+struct vhost_umem *vhost_dev_reset_owner_prepare(void)
+{
+        return vhost_kvzalloc(sizeof(struct vhost_umem));
 }
 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 
 /* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
 {
         int i;
 
         vhost_dev_cleanup(dev, true);
 
         /* Restore memory to default empty mapping. */
-        memory->nregions = 0;
-        dev->memory = memory;
+        INIT_LIST_HEAD(&umem->umem_list);
+        dev->umem = umem;
         /* We don't need VQ locks below since vhost_dev_cleanup makes sure
          * VQs aren't running.
          */
         for (i = 0; i < dev->nvqs; ++i)
-                dev->vqs[i]->memory = memory;
+                dev->vqs[i]->umem = umem;
 }
 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
@@ -536,6 +550,21 @@ void vhost_dev_stop(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
+static void vhost_umem_clean(struct vhost_umem *umem)
+{
+        struct vhost_umem_node *node, *tmp;
+
+        if (!umem)
+                return;
+
+        list_for_each_entry_safe(node, tmp, &umem->umem_list, link) {
+                vhost_umem_interval_tree_remove(node, &umem->umem_tree);
+                list_del(&node->link);
+                kvfree(node);
+        }
+        kvfree(umem);
+}
+
 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 {
@@ -562,8 +591,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
         fput(dev->log_file);
         dev->log_file = NULL;
         /* No one will access memory at this point */
-        kvfree(dev->memory);
-        dev->memory = NULL;
+        vhost_umem_clean(dev->umem);
+        dev->umem = NULL;
         WARN_ON(!llist_empty(&dev->work_list));
         if (dev->worker) {
                 kthread_stop(dev->worker);
@@ -589,25 +618,25 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 }
 
 /* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
                                int log_all)
 {
-        int i;
+        struct vhost_umem_node *node;
 
-        if (!mem)
+        if (!umem)
                 return 0;
 
-        for (i = 0; i < mem->nregions; ++i) {
-                struct vhost_memory_region *m = mem->regions + i;
-                unsigned long a = m->userspace_addr;
-                if (m->memory_size > ULONG_MAX)
+        list_for_each_entry(node, &umem->umem_list, link) {
+                unsigned long a = node->userspace_addr;
+
+                if (node->size > ULONG_MAX)
                         return 0;
                 else if (!access_ok(VERIFY_WRITE, (void __user *)a,
-                                    m->memory_size))
+                                    node->size))
                         return 0;
                 else if (log_all && !log_access_ok(log_base,
-                                                   m->guest_phys_addr,
-                                                   m->memory_size))
+                                                   node->start,
+                                                   node->size))
                         return 0;
         }
         return 1;
@@ -615,7 +644,7 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 
 /* Can we switch to this memory table? */
 /* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
                             int log_all)
 {
         int i;
@@ -628,7 +657,8 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
                 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                 /* If ring is inactive, will check when it's enabled. */
                 if (d->vqs[i]->private_data)
-                        ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
+                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
+                                                 umem, log);
                 else
                         ok = 1;
                 mutex_unlock(&d->vqs[i]->mutex);
@@ -671,7 +701,7 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 /* Caller should have device mutex but not vq mutex */
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
-        return memory_access_ok(dev, dev->memory, 1);
+        return memory_access_ok(dev, dev->umem, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
@@ -682,7 +712,7 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
 {
         size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
-        return vq_memory_access_ok(log_base, vq->memory,
+        return vq_memory_access_ok(log_base, vq->umem,
                                    vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                         sizeof *vq->used +
@@ -698,28 +728,12 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
-static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
-{
-        const struct vhost_memory_region *r1 = p1, *r2 = p2;
-        if (r1->guest_phys_addr < r2->guest_phys_addr)
-                return 1;
-        if (r1->guest_phys_addr > r2->guest_phys_addr)
-                return -1;
-        return 0;
-}
-
-static void *vhost_kvzalloc(unsigned long size)
-{
-        void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-
-        if (!n)
-                n = vzalloc(size);
-        return n;
-}
-
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
-        struct vhost_memory mem, *newmem, *oldmem;
+        struct vhost_memory mem, *newmem;
+        struct vhost_memory_region *region;
+        struct vhost_umem_node *node;
+        struct vhost_umem *newumem, *oldumem;
         unsigned long size = offsetof(struct vhost_memory, regions);
         int i;
 
@@ -739,24 +753,52 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                 kvfree(newmem);
                 return -EFAULT;
         }
-        sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
-                vhost_memory_reg_sort_cmp, NULL);
 
-        if (!memory_access_ok(d, newmem, 0)) {
+        newumem = vhost_kvzalloc(sizeof(*newumem));
+        if (!newumem) {
                 kvfree(newmem);
-                return -EFAULT;
+                return -ENOMEM;
+        }
+
+        newumem->umem_tree = RB_ROOT;
+        INIT_LIST_HEAD(&newumem->umem_list);
+
+        for (region = newmem->regions;
+             region < newmem->regions + mem.nregions;
+             region++) {
+                node = vhost_kvzalloc(sizeof(*node));
+                if (!node)
+                        goto err;
+                node->start = region->guest_phys_addr;
+                node->size = region->memory_size;
+                node->last = node->start + node->size - 1;
+                node->userspace_addr = region->userspace_addr;
+                INIT_LIST_HEAD(&node->link);
+                list_add_tail(&node->link, &newumem->umem_list);
+                vhost_umem_interval_tree_insert(node, &newumem->umem_tree);
         }
-        oldmem = d->memory;
-        d->memory = newmem;
+
+        if (!memory_access_ok(d, newumem, 0))
+                goto err;
+
+        oldumem = d->umem;
+        d->umem = newumem;
 
         /* All memory accesses are done under some VQ mutex. */
         for (i = 0; i < d->nvqs; ++i) {
                 mutex_lock(&d->vqs[i]->mutex);
-                d->vqs[i]->memory = newmem;
+                d->vqs[i]->umem = newumem;
                 mutex_unlock(&d->vqs[i]->mutex);
         }
-        kvfree(oldmem);
+
+        kvfree(newmem);
+        vhost_umem_clean(oldumem);
         return 0;
+
+err:
+        vhost_umem_clean(newumem);
+        kvfree(newmem);
+        return -EFAULT;
 }
 
 long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
@@ -1059,28 +1101,6 @@ done:
 }
 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 
-static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
-                                                     __u64 addr, __u32 len)
-{
-        const struct vhost_memory_region *reg;
-        int start = 0, end = mem->nregions;
-
-        while (start < end) {
-                int slot = start + (end - start) / 2;
-                reg = mem->regions + slot;
-                if (addr >= reg->guest_phys_addr)
-                        end = slot;
-                else
-                        start = slot + 1;
-        }
-
-        reg = mem->regions + start;
-        if (addr >= reg->guest_phys_addr &&
-            reg->guest_phys_addr + reg->memory_size > addr)
-                return reg;
-        return NULL;
-}
-
 /* TODO: This is really inefficient.  We need something like get_user()
  * (instruction directly accesses the data, with an exception table entry
  * returning -EFAULT). See Documentation/x86/exception-tables.txt.
@@ -1231,29 +1251,29 @@ EXPORT_SYMBOL_GPL(vhost_vq_init_access);
 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                           struct iovec iov[], int iov_size)
 {
-        const struct vhost_memory_region *reg;
-        struct vhost_memory *mem;
+        const struct vhost_umem_node *node;
+        struct vhost_umem *umem = vq->umem;
         struct iovec *_iov;
         u64 s = 0;
         int ret = 0;
 
-        mem = vq->memory;
         while ((u64)len > s) {
                 u64 size;
                 if (unlikely(ret >= iov_size)) {
                         ret = -ENOBUFS;
                         break;
                 }
-                reg = find_region(mem, addr, len);
-                if (unlikely(!reg)) {
+                node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+                                                        addr, addr + len - 1);
+                if (node == NULL || node->start > addr) {
                         ret = -EFAULT;
                         break;
                 }
                 _iov = iov + ret;
-                size = reg->memory_size - addr + reg->guest_phys_addr;
+                size = node->size - addr + node->start;
                 _iov->iov_len = min((u64)len - s, size);
                 _iov->iov_base = (void __user *)(unsigned long)
-                        (reg->userspace_addr + addr - reg->guest_phys_addr);
+                        (node->userspace_addr + addr - node->start);
                 s += size;
                 addr += size;
                 ++ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 6690e645d2f8..eaaf6df72218 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -55,6 +55,25 @@ struct vhost_log {
         u64 len;
 };
 
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+struct vhost_umem_node {
+        struct rb_node rb;
+        struct list_head link;
+        __u64 start;
+        __u64 last;
+        __u64 size;
+        __u64 userspace_addr;
+        __u64 flags_padding;
+        __u64 __subtree_last;
+};
+
+struct vhost_umem {
+        struct rb_root umem_tree;
+        struct list_head umem_list;
+};
+
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
         struct vhost_dev *dev;
@@ -103,7 +122,7 @@ struct vhost_virtqueue {
         struct iovec *indirect;
         struct vring_used_elem *heads;
         /* Protected by virtqueue mutex. */
-        struct vhost_memory *memory;
+        struct vhost_umem *umem;
         void *private_data;
         u64 acked_features;
         /* Log write descriptors */
@@ -121,7 +140,6 @@ struct vhost_virtqueue {
 };
 
 struct vhost_dev {
-        struct vhost_memory *memory;
         struct mm_struct *mm;
         struct mutex mutex;
         struct vhost_virtqueue **vqs;
@@ -130,14 +148,15 @@ struct vhost_dev {
         struct eventfd_ctx *log_ctx;
         struct llist_head work_list;
         struct task_struct *worker;
+        struct vhost_umem *umem;
 };
 
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
+struct vhost_umem *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
 void vhost_dev_cleanup(struct vhost_dev *, bool locked);
 void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
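
Editor's note on the new translate_desc() flow: each guest-physical range [addr, addr + len) is split into per-region chunks by a stabbing query on the interval tree, a hole is detected when the returned node does not actually cover addr, and the host virtual address is userspace_addr plus the offset into the region. The following is a minimal userspace model of that arithmetic, under the assumption that a linear scan can stand in for vhost_umem_interval_tree_iter_first(); the names region, lookup and translate are illustrative and not part of the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region {
        uint64_t start;          /* guest-physical start, inclusive */
        uint64_t last;           /* guest-physical end, inclusive */
        uint64_t userspace_addr; /* host virtual address backing ->start */
};

/* Stand-in for the tree's stabbing query: first region overlapping
 * [addr, addr + len - 1], or NULL if none does. */
static const struct region *lookup(const struct region *r, size_t n,
                                   uint64_t addr, uint64_t len)
{
        for (size_t i = 0; i < n; i++)
                if (r[i].start <= addr + len - 1 && r[i].last >= addr)
                        return &r[i];
        return NULL;
}

/* Mirror of the translate_desc() loop: emit (hva, size) chunks,
 * failing when the memory map has a hole at addr. */
static int translate(const struct region *r, size_t n, uint64_t addr, uint64_t len)
{
        uint64_t s = 0;

        while (len > s) {
                const struct region *reg = lookup(r, n, addr, len - s);
                uint64_t size;

                if (!reg || reg->start > addr)  /* hole in the memory map */
                        return -1;
                size = reg->last - addr + 1;    /* bytes left in this region,
                                                   same as size - addr + start */
                if (size > len - s)
                        size = len - s;
                printf("gpa %#llx -> hva %#llx, %llu bytes\n",
                       (unsigned long long)addr,
                       (unsigned long long)(reg->userspace_addr + addr - reg->start),
                       (unsigned long long)size);
                s += size;
                addr += size;
        }
        return 0;
}

int main(void)
{
        struct region map[] = {
                { 0x0,      0xfffff,  0x7f0000000000ULL },
                { 0x100000, 0x1fffff, 0x7f0000200000ULL },
        };

        /* A 12 KiB range that straddles both regions. */
        return translate(map, 2, 0xff000, 0x3000);
}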