author     Asias He <asias@redhat.com>            2013-05-06 04:38:21 -0400
committer  Michael S. Tsirkin <mst@redhat.com>    2013-07-07 10:33:44 -0400
commit     6ac1afbf6132df0fcb0898f3509371305af7de16 (patch)
tree       78a3115a6e6f5533779806c5b13c040a1f7b1735
parent     3c63f66a0dcdd6cb8bcacf210181f2b3baed19be (diff)
vhost: Make vhost a separate module
Currently, vhost-net and vhost-scsi are sharing the vhost core code.
However, vhost-scsi shares the code by including the vhost.c file
directly.

Making vhost a separate module makes it easier to share code with
other vhost devices.

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--  drivers/vhost/Kconfig   |  8
-rw-r--r--  drivers/vhost/Makefile  |  3
-rw-r--r--  drivers/vhost/scsi.c    |  1
-rw-r--r--  drivers/vhost/test.c    |  2
-rw-r--r--  drivers/vhost/vhost.c   | 51
-rw-r--r--  drivers/vhost/vhost.h   |  2

6 files changed, 63 insertions(+), 4 deletions(-)
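What the split enables, in practice: a vhost device driver can now be built as its own module that includes only vhost.h and links against the exported core, rather than pulling the implementation in with #include "vhost.c" as scsi.c and test.c did before this patch. The sketch below is illustrative only and not part of the patch: the vhost_foo name and layout are hypothetical, and the only core calls assumed are vhost_work_init() and vhost_work_queue(), whose signatures appear in vhost.h.

/* Hypothetical consumer of the standalone vhost module; not part of this
 * patch.  It needs only the header: vhost_work_init() and
 * vhost_work_queue() resolve against the EXPORT_SYMBOL_GPL()s added in
 * vhost.c below instead of a textual #include "vhost.c".
 */
#include <linux/module.h>
#include "vhost.h"

struct vhost_foo {
	struct vhost_dev dev;
	struct vhost_work work;
};

/* Runs in the vhost worker thread once the work item is queued. */
static void vhost_foo_work_fn(struct vhost_work *work)
{
	/* device-specific processing would go here */
}

static void vhost_foo_setup(struct vhost_foo *f)
{
	vhost_work_init(&f->work, vhost_foo_work_fn);
}

static void vhost_foo_kick(struct vhost_foo *f)
{
	vhost_work_queue(&f->dev, &f->work);
}

MODULE_LICENSE("GPL");

A driver like this would also add its own Kconfig entry that selects VHOST, mirroring the VHOST_NET and VHOST_SCSI changes in the diff below.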
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 8b9226da3f54..017a1e8a8f6f 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,6 +1,7 @@
 config VHOST_NET
 	tristate "Host kernel accelerator for virtio net"
 	depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+	select VHOST
 	select VHOST_RING
 	---help---
 	  This kernel module can be loaded in host kernel to accelerate
@@ -13,6 +14,7 @@ config VHOST_NET
 config VHOST_SCSI
 	tristate "VHOST_SCSI TCM fabric driver"
 	depends on TARGET_CORE && EVENTFD && m
+	select VHOST
 	select VHOST_RING
 	default n
 	---help---
@@ -24,3 +26,9 @@ config VHOST_RING
 	---help---
 	  This option is selected by any driver which needs to access
 	  the host side of a virtio ring.
+
+config VHOST
+	tristate
+	---help---
+	  This option is selected by any driver which needs to access
+	  the core of vhost.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 654e9afb11f5..e0441c34db1c 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,7 +1,8 @@
 obj-$(CONFIG_VHOST_NET) += vhost_net.o
-vhost_net-y := vhost.o net.o
+vhost_net-y := net.o
 
 obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
 vhost_scsi-y := scsi.o
 
 obj-$(CONFIG_VHOST_RING) += vringh.o
+obj-$(CONFIG_VHOST) += vhost.o
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 03765e17c154..5531ebcc7276 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -49,7 +49,6 @@
 #include <linux/llist.h>
 #include <linux/bitmap.h>
 
-#include "vhost.c"
 #include "vhost.h"
 
 #define TCM_VHOST_VERSION "v0.1"
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 1ee45bc85f67..9b71a577f7bd 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -18,7 +18,7 @@
 #include <linux/slab.h>
 
 #include "test.h"
-#include "vhost.c"
+#include "vhost.h"
 
 /* Max number of bytes transferred before requeueing the job.
  * Using this limit prevents one virtqueue from starving others. */
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4d135b1a861e..e58cf0001cee 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
+#include <linux/module.h>
 
 #include "vhost.h"
 
@@ -66,6 +67,7 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 	work->flushing = 0;
 	work->queue_seq = work->done_seq = 0;
 }
+EXPORT_SYMBOL_GPL(vhost_work_init);
 
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
@@ -79,6 +81,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 
 	vhost_work_init(&poll->work, fn);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_init);
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
@@ -101,6 +104,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(vhost_poll_start);
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
@@ -111,6 +115,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
 		poll->wqh = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 				unsigned seq)
@@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 	return left <= 0;
 }
 
-static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
 	int flushing;
@@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
+EXPORT_SYMBOL_GPL(vhost_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -145,6 +151,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
 {
 	vhost_work_flush(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
@@ -158,11 +165,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 	}
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
+EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
 	vhost_work_queue(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_queue);
 
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
@@ -311,6 +320,7 @@ long vhost_dev_init(struct vhost_dev *dev,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_init);
 
 /* Caller should have device mutex */
 long vhost_dev_check_owner(struct vhost_dev *dev)
@@ -318,6 +328,7 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	/* Are you the owner? If not, I don't think you mean to do that */
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
 struct vhost_attach_cgroups_struct {
 	struct vhost_work work;
@@ -349,6 +360,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
 	return dev->mm;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
@@ -392,11 +404,13 @@ err_worker:
 err_mm:
 	return err;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 
 struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 {
 	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 
 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
@@ -407,6 +421,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 	memory->nregions = 0;
 	RCU_INIT_POINTER(dev->memory, memory);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
 void vhost_dev_stop(struct vhost_dev *dev)
 {
@@ -419,6 +434,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
@@ -459,6 +475,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 		mmput(dev->mm);
 	dev->mm = NULL;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
@@ -544,6 +561,7 @@ int vhost_log_access_ok(struct vhost_dev *dev)
 				     lockdep_is_held(&dev->mutex));
 	return memory_access_ok(dev, mp, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
@@ -569,6 +587,7 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 		vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
@@ -798,6 +817,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		vhost_poll_flush(&vq->poll);
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 
 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
@@ -878,6 +898,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 done:
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 						     __u64 addr, __u32 len)
@@ -969,6 +990,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 	BUG();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_log_write);
 
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
@@ -1020,6 +1042,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 	vq->signalled_used_valid = false;
 	return get_user(vq->last_used_idx, &vq->used->idx);
 }
+EXPORT_SYMBOL_GPL(vhost_init_used);
 
 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size)
@@ -1296,12 +1319,14 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
 	return head;
 }
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
 	vq->last_avail_idx -= n;
 }
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
 
 /* After we've used one of their buffers, we tell them about it. We'll then
  * want to notify the guest, using eventfd. */
@@ -1350,6 +1375,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 		vq->signalled_used_valid = false;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used);
 
 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			    struct vring_used_elem *heads,
@@ -1419,6 +1445,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 	}
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -1463,6 +1490,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->call_ctx && vhost_notify(dev, vq))
 		eventfd_signal(vq->call_ctx, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_signal);
 
 /* And here's the combo meal deal. Supersize me! */
 void vhost_add_used_and_signal(struct vhost_dev *dev,
@@ -1472,6 +1500,7 @@ void vhost_add_used_and_signal(struct vhost_dev *dev,
 	vhost_add_used(vq, head, len);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
 
 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
@@ -1481,6 +1510,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
 	vhost_add_used_n(vq, heads, count);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1518,6 +1548,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
 	return avail_idx != vq->avail_idx;
 }
+EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
 /* We don't need to be notified again. */
 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1534,3 +1565,21 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 		       &vq->used->flags, r);
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
+
+static int __init vhost_init(void)
+{
+	return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 64adcf99ff33..42298cd23c73 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -46,6 +46,8 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
+long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
 
 struct vhost_log {
 	u64 addr;
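A closing note on the two declarations added above: vhost_work_flush() loses its static qualifier in this patch, and vhost_vring_ioctl() is now called from separately built device modules, so both get prototypes in vhost.h. As a hedged sketch of how a device module typically uses them (the struct foo and foo_ioctl names are hypothetical and not from this patch; the fall-through on -ENOIOCTLCMD mirrors the common pattern in in-tree vhost drivers rather than anything this patch introduces):

#include <linux/fs.h>
#include "vhost.h"

/* Hypothetical device wrapper around the vhost core. */
struct foo {
	struct vhost_dev dev;
};

/* Requests the driver does not handle itself are offered first to the
 * generic device ioctls and then to the per-virtqueue ioctls; both entry
 * points live in the vhost core this patch turns into a module. */
static long foo_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
{
	struct foo *foo = f->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&foo->dev.mutex);
	r = vhost_dev_ioctl(&foo->dev, ioctl, argp);
	if (r == -ENOIOCTLCMD)
		r = vhost_vring_ioctl(&foo->dev, ioctl, argp);
	mutex_unlock(&foo->dev.mutex);
	return r;
}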