Diffstat (limited to 'tools/virtio')
-rw-r--r--  tools/virtio/Makefile                    |   10
-rw-r--r--  tools/virtio/asm/barrier.h               |   14
-rw-r--r--  tools/virtio/linux/bug.h                 |   10
-rw-r--r--  tools/virtio/linux/err.h                 |   26
-rw-r--r--  tools/virtio/linux/export.h              |    5
-rw-r--r--  tools/virtio/linux/irqreturn.h           |    1
-rw-r--r--  tools/virtio/linux/kernel.h              |  112
-rw-r--r--  tools/virtio/linux/module.h              |    1
-rw-r--r--  tools/virtio/linux/printk.h              |    4
-rw-r--r--  tools/virtio/linux/ratelimit.h           |    4
-rw-r--r--  tools/virtio/linux/scatterlist.h         |  189
-rw-r--r--  tools/virtio/linux/types.h               |   28
-rw-r--r--  tools/virtio/linux/uaccess.h             |   50
-rw-r--r--  tools/virtio/linux/uio.h                 |    3
-rw-r--r--  tools/virtio/linux/virtio.h              |  171
-rw-r--r--  tools/virtio/linux/virtio_config.h       |    6
-rw-r--r--  tools/virtio/linux/virtio_ring.h         |    1
-rw-r--r--  tools/virtio/linux/vringh.h              |    1
-rw-r--r--  tools/virtio/uapi/linux/uio.h            |    1
-rw-r--r--  tools/virtio/uapi/linux/virtio_config.h  |    1
-rw-r--r--  tools/virtio/uapi/linux/virtio_ring.h    |    4
-rw-r--r--  tools/virtio/virtio_test.c               |   13
-rw-r--r--  tools/virtio/vringh_test.c               |  741
23 files changed, 1238 insertions, 158 deletions
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index d1d442ed106a..3187c62d9814 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -1,12 +1,14 @@
 all: test mod
-test: virtio_test
+test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
-CFLAGS += -g -O2 -Wall -I. -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -MMD
-vpath %.c ../../drivers/virtio
+vringh_test: vringh_test.o vringh.o virtio_ring.o
+
+CFLAGS += -g -O2 -Wall -I. -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
+vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
 	${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test
 .PHONY: all test mod clean
 clean:
-	${RM} *.o vhost_test/*.o vhost_test/.*.cmd \
+	${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
 	  vhost_test/Module.symvers vhost_test/modules.order *.d
 -include *.d
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
new file mode 100644
index 000000000000..aff61e13306c
--- /dev/null
+++ b/tools/virtio/asm/barrier.h
@@ -0,0 +1,14 @@
1#if defined(__i386__) || defined(__x86_64__)
2#define barrier() asm volatile("" ::: "memory")
3#define mb() __sync_synchronize()
4
5#define smp_mb() mb()
6# define smp_rmb() barrier()
7# define smp_wmb() barrier()
8/* Weak barriers should be used. If not - it's a bug */
9# define rmb() abort()
10# define wmb() abort()
11#else
12#error Please fill in barrier macros
13#endif
14
diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
new file mode 100644
index 000000000000..fb94f0787c47
--- /dev/null
+++ b/tools/virtio/linux/bug.h
@@ -0,0 +1,10 @@
1#ifndef BUG_H
2#define BUG_H
3
4#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
5
6#define BUILD_BUG_ON(x)
7
8#define BUG() abort()
9
10#endif /* BUG_H */
diff --git a/tools/virtio/linux/err.h b/tools/virtio/linux/err.h
new file mode 100644
index 000000000000..e32eff8b2a14
--- /dev/null
+++ b/tools/virtio/linux/err.h
@@ -0,0 +1,26 @@
1#ifndef ERR_H
2#define ERR_H
3#define MAX_ERRNO 4095
4
5#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
6
7static inline void * __must_check ERR_PTR(long error)
8{
9 return (void *) error;
10}
11
12static inline long __must_check PTR_ERR(const void *ptr)
13{
14 return (long) ptr;
15}
16
17static inline long __must_check IS_ERR(const void *ptr)
18{
19 return IS_ERR_VALUE((unsigned long)ptr);
20}
21
22static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
23{
24 return !ptr || IS_ERR_VALUE((unsigned long)ptr);
25}
26#endif /* ERR_H */
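
The stubs above mirror the kernel's pointer-encoded error convention. A minimal usage sketch (illustration only, not part of the patch; open_thing() is a hypothetical helper, and the harness headers are assumed to be on the include path via the Makefile's -I.):

#include <linux/kernel.h>
#include <linux/err.h>

/* Hypothetical helper: returns a real pointer or an errno encoded as one. */
static void *open_thing(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* error travels inside the pointer */
	return malloc(16);
}

static int use_thing(void)
{
	void *p = open_thing(0);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* recover the negative errno */
	free(p);
	return 0;
}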
diff --git a/tools/virtio/linux/export.h b/tools/virtio/linux/export.h
new file mode 100644
index 000000000000..7311d326894a
--- /dev/null
+++ b/tools/virtio/linux/export.h
@@ -0,0 +1,5 @@
1#define EXPORT_SYMBOL(sym)
2#define EXPORT_SYMBOL_GPL(sym)
3#define EXPORT_SYMBOL_GPL_FUTURE(sym)
4#define EXPORT_UNUSED_SYMBOL(sym)
5#define EXPORT_UNUSED_SYMBOL_GPL(sym)
diff --git a/tools/virtio/linux/irqreturn.h b/tools/virtio/linux/irqreturn.h
new file mode 100644
index 000000000000..a3c4e7be7089
--- /dev/null
+++ b/tools/virtio/linux/irqreturn.h
@@ -0,0 +1 @@
#include "../../../include/linux/irqreturn.h"
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
new file mode 100644
index 000000000000..fba705963968
--- /dev/null
+++ b/tools/virtio/linux/kernel.h
@@ -0,0 +1,112 @@
1#ifndef KERNEL_H
2#define KERNEL_H
3#include <stdbool.h>
4#include <stdlib.h>
5#include <stddef.h>
6#include <stdio.h>
7#include <string.h>
8#include <assert.h>
9#include <stdarg.h>
10
11#include <linux/types.h>
12#include <linux/printk.h>
13#include <linux/bug.h>
14#include <errno.h>
15#include <unistd.h>
16#include <asm/barrier.h>
17
18#define CONFIG_SMP
19
20#define PAGE_SIZE getpagesize()
21#define PAGE_MASK (~(PAGE_SIZE-1))
22
23typedef unsigned long long dma_addr_t;
24typedef size_t __kernel_size_t;
25
26struct page {
27 unsigned long long dummy;
28};
29
30/* Physical == Virtual */
31#define virt_to_phys(p) ((unsigned long)p)
32#define phys_to_virt(a) ((void *)(unsigned long)(a))
33/* Page address: Virtual / 4K */
34#define page_to_phys(p) ((dma_addr_t)(unsigned long)(p))
35#define virt_to_page(p) ((struct page *)((unsigned long)p & PAGE_MASK))
36
37#define offset_in_page(p) (((unsigned long)p) % PAGE_SIZE)
38
39#define __printf(a,b) __attribute__((format(printf,a,b)))
40
41typedef enum {
42 GFP_KERNEL,
43 GFP_ATOMIC,
44 __GFP_HIGHMEM,
45 __GFP_HIGH
46} gfp_t;
47
48#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
49
50extern void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
51static inline void *kmalloc(size_t s, gfp_t gfp)
52{
53 if (__kmalloc_fake)
54 return __kmalloc_fake;
55 return malloc(s);
56}
57
58static inline void kfree(void *p)
59{
60 if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
61 return;
62 free(p);
63}
64
65static inline void *krealloc(void *p, size_t s, gfp_t gfp)
66{
67 return realloc(p, s);
68}
69
70
71static inline unsigned long __get_free_page(gfp_t gfp)
72{
73 void *p;
74
75 posix_memalign(&p, PAGE_SIZE, PAGE_SIZE);
76 return (unsigned long)p;
77}
78
79static inline void free_page(unsigned long addr)
80{
81 free((void *)addr);
82}
83
84#define container_of(ptr, type, member) ({ \
85 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
86 (type *)( (char *)__mptr - offsetof(type,member) );})
87
88#define uninitialized_var(x) x = x
89
90# ifndef likely
91# define likely(x) (__builtin_expect(!!(x), 1))
92# endif
93# ifndef unlikely
94# define unlikely(x) (__builtin_expect(!!(x), 0))
95# endif
96
97#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
98#ifdef DEBUG
99#define pr_debug(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
100#else
101#define pr_debug(format, ...) do {} while (0)
102#endif
103#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
104#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
105
106#define min(x, y) ({ \
107 typeof(x) _min1 = (x); \
108 typeof(y) _min2 = (y); \
109 (void) (&_min1 == &_min2); \
110 _min1 < _min2 ? _min1 : _min2; })
111
112#endif /* KERNEL_H */
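
The kmalloc()/kfree() wrappers above are deliberately steerable: while __kmalloc_fake is non-NULL, kmalloc() returns that address instead of calling malloc(), and frees inside the __kfree_ignore_* range are skipped. vringh_test.c uses this to force indirect descriptor allocations into guest-visible memory. A sketch of the pattern (illustration only, assuming the test defines the extern hooks as usual):

#include <linux/kernel.h>

void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;

static char arena[4096];

static void *alloc_in_arena(size_t n)
{
	void *p;

	__kmalloc_fake = arena;		/* next kmalloc() hands back the arena... */
	p = kmalloc(n, GFP_KERNEL);
	__kmalloc_fake = NULL;		/* ...then fall back to real malloc() */
	return p;
}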
diff --git a/tools/virtio/linux/module.h b/tools/virtio/linux/module.h
index e69de29bb2d1..3039a7e972b6 100644
--- a/tools/virtio/linux/module.h
+++ b/tools/virtio/linux/module.h
@@ -0,0 +1 @@
#include <linux/export.h>
diff --git a/tools/virtio/linux/printk.h b/tools/virtio/linux/printk.h
new file mode 100644
index 000000000000..9f2423bd89c2
--- /dev/null
+++ b/tools/virtio/linux/printk.h
@@ -0,0 +1,4 @@
1#include "../../../include/linux/kern_levels.h"
2
3#define printk printf
4#define vprintk vprintf
diff --git a/tools/virtio/linux/ratelimit.h b/tools/virtio/linux/ratelimit.h
new file mode 100644
index 000000000000..dcce1725f90d
--- /dev/null
+++ b/tools/virtio/linux/ratelimit.h
@@ -0,0 +1,4 @@
1#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) int name = 0
2
3#define __ratelimit(x) (*(x))
4
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
new file mode 100644
index 000000000000..68c9e2adc996
--- /dev/null
+++ b/tools/virtio/linux/scatterlist.h
@@ -0,0 +1,189 @@
1#ifndef SCATTERLIST_H
2#define SCATTERLIST_H
3#include <linux/kernel.h>
4
5struct scatterlist {
6 unsigned long page_link;
7 unsigned int offset;
8 unsigned int length;
9 dma_addr_t dma_address;
10};
11
12/* Scatterlist helpers, stolen from linux/scatterlist.h */
13#define sg_is_chain(sg) ((sg)->page_link & 0x01)
14#define sg_is_last(sg) ((sg)->page_link & 0x02)
15#define sg_chain_ptr(sg) \
16 ((struct scatterlist *) ((sg)->page_link & ~0x03))
17
18/**
19 * sg_assign_page - Assign a given page to an SG entry
20 * @sg: SG entry
21 * @page: The page
22 *
23 * Description:
24 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
25 * variant.
26 *
27 **/
28static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
29{
30 unsigned long page_link = sg->page_link & 0x3;
31
32 /*
33 * In order for the low bit stealing approach to work, pages
34 * must be aligned at a 32-bit boundary as a minimum.
35 */
36 BUG_ON((unsigned long) page & 0x03);
37#ifdef CONFIG_DEBUG_SG
38 BUG_ON(sg->sg_magic != SG_MAGIC);
39 BUG_ON(sg_is_chain(sg));
40#endif
41 sg->page_link = page_link | (unsigned long) page;
42}
43
44/**
45 * sg_set_page - Set sg entry to point at given page
46 * @sg: SG entry
47 * @page: The page
48 * @len: Length of data
49 * @offset: Offset into page
50 *
51 * Description:
52 * Use this function to set an sg entry pointing at a page, never assign
53 * the page directly. We encode sg table information in the lower bits
54 * of the page pointer. See sg_page() for looking up the page belonging
55 * to an sg entry.
56 *
57 **/
58static inline void sg_set_page(struct scatterlist *sg, struct page *page,
59 unsigned int len, unsigned int offset)
60{
61 sg_assign_page(sg, page);
62 sg->offset = offset;
63 sg->length = len;
64}
65
66static inline struct page *sg_page(struct scatterlist *sg)
67{
68#ifdef CONFIG_DEBUG_SG
69 BUG_ON(sg->sg_magic != SG_MAGIC);
70 BUG_ON(sg_is_chain(sg));
71#endif
72 return (struct page *)((sg)->page_link & ~0x3);
73}
74
75/*
76 * Loop over each sg element, following the pointer to a new list if necessary
77 */
78#define for_each_sg(sglist, sg, nr, __i) \
79 for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
80
81/**
82 * sg_chain - Chain two sglists together
83 * @prv: First scatterlist
84 * @prv_nents: Number of entries in prv
85 * @sgl: Second scatterlist
86 *
87 * Description:
88 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
89 *
90 **/
91static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
92 struct scatterlist *sgl)
93{
94 /*
95 * offset and length are unused for chain entry. Clear them.
96 */
97 prv[prv_nents - 1].offset = 0;
98 prv[prv_nents - 1].length = 0;
99
100 /*
101 * Set lowest bit to indicate a link pointer, and make sure to clear
102 * the termination bit if it happens to be set.
103 */
104 prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
105}
106
107/**
108 * sg_mark_end - Mark the end of the scatterlist
109 * @sg: SG entryScatterlist
110 *
111 * Description:
112 * Marks the passed in sg entry as the termination point for the sg
113 * table. A call to sg_next() on this entry will return NULL.
114 *
115 **/
116static inline void sg_mark_end(struct scatterlist *sg)
117{
118#ifdef CONFIG_DEBUG_SG
119 BUG_ON(sg->sg_magic != SG_MAGIC);
120#endif
121 /*
122 * Set termination bit, clear potential chain bit
123 */
124 sg->page_link |= 0x02;
125 sg->page_link &= ~0x01;
126}
127
128/**
129 * sg_unmark_end - Undo setting the end of the scatterlist
130 * @sg: SG entryScatterlist
131 *
132 * Description:
133 * Removes the termination marker from the given entry of the scatterlist.
134 *
135 **/
136static inline void sg_unmark_end(struct scatterlist *sg)
137{
138#ifdef CONFIG_DEBUG_SG
139 BUG_ON(sg->sg_magic != SG_MAGIC);
140#endif
141 sg->page_link &= ~0x02;
142}
143
144static inline struct scatterlist *sg_next(struct scatterlist *sg)
145{
146#ifdef CONFIG_DEBUG_SG
147 BUG_ON(sg->sg_magic != SG_MAGIC);
148#endif
149 if (sg_is_last(sg))
150 return NULL;
151
152 sg++;
153 if (unlikely(sg_is_chain(sg)))
154 sg = sg_chain_ptr(sg);
155
156 return sg;
157}
158
159static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
160{
161 memset(sgl, 0, sizeof(*sgl) * nents);
162#ifdef CONFIG_DEBUG_SG
163 {
164 unsigned int i;
165 for (i = 0; i < nents; i++)
166 sgl[i].sg_magic = SG_MAGIC;
167 }
168#endif
169 sg_mark_end(&sgl[nents - 1]);
170}
171
172static inline dma_addr_t sg_phys(struct scatterlist *sg)
173{
174 return page_to_phys(sg_page(sg)) + sg->offset;
175}
176
177static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
178 unsigned int buflen)
179{
180 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
181}
182
183static inline void sg_init_one(struct scatterlist *sg,
184 const void *buf, unsigned int buflen)
185{
186 sg_init_table(sg, 1);
187 sg_set_buf(sg, buf, buflen);
188}
189#endif /* SCATTERLIST_H */
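
A short usage sketch for the helpers above (illustrative, not part of the patch): build a two-entry table and walk it with for_each_sg().

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static unsigned int sum_lengths(void)
{
	static char a[64], b[128];
	struct scatterlist sg[2], *s;
	unsigned int i, total = 0;

	sg_init_table(sg, 2);		/* zero the entries, mark sg[1] as the end */
	sg_set_buf(&sg[0], a, sizeof(a));
	sg_set_buf(&sg[1], b, sizeof(b));

	for_each_sg(sg, s, 2, i)	/* sg_next() returns NULL after the end marker */
		total += s->length;

	return total;			/* 192 */
}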
diff --git a/tools/virtio/linux/types.h b/tools/virtio/linux/types.h
new file mode 100644
index 000000000000..f8ebb9a2b3d6
--- /dev/null
+++ b/tools/virtio/linux/types.h
@@ -0,0 +1,28 @@
1#ifndef TYPES_H
2#define TYPES_H
3#include <stdint.h>
4
5#define __force
6#define __user
7#define __must_check
8#define __cold
9
10typedef uint64_t u64;
11typedef int64_t s64;
12typedef uint32_t u32;
13typedef int32_t s32;
14typedef uint16_t u16;
15typedef int16_t s16;
16typedef uint8_t u8;
17typedef int8_t s8;
18
19typedef uint64_t __u64;
20typedef int64_t __s64;
21typedef uint32_t __u32;
22typedef int32_t __s32;
23typedef uint16_t __u16;
24typedef int16_t __s16;
25typedef uint8_t __u8;
26typedef int8_t __s8;
27
28#endif /* TYPES_H */
diff --git a/tools/virtio/linux/uaccess.h b/tools/virtio/linux/uaccess.h
new file mode 100644
index 000000000000..0a578fe18653
--- /dev/null
+++ b/tools/virtio/linux/uaccess.h
@@ -0,0 +1,50 @@
1#ifndef UACCESS_H
2#define UACCESS_H
3extern void *__user_addr_min, *__user_addr_max;
4
5#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
6
7static inline void __chk_user_ptr(const volatile void *p, size_t size)
8{
9 assert(p >= __user_addr_min && p + size <= __user_addr_max);
10}
11
12#define put_user(x, ptr) \
13({ \
14 typeof(ptr) __pu_ptr = (ptr); \
15 __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
16 ACCESS_ONCE(*(__pu_ptr)) = x; \
17 0; \
18})
19
20#define get_user(x, ptr) \
21({ \
22 typeof(ptr) __pu_ptr = (ptr); \
23 __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
24 x = ACCESS_ONCE(*(__pu_ptr)); \
25 0; \
26})
27
28static void volatile_memcpy(volatile char *to, const volatile char *from,
29 unsigned long n)
30{
31 while (n--)
32 *(to++) = *(from++);
33}
34
35static inline int copy_from_user(void *to, const void __user volatile *from,
36 unsigned long n)
37{
38 __chk_user_ptr(from, n);
39 volatile_memcpy(to, from, n);
40 return 0;
41}
42
43static inline int copy_to_user(void __user volatile *to, const void *from,
44 unsigned long n)
45{
46 __chk_user_ptr(to, n);
47 volatile_memcpy(to, from, n);
48 return 0;
49}
50#endif /* UACCESS_H */
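
Every access through the helpers above is bounds-checked against [__user_addr_min, __user_addr_max). A sketch of how a test aims them at its own buffer (illustration only; the real tests point them at a posix_memalign()ed or mmap()ed region):

#include <linux/kernel.h>
#include <linux/uaccess.h>

void *__user_addr_min, *__user_addr_max;

static void roundtrip(void)
{
	static unsigned int slot;
	unsigned int v = 0;

	__user_addr_min = &slot;		/* treat this one slot as "user" memory */
	__user_addr_max = (char *)&slot + sizeof(slot);

	assert(put_user(42, &slot) == 0);	/* range-checked write */
	assert(get_user(v, &slot) == 0);	/* range-checked read */
	assert(v == 42);
}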
diff --git a/tools/virtio/linux/uio.h b/tools/virtio/linux/uio.h
new file mode 100644
index 000000000000..cd20f0ba3081
--- /dev/null
+++ b/tools/virtio/linux/uio.h
@@ -0,0 +1,3 @@
1#include <linux/kernel.h>
2
3#include "../../../include/linux/uio.h"
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 81847dd08bd0..cd801838156f 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -1,127 +1,7 @@
 #ifndef LINUX_VIRTIO_H
 #define LINUX_VIRTIO_H
-
-#include <stdbool.h>
-#include <stdlib.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-
-#include <linux/types.h>
-#include <errno.h>
-
-typedef unsigned long long dma_addr_t;
-
-struct scatterlist {
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;
-	dma_addr_t dma_address;
-};
-
-struct page {
-	unsigned long long dummy;
-};
-
-#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
-
-/* Physical == Virtual */
-#define virt_to_phys(p) ((unsigned long)p)
-#define phys_to_virt(a) ((void *)(unsigned long)(a))
-/* Page address: Virtual / 4K */
-#define virt_to_page(p) ((struct page*)((virt_to_phys(p) / 4096) * \
-				sizeof(struct page)))
-#define offset_in_page(p) (((unsigned long)p) % 4096)
-#define sg_phys(sg) ((sg->page_link & ~0x3) / sizeof(struct page) * 4096 + \
-		     sg->offset)
-static inline void sg_mark_end(struct scatterlist *sg)
-{
-	/*
-	 * Set termination bit, clear potential chain bit
-	 */
-	sg->page_link |= 0x02;
-	sg->page_link &= ~0x01;
-}
-static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
-{
-	memset(sgl, 0, sizeof(*sgl) * nents);
-	sg_mark_end(&sgl[nents - 1]);
-}
-static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
-{
-	unsigned long page_link = sg->page_link & 0x3;
-
-	/*
-	 * In order for the low bit stealing approach to work, pages
-	 * must be aligned at a 32-bit boundary as a minimum.
-	 */
-	BUG_ON((unsigned long) page & 0x03);
-	sg->page_link = page_link | (unsigned long) page;
-}
-
-static inline void sg_set_page(struct scatterlist *sg, struct page *page,
-			       unsigned int len, unsigned int offset)
-{
-	sg_assign_page(sg, page);
-	sg->offset = offset;
-	sg->length = len;
-}
-
-static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
-			      unsigned int buflen)
-{
-	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
-}
-
-static inline void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
-{
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, buf, buflen);
-}
-
-typedef __u16 u16;
-
-typedef enum {
-	GFP_KERNEL,
-	GFP_ATOMIC,
-} gfp_t;
-typedef enum {
-	IRQ_NONE,
-	IRQ_HANDLED
-} irqreturn_t;
-
-static inline void *kmalloc(size_t s, gfp_t gfp)
-{
-	return malloc(s);
-}
-
-static inline void kfree(void *p)
-{
-	free(p);
-}
-
-#define container_of(ptr, type, member) ({			\
-	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
-	(type *)( (char *)__mptr - offsetof(type,member) );})
-
-#define uninitialized_var(x) x = x
-
-# ifndef likely
-#  define likely(x)	(__builtin_expect(!!(x), 1))
-# endif
-# ifndef unlikely
-#  define unlikely(x)	(__builtin_expect(!!(x), 0))
-# endif
-
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#ifdef DEBUG
-#define pr_debug(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#else
-#define pr_debug(format, ...) do {} while (0)
-#endif
-#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
+#include <linux/scatterlist.h>
+#include <linux/kernel.h>
 
 /* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
 #define list_add_tail(a, b) do {} while (0)
@@ -131,6 +11,7 @@ static inline void kfree(void *p)
 #define BITS_PER_BYTE 8
 #define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
 #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+
 /* TODO: Not atomic as it should be:
  * we don't use this for anything important. */
 static inline void clear_bit(int nr, volatile unsigned long *addr)
@@ -145,10 +26,6 @@ static inline int test_bit(int nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
-
-/* The only feature we care to support */
-#define virtio_has_feature(dev, feature) \
-	test_bit((feature), (dev)->features)
 /* end of stubs */
 
 struct virtio_device {
@@ -163,39 +40,32 @@ struct virtqueue {
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
+	unsigned int index;
+	unsigned int num_free;
 	void *priv;
 };
 
-#define EXPORT_SYMBOL_GPL(__EXPORT_SYMBOL_GPL_name) \
-	void __EXPORT_SYMBOL_GPL##__EXPORT_SYMBOL_GPL_name() { \
-}
 #define MODULE_LICENSE(__MODULE_LICENSE_value) \
 	const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value
 
-#define CONFIG_SMP
-
-#if defined(__i386__) || defined(__x86_64__)
-#define barrier() asm volatile("" ::: "memory")
-#define mb() __sync_synchronize()
-
-#define smp_mb()	mb()
-# define smp_rmb()	barrier()
-# define smp_wmb()	barrier()
-/* Weak barriers should be used. If not - it's a bug */
-# define rmb()	abort()
-# define wmb()	abort()
-#else
-#error Please fill in barrier macros
-#endif
-
 /* Interfaces exported by virtio_ring. */
-int virtqueue_add_buf(struct virtqueue *vq,
-		      struct scatterlist sg[],
-		      unsigned int out_num,
-		      unsigned int in_num,
+int virtqueue_add_sgs(struct virtqueue *vq,
+		      struct scatterlist *sgs[],
+		      unsigned int out_sgs,
+		      unsigned int in_sgs,
 		      void *data,
 		      gfp_t gfp);
 
+int virtqueue_add_outbuf(struct virtqueue *vq,
+			 struct scatterlist sg[], unsigned int num,
+			 void *data,
+			 gfp_t gfp);
+
+int virtqueue_add_inbuf(struct virtqueue *vq,
+			struct scatterlist sg[], unsigned int num,
+			void *data,
+			gfp_t gfp);
+
 void virtqueue_kick(struct virtqueue *vq);
 
 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
@@ -206,7 +76,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
-struct virtqueue *vring_new_virtqueue(unsigned int num,
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+				      unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
 				      bool weak_barriers,
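
As a quick orientation to the reworked interface above (a sketch, not part of the patch): a guest-side caller now describes a buffer with a scatterlist and queues it with virtqueue_add_outbuf() rather than the removed virtqueue_add_buf().

#include <linux/kernel.h>
#include <linux/virtio.h>

/* Queue one outgoing buffer and notify the other side. */
static void send_one(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_KERNEL);
	if (err)
		pr_err("virtqueue_add_outbuf: %i\n", err);
	else
		virtqueue_kick(vq);
}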
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
new file mode 100644
index 000000000000..5049967f99f7
--- /dev/null
+++ b/tools/virtio/linux/virtio_config.h
@@ -0,0 +1,6 @@
1#define VIRTIO_TRANSPORT_F_START 28
2#define VIRTIO_TRANSPORT_F_END 32
3
4#define virtio_has_feature(dev, feature) \
5 test_bit((feature), (dev)->features)
6
diff --git a/tools/virtio/linux/virtio_ring.h b/tools/virtio/linux/virtio_ring.h
new file mode 100644
index 000000000000..8949c4e2772c
--- /dev/null
+++ b/tools/virtio/linux/virtio_ring.h
@@ -0,0 +1 @@
#include "../../../include/linux/virtio_ring.h"
diff --git a/tools/virtio/linux/vringh.h b/tools/virtio/linux/vringh.h
new file mode 100644
index 000000000000..9348957be56e
--- /dev/null
+++ b/tools/virtio/linux/vringh.h
@@ -0,0 +1 @@
#include "../../../include/linux/vringh.h"
diff --git a/tools/virtio/uapi/linux/uio.h b/tools/virtio/uapi/linux/uio.h
new file mode 100644
index 000000000000..7230e9002207
--- /dev/null
+++ b/tools/virtio/uapi/linux/uio.h
@@ -0,0 +1 @@
#include <sys/uio.h>
diff --git a/tools/virtio/uapi/linux/virtio_config.h b/tools/virtio/uapi/linux/virtio_config.h
new file mode 100644
index 000000000000..4c86675f0159
--- /dev/null
+++ b/tools/virtio/uapi/linux/virtio_config.h
@@ -0,0 +1 @@
#include "../../../../include/uapi/linux/virtio_config.h"
diff --git a/tools/virtio/uapi/linux/virtio_ring.h b/tools/virtio/uapi/linux/virtio_ring.h
new file mode 100644
index 000000000000..4d99c78234d3
--- /dev/null
+++ b/tools/virtio/uapi/linux/virtio_ring.h
@@ -0,0 +1,4 @@
1#ifndef VIRTIO_RING_H
2#define VIRTIO_RING_H
3#include "../../../../include/uapi/linux/virtio_ring.h"
4#endif /* VIRTIO_RING_H */
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index fcc9aa25fd08..da7a19558281 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -10,11 +10,15 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <fcntl.h>
+#include <stdbool.h>
 #include <linux/vhost.h>
 #include <linux/virtio.h>
 #include <linux/virtio_ring.h>
 #include "../../drivers/vhost/test.h"
 
+/* Unused */
+void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
+
 struct vq_info {
 	int kick;
 	int call;
@@ -92,7 +96,8 @@ static void vq_info_add(struct vdev_info *dev, int num)
 	assert(r >= 0);
 	memset(info->ring, 0, vring_size(num, 4096));
 	vring_init(&info->vring, num, info->ring, 4096);
-	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev,
+	info->vq = vring_new_virtqueue(info->idx,
+				       info->vring.num, 4096, &dev->vdev,
 				       true, info->ring,
 				       vq_notify, vq_callback, "test");
 	assert(info->vq);
@@ -161,9 +166,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 	do {
 		if (started < bufs) {
 			sg_init_one(&sl, dev->buf, dev->buf_size);
-			r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
-					      dev->buf + started,
-					      GFP_ATOMIC);
+			r = virtqueue_add_outbuf(vq->vq, &sl, 1,
+						 dev->buf + started,
+						 GFP_ATOMIC);
 			if (likely(r == 0)) {
 				++started;
 				virtqueue_kick(vq->vq);
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
new file mode 100644
index 000000000000..d053ea40c001
--- /dev/null
+++ b/tools/virtio/vringh_test.c
@@ -0,0 +1,741 @@
1/* Simple test of virtio code, entirely in userspace. */
2#define _GNU_SOURCE
3#include <sched.h>
4#include <err.h>
5#include <linux/kernel.h>
6#include <linux/err.h>
7#include <linux/virtio.h>
8#include <linux/vringh.h>
9#include <linux/virtio_ring.h>
10#include <linux/uaccess.h>
11#include <sys/types.h>
12#include <sys/stat.h>
13#include <sys/mman.h>
14#include <sys/wait.h>
15#include <fcntl.h>
16
17#define USER_MEM (1024*1024)
18void *__user_addr_min, *__user_addr_max;
19void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
20static u64 user_addr_offset;
21
22#define RINGSIZE 256
23#define ALIGN 4096
24
25static void never_notify_host(struct virtqueue *vq)
26{
27 abort();
28}
29
30static void never_callback_guest(struct virtqueue *vq)
31{
32 abort();
33}
34
35static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
36{
37 if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
38 return false;
39 if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
40 return false;
41
42 r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
43 r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
44 r->offset = user_addr_offset;
45 return true;
46}
47
48/* We return single byte ranges. */
49static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
50{
51 if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
52 return false;
53 if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
54 return false;
55
56 r->start = addr;
57 r->end_incl = r->start;
58 r->offset = user_addr_offset;
59 return true;
60}
61
62struct guest_virtio_device {
63 struct virtio_device vdev;
64 int to_host_fd;
65 unsigned long notifies;
66};
67
68static void parallel_notify_host(struct virtqueue *vq)
69{
70 struct guest_virtio_device *gvdev;
71
72 gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
73 write(gvdev->to_host_fd, "", 1);
74 gvdev->notifies++;
75}
76
77static void no_notify_host(struct virtqueue *vq)
78{
79}
80
81#define NUM_XFERS (10000000)
82
83/* We aim for two "distant" cpus. */
84static void find_cpus(unsigned int *first, unsigned int *last)
85{
86 unsigned int i;
87
88 *first = -1U;
89 *last = 0;
90 for (i = 0; i < 4096; i++) {
91 cpu_set_t set;
92 CPU_ZERO(&set);
93 CPU_SET(i, &set);
94 if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
95 if (i < *first)
96 *first = i;
97 if (i > *last)
98 *last = i;
99 }
100 }
101}
102
103/* Opencoded version for fast mode */
104static inline int vringh_get_head(struct vringh *vrh, u16 *head)
105{
106 u16 avail_idx, i;
107 int err;
108
109 err = get_user(avail_idx, &vrh->vring.avail->idx);
110 if (err)
111 return err;
112
113 if (vrh->last_avail_idx == avail_idx)
114 return 0;
115
116 /* Only get avail ring entries after they have been exposed by guest. */
117 virtio_rmb(vrh->weak_barriers);
118
119 i = vrh->last_avail_idx & (vrh->vring.num - 1);
120
121 err = get_user(*head, &vrh->vring.avail->ring[i]);
122 if (err)
123 return err;
124
125 vrh->last_avail_idx++;
126 return 1;
127}
128
129static int parallel_test(unsigned long features,
130 bool (*getrange)(struct vringh *vrh,
131 u64 addr, struct vringh_range *r),
132 bool fast_vringh)
133{
134 void *host_map, *guest_map;
135 int fd, mapsize, to_guest[2], to_host[2];
136 unsigned long xfers = 0, notifies = 0, receives = 0;
137 unsigned int first_cpu, last_cpu;
138 cpu_set_t cpu_set;
139 char buf[128];
140
141 /* Create real file to mmap. */
142 fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
143 if (fd < 0)
144 err(1, "Opening /tmp/vringh_test-file");
145
146 /* Extra room at the end for some data, and indirects */
147 mapsize = vring_size(RINGSIZE, ALIGN)
148 + RINGSIZE * 2 * sizeof(int)
149 + RINGSIZE * 6 * sizeof(struct vring_desc);
150 mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
151 ftruncate(fd, mapsize);
152
153 /* Parent and child use separate addresses, to check our mapping logic! */
154 host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
155 guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
156
157 pipe(to_guest);
158 pipe(to_host);
159
160 CPU_ZERO(&cpu_set);
161 find_cpus(&first_cpu, &last_cpu);
162 printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
163 fflush(stdout);
164
165 if (fork() != 0) {
166 struct vringh vrh;
167 int status, err, rlen = 0;
168 char rbuf[5];
169
170 /* We are the host: never access guest addresses! */
171 munmap(guest_map, mapsize);
172
173 __user_addr_min = host_map;
174 __user_addr_max = __user_addr_min + mapsize;
175 user_addr_offset = host_map - guest_map;
176 assert(user_addr_offset);
177
178 close(to_guest[0]);
179 close(to_host[1]);
180
181 vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
182 vringh_init_user(&vrh, features, RINGSIZE, true,
183 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
184 CPU_SET(first_cpu, &cpu_set);
185 if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
186 errx(1, "Could not set affinity to cpu %u", first_cpu);
187
188 while (xfers < NUM_XFERS) {
189 struct iovec host_riov[2], host_wiov[2];
190 struct vringh_iov riov, wiov;
191 u16 head, written;
192
193 if (fast_vringh) {
194 for (;;) {
195 err = vringh_get_head(&vrh, &head);
196 if (err != 0)
197 break;
198 err = vringh_need_notify_user(&vrh);
199 if (err < 0)
200 errx(1, "vringh_need_notify_user: %i",
201 err);
202 if (err) {
203 write(to_guest[1], "", 1);
204 notifies++;
205 }
206 }
207 if (err != 1)
208 errx(1, "vringh_get_head");
209 written = 0;
210 goto complete;
211 } else {
212 vringh_iov_init(&riov,
213 host_riov,
214 ARRAY_SIZE(host_riov));
215 vringh_iov_init(&wiov,
216 host_wiov,
217 ARRAY_SIZE(host_wiov));
218
219 err = vringh_getdesc_user(&vrh, &riov, &wiov,
220 getrange, &head);
221 }
222 if (err == 0) {
223 err = vringh_need_notify_user(&vrh);
224 if (err < 0)
225 errx(1, "vringh_need_notify_user: %i",
226 err);
227 if (err) {
228 write(to_guest[1], "", 1);
229 notifies++;
230 }
231
232 if (!vringh_notify_enable_user(&vrh))
233 continue;
234
235 /* Swallow all notifies at once. */
236 if (read(to_host[0], buf, sizeof(buf)) < 1)
237 break;
238
239 vringh_notify_disable_user(&vrh);
240 receives++;
241 continue;
242 }
243 if (err != 1)
244 errx(1, "vringh_getdesc_user: %i", err);
245
246 /* We simply copy bytes. */
247 if (riov.used) {
248 rlen = vringh_iov_pull_user(&riov, rbuf,
249 sizeof(rbuf));
250 if (rlen != 4)
251 errx(1, "vringh_iov_pull_user: %i",
252 rlen);
253 assert(riov.i == riov.used);
254 written = 0;
255 } else {
256 err = vringh_iov_push_user(&wiov, rbuf, rlen);
257 if (err != rlen)
258 errx(1, "vringh_iov_push_user: %i",
259 err);
260 assert(wiov.i == wiov.used);
261 written = err;
262 }
263 complete:
264 xfers++;
265
266 err = vringh_complete_user(&vrh, head, written);
267 if (err != 0)
268 errx(1, "vringh_complete_user: %i", err);
269 }
270
271 err = vringh_need_notify_user(&vrh);
272 if (err < 0)
273 errx(1, "vringh_need_notify_user: %i", err);
274 if (err) {
275 write(to_guest[1], "", 1);
276 notifies++;
277 }
278 wait(&status);
279 if (!WIFEXITED(status))
280 errx(1, "Child died with signal %i?", WTERMSIG(status));
281 if (WEXITSTATUS(status) != 0)
282 errx(1, "Child exited %i?", WEXITSTATUS(status));
283 printf("Host: notified %lu, pinged %lu\n", notifies, receives);
284 return 0;
285 } else {
286 struct guest_virtio_device gvdev;
287 struct virtqueue *vq;
288 unsigned int *data;
289 struct vring_desc *indirects;
290 unsigned int finished = 0;
291
292 /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
293 data = guest_map + vring_size(RINGSIZE, ALIGN);
294 indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);
295
296 /* We are the guest. */
297 munmap(host_map, mapsize);
298
299 close(to_guest[1]);
300 close(to_host[0]);
301
302 gvdev.vdev.features[0] = features;
303 gvdev.to_host_fd = to_host[1];
304 gvdev.notifies = 0;
305
306 CPU_SET(first_cpu, &cpu_set);
307 if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
308 err(1, "Could not set affinity to cpu %u", first_cpu);
309
310 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
311 guest_map, fast_vringh ? no_notify_host
312 : parallel_notify_host,
313 never_callback_guest, "guest vq");
314
315 /* Don't kfree indirects. */
316 __kfree_ignore_start = indirects;
317 __kfree_ignore_end = indirects + RINGSIZE * 6;
318
319 while (xfers < NUM_XFERS) {
320 struct scatterlist sg[4];
321 unsigned int num_sg, len;
322 int *dbuf, err;
323 bool output = !(xfers % 2);
324
325 /* Consume bufs. */
326 while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
327 if (len == 4)
328 assert(*dbuf == finished - 1);
329 else if (!fast_vringh)
330 assert(*dbuf == finished);
331 finished++;
332 }
333
334 /* Produce a buffer. */
335 dbuf = data + (xfers % (RINGSIZE + 1));
336
337 if (output)
338 *dbuf = xfers;
339 else
340 *dbuf = -1;
341
342 switch ((xfers / sizeof(*dbuf)) % 4) {
343 case 0:
344 /* Nasty three-element sg list. */
345 sg_init_table(sg, num_sg = 3);
346 sg_set_buf(&sg[0], (void *)dbuf, 1);
347 sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
348 sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
349 break;
350 case 1:
351 sg_init_table(sg, num_sg = 2);
352 sg_set_buf(&sg[0], (void *)dbuf, 1);
353 sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
354 break;
355 case 2:
356 sg_init_table(sg, num_sg = 1);
357 sg_set_buf(&sg[0], (void *)dbuf, 4);
358 break;
359 case 3:
360 sg_init_table(sg, num_sg = 4);
361 sg_set_buf(&sg[0], (void *)dbuf, 1);
362 sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
363 sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
364 sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
365 break;
366 }
367
368 /* May allocate an indirect, so force it to allocate
369 * user addr */
370 __kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
371 if (output)
372 err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
373 GFP_KERNEL);
374 else
375 err = virtqueue_add_inbuf(vq, sg, num_sg,
376 dbuf, GFP_KERNEL);
377
378 if (err == -ENOSPC) {
379 if (!virtqueue_enable_cb_delayed(vq))
380 continue;
381 /* Swallow all notifies at once. */
382 if (read(to_guest[0], buf, sizeof(buf)) < 1)
383 break;
384
385 receives++;
386 virtqueue_disable_cb(vq);
387 continue;
388 }
389
390 if (err)
391 errx(1, "virtqueue_add_in/outbuf: %i", err);
392
393 xfers++;
394 virtqueue_kick(vq);
395 }
396
397 /* Any extra? */
398 while (finished != xfers) {
399 int *dbuf;
400 unsigned int len;
401
402 /* Consume bufs. */
403 dbuf = virtqueue_get_buf(vq, &len);
404 if (dbuf) {
405 if (len == 4)
406 assert(*dbuf == finished - 1);
407 else
408 assert(len == 0);
409 finished++;
410 continue;
411 }
412
413 if (!virtqueue_enable_cb_delayed(vq))
414 continue;
415 if (read(to_guest[0], buf, sizeof(buf)) < 1)
416 break;
417
418 receives++;
419 virtqueue_disable_cb(vq);
420 }
421
422 printf("Guest: notified %lu, pinged %lu\n",
423 gvdev.notifies, receives);
424 vring_del_virtqueue(vq);
425 return 0;
426 }
427}
428
429int main(int argc, char *argv[])
430{
431 struct virtio_device vdev;
432 struct virtqueue *vq;
433 struct vringh vrh;
434 struct scatterlist guest_sg[RINGSIZE], *sgs[2];
435 struct iovec host_riov[2], host_wiov[2];
436 struct vringh_iov riov, wiov;
437 struct vring_used_elem used[RINGSIZE];
438 char buf[28];
439 u16 head;
440 int err;
441 unsigned i;
442 void *ret;
443 bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
444 bool fast_vringh = false, parallel = false;
445
446 getrange = getrange_iov;
447 vdev.features[0] = 0;
448
449 while (argv[1]) {
450 if (strcmp(argv[1], "--indirect") == 0)
451 vdev.features[0] |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
452 else if (strcmp(argv[1], "--eventidx") == 0)
453 vdev.features[0] |= (1 << VIRTIO_RING_F_EVENT_IDX);
454 else if (strcmp(argv[1], "--slow-range") == 0)
455 getrange = getrange_slow;
456 else if (strcmp(argv[1], "--fast-vringh") == 0)
457 fast_vringh = true;
458 else if (strcmp(argv[1], "--parallel") == 0)
459 parallel = true;
460 else
461 errx(1, "Unknown arg %s", argv[1]);
462 argv++;
463 }
464
465 if (parallel)
466 return parallel_test(vdev.features[0], getrange, fast_vringh);
467
468 if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
469 abort();
470 __user_addr_max = __user_addr_min + USER_MEM;
471 memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));
472
473 /* Set up guest side. */
474 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
475 __user_addr_min,
476 never_notify_host, never_callback_guest,
477 "guest vq");
478
479 /* Set up host side. */
480 vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
481 vringh_init_user(&vrh, vdev.features[0], RINGSIZE, true,
482 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
483
484 /* No descriptor to get yet... */
485 err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
486 if (err != 0)
487 errx(1, "vringh_getdesc_user: %i", err);
488
489 /* Guest puts in a descriptor. */
490 memcpy(__user_addr_max - 1, "a", 1);
491 sg_init_table(guest_sg, 1);
492 sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
493 sg_init_table(guest_sg+1, 1);
494 sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
495 sgs[0] = &guest_sg[0];
496 sgs[1] = &guest_sg[1];
497
498 /* May allocate an indirect, so force it to allocate user addr */
499 __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
500 err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
501 if (err)
502 errx(1, "virtqueue_add_sgs: %i", err);
503 __kmalloc_fake = NULL;
504
505	/* Host retrieves it. */
506 vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
507 vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
508
509 err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
510 if (err != 1)
511 errx(1, "vringh_getdesc_user: %i", err);
512
513 assert(riov.used == 1);
514 assert(riov.iov[0].iov_base == __user_addr_max - 1);
515 assert(riov.iov[0].iov_len == 1);
516 if (getrange != getrange_slow) {
517 assert(wiov.used == 1);
518 assert(wiov.iov[0].iov_base == __user_addr_max - 3);
519 assert(wiov.iov[0].iov_len == 2);
520 } else {
521 assert(wiov.used == 2);
522 assert(wiov.iov[0].iov_base == __user_addr_max - 3);
523 assert(wiov.iov[0].iov_len == 1);
524 assert(wiov.iov[1].iov_base == __user_addr_max - 2);
525 assert(wiov.iov[1].iov_len == 1);
526 }
527
528 err = vringh_iov_pull_user(&riov, buf, 5);
529 if (err != 1)
530 errx(1, "vringh_iov_pull_user: %i", err);
531 assert(buf[0] == 'a');
532 assert(riov.i == 1);
533 assert(vringh_iov_pull_user(&riov, buf, 5) == 0);
534
535 memcpy(buf, "bcdef", 5);
536 err = vringh_iov_push_user(&wiov, buf, 5);
537 if (err != 2)
538 errx(1, "vringh_iov_push_user: %i", err);
539 assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
540 assert(wiov.i == wiov.used);
541 assert(vringh_iov_push_user(&wiov, buf, 5) == 0);
542
543 /* Host is done. */
544 err = vringh_complete_user(&vrh, head, err);
545 if (err != 0)
546 errx(1, "vringh_complete_user: %i", err);
547
548 /* Guest should see used token now. */
549 __kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
550 __kfree_ignore_end = __kfree_ignore_start + 1;
551 ret = virtqueue_get_buf(vq, &i);
552 if (ret != &err)
553 errx(1, "virtqueue_get_buf: %p", ret);
554 assert(i == 2);
555
556 /* Guest puts in a huge descriptor. */
557 sg_init_table(guest_sg, RINGSIZE);
558 for (i = 0; i < RINGSIZE; i++) {
559 sg_set_buf(&guest_sg[i],
560 __user_addr_max - USER_MEM/4, USER_MEM/4);
561 }
562
563 /* Fill contents with recognisable garbage. */
564 for (i = 0; i < USER_MEM/4; i++)
565 ((char *)__user_addr_max - USER_MEM/4)[i] = i;
566
567 /* This will allocate an indirect, so force it to allocate user addr */
568 __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
569 err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
570 if (err)
571 errx(1, "virtqueue_add_outbuf (large): %i", err);
572 __kmalloc_fake = NULL;
573
574 /* Host picks it up (allocates new iov). */
575 vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
576 vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
577
578 err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
579 if (err != 1)
580 errx(1, "vringh_getdesc_user: %i", err);
581
582 assert(riov.max_num & VRINGH_IOV_ALLOCATED);
583 assert(riov.iov != host_riov);
584 if (getrange != getrange_slow)
585 assert(riov.used == RINGSIZE);
586 else
587 assert(riov.used == RINGSIZE * USER_MEM/4);
588
589 assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
590 assert(wiov.used == 0);
591
592 /* Pull data back out (in odd chunks), should be as expected. */
593 for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
594 err = vringh_iov_pull_user(&riov, buf, 3);
595 if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
596 errx(1, "vringh_iov_pull_user large: %i", err);
597 assert(buf[0] == (char)i);
598 assert(err < 2 || buf[1] == (char)(i + 1));
599 assert(err < 3 || buf[2] == (char)(i + 2));
600 }
601 assert(riov.i == riov.used);
602 vringh_iov_cleanup(&riov);
603 vringh_iov_cleanup(&wiov);
604
605 /* Complete using multi interface, just because we can. */
606 used[0].id = head;
607 used[0].len = 0;
608 err = vringh_complete_multi_user(&vrh, used, 1);
609 if (err)
610 errx(1, "vringh_complete_multi_user(1): %i", err);
611
612 /* Free up those descriptors. */
613 ret = virtqueue_get_buf(vq, &i);
614 if (ret != &err)
615 errx(1, "virtqueue_get_buf: %p", ret);
616
617 /* Add lots of descriptors. */
618 sg_init_table(guest_sg, 1);
619 sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
620 for (i = 0; i < RINGSIZE; i++) {
621 err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
622 if (err)
623 errx(1, "virtqueue_add_outbuf (multiple): %i", err);
624 }
625
626 /* Now get many, and consume them all at once. */
627 vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
628 vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
629
630 for (i = 0; i < RINGSIZE; i++) {
631 err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
632 if (err != 1)
633 errx(1, "vringh_getdesc_user: %i", err);
634 used[i].id = head;
635 used[i].len = 0;
636 }
637 /* Make sure it wraps around ring, to test! */
638 assert(vrh.vring.used->idx % RINGSIZE != 0);
639 err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
640 if (err)
641 errx(1, "vringh_complete_multi_user: %i", err);
642
643 /* Free those buffers. */
644 for (i = 0; i < RINGSIZE; i++) {
645 unsigned len;
646 assert(virtqueue_get_buf(vq, &len) != NULL);
647 }
648
649 /* Test weird (but legal!) indirect. */
650 if (vdev.features[0] & (1 << VIRTIO_RING_F_INDIRECT_DESC)) {
651 char *data = __user_addr_max - USER_MEM/4;
652 struct vring_desc *d = __user_addr_max - USER_MEM/2;
653 struct vring vring;
654
655 /* Force creation of direct, which we modify. */
656 vdev.features[0] &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
657 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
658 __user_addr_min,
659 never_notify_host,
660 never_callback_guest,
661 "guest vq");
662
663 sg_init_table(guest_sg, 4);
664 sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
665 sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
666 sg_set_buf(&guest_sg[2], data + 6, 4);
667 sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);
668
669 err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
670 if (err)
671 errx(1, "virtqueue_add_outbuf (indirect): %i", err);
672
673 vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);
674
675 /* They're used in order, but double-check... */
676 assert(vring.desc[0].addr == (unsigned long)d);
677 assert(vring.desc[1].addr == (unsigned long)(d+2));
678 assert(vring.desc[2].addr == (unsigned long)data + 6);
679 assert(vring.desc[3].addr == (unsigned long)(d+3));
680 vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
681 vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
682 vring.desc[3].flags |= VRING_DESC_F_INDIRECT;
683
684 /* First indirect */
685 d[0].addr = (unsigned long)data;
686 d[0].len = 1;
687 d[0].flags = VRING_DESC_F_NEXT;
688 d[0].next = 1;
689 d[1].addr = (unsigned long)data + 1;
690 d[1].len = 2;
691 d[1].flags = 0;
692
693 /* Second indirect */
694 d[2].addr = (unsigned long)data + 3;
695 d[2].len = 3;
696 d[2].flags = 0;
697
698 /* Third indirect */
699 d[3].addr = (unsigned long)data + 10;
700 d[3].len = 5;
701 d[3].flags = VRING_DESC_F_NEXT;
702 d[3].next = 1;
703 d[4].addr = (unsigned long)data + 15;
704 d[4].len = 6;
705 d[4].flags = VRING_DESC_F_NEXT;
706 d[4].next = 2;
707 d[5].addr = (unsigned long)data + 21;
708 d[5].len = 7;
709 d[5].flags = 0;
710
711 /* Host picks it up (allocates new iov). */
712 vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
713 vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));
714
715 err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
716 if (err != 1)
717 errx(1, "vringh_getdesc_user: %i", err);
718
719 if (head != 0)
720 errx(1, "vringh_getdesc_user: head %i not 0", head);
721
722 assert(riov.max_num & VRINGH_IOV_ALLOCATED);
723 if (getrange != getrange_slow)
724 assert(riov.used == 7);
725 else
726 assert(riov.used == 28);
727 err = vringh_iov_pull_user(&riov, buf, 29);
728 assert(err == 28);
729
730 /* Data should be linear. */
731 for (i = 0; i < err; i++)
732 assert(buf[i] == i);
733 vringh_iov_cleanup(&riov);
734 }
735
736 /* Don't leak memory... */
737 vring_del_virtqueue(vq);
738 free(__user_addr_min);
739
740 return 0;
741}
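
Distilled from the host-side loop above (a condensed sketch, not an additional test): the basic vringh sequence is to initialise the iov cursors, fetch a descriptor chain, copy data with the pull/push helpers, and mark the head used.

#include <linux/kernel.h>
#include <linux/uio.h>
#include <linux/vringh.h>

static int service_one(struct vringh *vrh,
		       bool (*getrange)(struct vringh *, u64,
					struct vringh_range *))
{
	struct iovec riovec[2], wiovec[2];
	struct vringh_iov riov, wiov;
	char buf[32];
	u16 head;
	int err;

	vringh_iov_init(&riov, riovec, ARRAY_SIZE(riovec));
	vringh_iov_init(&wiov, wiovec, ARRAY_SIZE(wiovec));

	err = vringh_getdesc_user(vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		return err;			/* 0: ring empty, <0: error */

	err = vringh_iov_pull_user(&riov, buf, sizeof(buf));
	if (err < 0)
		return err;

	return vringh_complete_user(vrh, head, 0);	/* nothing written back */
}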