-rw-r--r--   include/linux/ptr_ring.h        | 393
-rw-r--r--   include/linux/skb_array.h       | 169
-rw-r--r--   tools/virtio/ringtest/Makefile  |   5
-rw-r--r--   tools/virtio/ringtest/ptr_ring.c | 192
4 files changed, 758 insertions(+), 1 deletion(-)
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
new file mode 100644
index 000000000000..562a65e8bcc0
--- /dev/null
+++ b/include/linux/ptr_ring.h
@@ -0,0 +1,393 @@
/*
 * Definitions for the 'struct ptr_ring' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This is a limited-size FIFO maintaining pointers in FIFO order, with
 * one CPU producing entries and another consuming them.
 *
 * This implementation tries to minimize cache-contention when there is a
 * single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If the ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, a caller that does not
 * hold producer_lock may see a stale result: the ring can look non-full here
 * and yet the next call to __ptr_ring_produce may still fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}

static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	return r->queue[r->consumer];
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}

/* Cast the pointer at the head of the FIFO to a structure type and call a
 * function on it, without discarding the entry from the FIFO.
 * The function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})

static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
{
	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	int producer = 0;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;
	void *ptr;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&r->producer_lock, flags);

	while ((ptr = ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	r->size = size;
	r->producer = producer;
	r->consumer = 0;
	old = r->queue;
	r->queue = queue;

	spin_unlock_irqrestore(&r->producer_lock, flags);

	kfree(old);

	return 0;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H */
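
The expected usage of the API above: one side calls a produce variant, the other a consume variant, with NULL marking free slots (so NULL itself must never be produced). The following is a minimal, illustrative sketch, not part of the patch: struct item and all my_* names are invented, and a real caller would pick the lock variant (_irq, _bh, _any) matching its context.

/* Illustrative sketch only: "struct item", the ring size and all my_*
 * names are invented for this example.
 */
#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct item {
	int id;
};

static struct ptr_ring my_ring;

static int my_setup(void)
{
	/* Size is fixed at init; ptr_ring_resize can change it later. */
	return ptr_ring_init(&my_ring, 256, GFP_KERNEL);
}

/* Producer side (process context, hence the plain-lock variant). */
static int my_send(int id)
{
	struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;
	it->id = id;
	/* NULL marks a free slot, so NULL itself must never be produced. */
	if (ptr_ring_produce(&my_ring, it)) {
		kfree(it);
		return -ENOSPC;	/* ring full */
	}
	return 0;
}

/* Consumer side: ptr_ring_consume returns NULL once the ring is empty. */
static void my_drain(void)
{
	struct item *it;

	while ((it = ptr_ring_consume(&my_ring)))
		kfree(it);
}

static void my_item_destroy(void *ptr)
{
	kfree(ptr);
}

static void my_teardown(void)
{
	/* Destroys anything still queued, then frees the queue array. */
	ptr_ring_cleanup(&my_ring, my_item_destroy);
}
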
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
new file mode 100644
index 000000000000..678bfbf78ac4
--- /dev/null
+++ b/include/linux/skb_array.h
@@ -0,0 +1,169 @@
/*
 * Definitions for the 'struct skb_array' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Limited-size FIFO of skbs. Can be used more or less whenever
 * sk_buff_head can be used, except you need to know the queue size in
 * advance.
 * Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return !__ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

/* skbs still queued in the array are owned by it: free them on resize/cleanup */
static inline void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */
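
A rough usage sketch for the wrapper (again with invented my_* names, not taken from the patch): a driver-style TX queue filled from BH context and drained by a worker might look like this.

/* Illustrative sketch only: the my_* names are invented. */
#include <linux/skb_array.h>

static struct skb_array my_txq;

static int my_txq_setup(void)
{
	return skb_array_init(&my_txq, 128, GFP_KERNEL);
}

/* Producer, e.g. from a BH/softirq transmit path. */
static int my_txq_queue(struct sk_buff *skb)
{
	return skb_array_produce_bh(&my_txq, skb);	/* -ENOSPC when full */
}

/* Consumer, e.g. a completion worker. */
static void my_txq_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_array_consume_bh(&my_txq)))
		consume_skb(skb);
}

static void my_txq_teardown(void)
{
	/* Frees any skbs still queued via __skb_array_destroy_skb. */
	skb_array_cleanup(&my_txq);
}
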
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 6ba745529833..50e086c6a7b6 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -1,6 +1,6 @@
 all:
 
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring
 
 CFLAGS += -Wall
 CFLAGS += -pthread -O2 -ggdb
@@ -8,6 +8,7 @@ LDFLAGS += -pthread -O2 -ggdb
 
 main.o: main.c main.h
 ring.o: ring.c main.h
+ptr_ring.o: ptr_ring.c main.h ../../../include/linux/ptr_ring.h
 virtio_ring_0_9.o: virtio_ring_0_9.c main.h
 virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
 virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h
@@ -15,11 +16,13 @@ ring: ring.o main.o
 virtio_ring_0_9: virtio_ring_0_9.o main.o
 virtio_ring_poll: virtio_ring_poll.o main.o
 virtio_ring_inorder: virtio_ring_inorder.o main.o
+ptr_ring: ptr_ring.o main.o
 clean:
 	-rm main.o
 	-rm ring.o ring
 	-rm virtio_ring_0_9.o virtio_ring_0_9
 	-rm virtio_ring_poll.o virtio_ring_poll
 	-rm virtio_ring_inorder.o virtio_ring_inorder
+	-rm ptr_ring.o ptr_ring
 
 .PHONY: all clean
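
With these rules in place, `make` in tools/virtio/ringtest (or `make ptr_ring`) builds the new ptr_ring benchmark binary alongside the existing virtio ring variants, and `make clean` removes it again.
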
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
new file mode 100644
index 000000000000..74abd746ae91
--- /dev/null
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -0,0 +1,192 @@
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

/* Minimal userspace stand-ins for the kernel facilities ptr_ring.h uses */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;
	memset(p, 0, size);

	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

/* In userspace there is nothing to mask, so the bh/irq/irqsave variants
 * all map to plain lock/unlock.
 */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed. Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will now succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

void poll_used(void)
{
	void *b;

	do {
		if (tailcnt == headcnt || __ptr_ring_full(&array)) {
			b = NULL;
			barrier();
		} else {
			b = "Buffer\n";
		}
	} while (!b);
}

void disable_call(void)
{
	assert(0);
}

bool enable_call(void)
{
	assert(0);
	return false;	/* not reached; silences missing-return warnings */
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick(void)
{
	assert(0);
}

bool enable_kick(void)
{
	assert(0);
	return false;	/* not reached; silences missing-return warnings */
}

void poll_avail(void)
{
	void *b;

	do {
		barrier();
		b = __ptr_ring_peek(&array);
	} while (!b);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
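
For orientation, the harness in main.c (not part of this diff) drives the callbacks above from a producer thread and a consumer thread. Very roughly, and leaving out the batching and the kick/call signalling that this backend stubs out with assert(0), the shape is something like the sketch below; the iteration count and thread wrappers are invented here, not taken from main.c.

/* Simplified mental model of the two benchmark threads; see main.c for
 * the real loops.
 */
static const unsigned long long iters = 1000000;	/* invented count */

static void *producer_thread(void *arg)	/* "guest" side */
{
	unsigned long long i;

	for (i = 0; i < iters; ++i) {
		while (add_inbuf(0, "Buffer\n", NULL))	/* -ENOSPC: ring full */
			poll_used();	/* spin until the consumer makes room */
		get_buf(NULL, NULL);	/* reclaim a completed buffer, if any */
	}
	return NULL;
}

static void *consumer_thread(void *arg)	/* "host" side */
{
	unsigned long long i;

	for (i = 0; i < iters; ++i) {
		poll_avail();		/* spin until an entry is available */
		use_buf(NULL, NULL);	/* consume one entry */
	}
	return NULL;
}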