author		Michael S. Tsirkin <mst@redhat.com>	2016-06-13 16:54:31 -0400
committer	David S. Miller <davem@davemloft.net>	2016-06-15 16:57:21 -0400
commit		2e0ab8ca83c122f275b21ea917d52fee506910bf
tree		6feb905e24c58660a11ed93aa1117bf3fc82b62e
parent		b2313077ed0db35ee186905d8076a737248edd24
ptr_ring: array based FIFO for pointers
A simple array-based FIFO of pointers. Intended for the net stack, which
commonly has a single producer and a single consumer.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/ptr_ring.h	264
1 file changed, 264 insertions(+), 0 deletions(-)
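Before the patch itself, a minimal usage sketch of the API this header adds
(illustrative only, not part of the commit; some_object, drop() and process()
are placeholder names):

	struct ptr_ring ring;
	void *entry;

	if (ptr_ring_init(&ring, 256, GFP_KERNEL))
		return -ENOMEM;

	/* Producer side: produce fails with -ENOSPC when the ring is full. */
	if (ptr_ring_produce(&ring, some_object))
		drop(some_object);

	/* Consumer side: consume returns NULL when the ring is empty. */
	entry = ptr_ring_consume(&ring);
	if (entry)
		process(entry);

	/* Drain before freeing - cleanup releases only the array. */
	while ((entry = ptr_ring_consume(&ring)))
		process(entry);
	ptr_ring_cleanup(&ring);
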
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
new file mode 100644
index 000000000000..633406f9af8e
--- /dev/null
+++ b/include/linux/ptr_ring.h
@@ -0,0 +1,264 @@
/*
 * Definitions for the 'struct ptr_ring' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This is a limited-size FIFO maintaining pointers in FIFO order, with
 * one CPU producing entries and another CPU consuming them.
 *
 * This implementation tries to minimize cache-contention when there is a
 * single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};
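
/* Invariant: a NULL slot is free and a non-NULL slot holds a queued
 * pointer. The producer detects a full ring by checking its own slot
 * and the consumer detects an empty ring by checking its own slot, so
 * neither side ever reads the other's index. A consequence is that
 * NULL itself cannot be stored in the ring.
 */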

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 * Callers don't need to take the producer lock - but if they don't,
 * the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	barrier();
	return __ptr_ring_full(r);
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (__ptr_ring_full(r))
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}
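
/* Example (illustrative): a sole producer, or a caller already holding
 * the producer lock, can busy-wait for room as the note above suggests:
 *
 *	while (__ptr_ring_produce(r, ptr))
 *		cpu_relax();
 */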

static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
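
/* The locked variants follow the usual spinlock naming conventions:
 * plain ptr_ring_produce when the producer lock is never taken from
 * hardirq or softirq context, _irq/_bh when it may be, and _any
 * (irqsave/irqrestore) when the calling context is not known.
 */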

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * There's no need for a lock if the pointer is merely tested - see e.g.
 * ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	return r->queue[r->consumer];
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	barrier();
	return !__ptr_ring_peek(r);
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}
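
/* Example (illustrative): drain the ring, handing each entry to a
 * caller-supplied destructor ("destroy" is a placeholder):
 *
 *	void *ptr;
 *
 *	while ((ptr = ptr_ring_consume(r)))
 *		destroy(ptr);
 */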

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
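
/* Example (illustrative, with a hypothetical helper): peek at the head
 * entry without consuming it. Since f is applied to the raw result of
 * __ptr_ring_peek, it must tolerate a NULL argument (empty ring):
 *
 *	int len = PTR_RING_PEEK_CALL(r, skb_len_or_zero);
 */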
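
/* Note: the queue must be allocated zeroed - a NULL slot marks a free
 * entry - hence kzalloc below rather than kmalloc.
 */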
static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = kzalloc(ALIGN(size * sizeof *(r->queue), SMP_CACHE_BYTES),
			   gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}
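
/* Note: this frees only the queue array itself; any pointers still
 * queued are not released. Consume them before cleaning up.
 */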
static inline void ptr_ring_cleanup(struct ptr_ring *r)
{
	kfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H */