author    Jeremy Fitzhardinge <jeremy@xensource.com>  2007-07-17 21:37:04 -0400
committer Jeremy Fitzhardinge <jeremy@goop.org>       2007-07-18 11:47:42 -0400
commit    a42089dd358a7673a0a23126589a9029e57c2049 (patch)
tree      aa076610832f5cdb0ee209c42ea7e40d54534ef4 /include/xen/interface/io/ring.h
parent    24037a8b69dbf15bfed8fd42a2a2e442d7b0395b (diff)
xen: Add Xen interface header files
Add Xen interface header files. These are taken fairly directly from the Xen
tree, but somewhat rearranged to suit the kernel's conventions. Define macros
and inline functions for doing hypercalls into the hypervisor.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Diffstat (limited to 'include/xen/interface/io/ring.h')
-rw-r--r--  include/xen/interface/io/ring.h  |  260
1 file changed, 260 insertions(+), 0 deletions(-)
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
new file mode 100644
index 000000000000..e8cbf431c8cc
--- /dev/null
+++ b/include/xen/interface/io/ring.h
@@ -0,0 +1,260 @@
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
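
/*
 * Illustrative note (an editorial example, not part of the original header):
 * __RD32 keeps only the highest set bit of its argument, so for instance
 * __RD32(63) == 32, __RD32(64) == 64 and __RD32(2000) == 1024.
 */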

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
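
/*
 * Worked example (sizes assumed for illustration): with a 4096-byte shared
 * page and a 64-byte sring entry union, the four RING_IDX fields plus the
 * 48-byte pad occupy 64 bytes, leaving 4032 bytes for entries.
 * 4032 / 64 = 63 entries, which __RD32 rounds down to a 32-entry ring.
 */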

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say struct request and struct response, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *                     PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *                    PAGE_SIZE);
 */
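
/*
 * End-to-end sketch (hypothetical names, for illustration only): "mydev"
 * and its 64-byte request/response layouts are assumptions, not part of
 * this header.
 *
 *     struct mydev_request  { uint8_t raw[64]; };
 *     struct mydev_response { uint8_t raw[64]; };
 *     DEFINE_RING_TYPES(mydev, struct mydev_request, struct mydev_response);
 *
 *     struct mydev_sring *sring = (struct mydev_sring *)shared_page;
 *     struct mydev_front_ring front;
 *     SHARED_RING_INIT(sring);
 *     FRONT_RING_INIT(&front, sring, PAGE_SIZE);
 */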

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    uint8_t  pad[48];                                                   \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};
/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */
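
/*
 * Front-end produce path, sketched for illustration (fill_request() and
 * the front_ring variable are hypothetical; the macros used are defined
 * below):
 *
 *     if (!RING_FULL(&front_ring)) {
 *         struct request *req =
 *             RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *         fill_request(req);
 *         front_ring.req_prod_pvt++;
 *         RING_PUSH_REQUESTS(&front_ring);
 *     }
 */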

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod = (_s)->rsp_prod = 0;                                \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) do {                            \
    (_r)->req_prod_pvt = 0;                                             \
    (_r)->rsp_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {                             \
    (_r)->rsp_prod_pvt = 0;                                             \
    (_r)->req_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
    (_r)->sring = (_s);                                                 \
    (_r)->req_prod_pvt = (_s)->req_prod;                                \
    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
    (_r)->sring = (_s);                                                 \
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
    (_r)->req_cons = (_s)->req_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)
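
/*
 * Recovery sketch (illustrative): when reconnecting to a shared page whose
 * indexes are already live, attach rather than initialize so the existing
 * producer/consumer state is preserved; sring and front_ring are the
 * hypothetical variables from the earlier sketches:
 *
 *     FRONT_RING_ATTACH(&front_ring, sring, PAGE_SIZE);
 */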

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * The unconsumed-request count is capped at the number of slots still
 * available for responses: requests the back end has consumed but not yet
 * answered continue to occupy ring entries, so it must not run more than
 * RING_SIZE() ahead of its own rsp_prod_pvt.
 */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ({                                                                  \
        unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;      \
        unsigned int rsp = RING_SIZE(_r) -                              \
            ((_r)->req_cons - (_r)->rsp_prod_pvt);                      \
        req < rsp ? req : rsp;                                          \
    })

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
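
/*
 * Illustrative back-end guard (hypothetical "cons" cursor and handle()
 * helper): stop consuming when the ring would overflow, which also bounds
 * the work a misbehaving producer index can cause:
 *
 *     RING_IDX cons;
 *     for (cons = back_ring.req_cons;
 *          cons != back_ring.sring->req_prod &&
 *              !RING_REQUEST_CONS_OVERFLOW(&back_ring, cons);
 *          cons++)
 *         handle(RING_GET_REQUEST(&back_ring, cons));
 *     back_ring.req_cons = cons;
 */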

#define RING_PUSH_REQUESTS(_r) do {                                     \
    wmb(); /* back sees requests /before/ updated producer index */     \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    wmb(); /* front sees responses /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */
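
/*
 * Producer-side sketch (illustrative): notify_remote() stands in for
 * whatever notification primitive (e.g. an event channel) the surrounding
 * code uses; it is not defined in this header.
 *
 *     int notify;
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote();
 */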

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    wmb(); /* back sees requests /before/ updated producer index */     \
    (_r)->sring->req_prod = __new;                                      \
    mb(); /* back sees new requests /before/ we check req_event */      \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    wmb(); /* front sees responses /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                      \
    mb(); /* front sees new responses /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    mb();                                                               \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    mb();                                                               \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)
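
/*
 * Consumer-side sketch (illustrative; process_request() and back_ring are
 * hypothetical). Drain the ring, then use the FINAL_CHECK macro to close
 * the race with a late producer before sleeping:
 *
 *     int work_to_do;
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             struct request *req =
 *                 RING_GET_REQUEST(&back_ring, back_ring.req_cons++);
 *             process_request(req);
 *         }
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *     } while (work_to_do);
 */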

#endif /* __XEN_PUBLIC_IO_RING_H__ */