author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2014-07-01 06:57:14 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>       2014-07-08 15:18:56 -0400
commit    e941759c74a44d6ac2eed21bb0a38b21fe4559e2 (patch)
tree      717acd3b5d198f22a04a61c97a7b806785ba3166 /include/linux/fence.h
parent    35fac7e305dc5e0bd1e52f2505944674337de57c (diff)
fence: dma-buf cross-device synchronization (v18)
A fence can be attached to a buffer which is being filled or consumed
by hw, to allow userspace to pass the buffer without waiting to another
device. For example, userspace can call page_flip ioctl to display the
next frame of graphics after kicking the GPU but while the GPU is still
rendering. The display device sharing the buffer with the GPU would
attach a callback to get notified when the GPU's rendering-complete IRQ
fires, to update the scan-out address of the display, without having to
wake up userspace.

A driver must allocate a fence context for each execution ring that can
run in parallel. The function for this takes an argument with how many
contexts to allocate:
  + fence_context_alloc()

A fence is a transient, one-shot deal. It is allocated and attached to
one or more dma-buf's. When the one that attached it is done with the
pending operation, it can signal the fence:
  + fence_signal()

To have a rough approximation whether a fence has fired, call:
  + fence_is_signaled()

The dma-buf-mgr handles tracking, and waiting on, the fences associated
with a dma-buf.

The one pending on the fence can add an async callback:
  + fence_add_callback()

The callback can optionally be cancelled with:
  + fence_remove_callback()

To wait synchronously, optionally with a timeout:
  + fence_wait()
  + fence_wait_timeout()

When emitting a fence, call:
  + trace_fence_emit()

To annotate that a fence is blocking on another fence, call:
  + trace_fence_annotate_wait_on(fence, on_fence)

A default software-only implementation is provided, which can be used
by drivers attaching a fence to a buffer when they have no other means
for hw sync. But a memory-backed fence is also envisioned, because it
is common that GPUs can write to, or poll on, some memory location for
synchronization. For example:

  fence = custom_get_fence(...);
  if ((seqno_fence = to_seqno_fence(fence)) != NULL) {
    dma_buf *fence_buf = seqno_fence->sync_buf;
    get_dma_buf(fence_buf);

    ... tell the hw the memory location to wait ...
    custom_wait_on(fence_buf, seqno_fence->seqno_ofs, fence->seqno);
  } else {
    /* fall-back to sw sync */
    fence_add_callback(fence, my_cb);
  }

On SoC platforms, if some other hw mechanism is provided for
synchronizing between IP blocks, it could be supported as an alternate
implementation with its own fence ops in a similar way.

The enable_signaling callback is used to provide sw signaling in case a
cpu waiter is requested or no compatible hardware signaling could be
used.

The intention is to provide a userspace interface (presumably via
eventfd) later, to be used in conjunction with dma-buf's mmap support
for sw access to buffers (or for userspace apps that would prefer to do
their own synchronization).

v1: Original

v2: After discussion w/ danvet and mlankhorst on #dri-devel, we decided
    that dma-fence didn't need to care about the sw->hw signaling path
    (it can be handled same as the sw->sw case), and therefore the
    fence->ops can be simplified and more handled in the core. So remove
    the signal, add_callback, cancel_callback, and wait ops, and replace
    with a simple enable_signaling() op which can be used to inform a
    fence supporting hw->hw signaling that one or more devices which do
    not support hw signaling are waiting (and therefore it should enable
    an irq or do whatever is necessary in order that the CPU is notified
    when the fence is passed).

v3: Fix locking fail in attach_fence() and get_fence().

v4: Remove tie-in w/ dma-buf. After discussion w/ danvet and mlankhorst
    we decided that we need to be able to attach one fence to N
    dma-buf's, so using the list_head in the dma-fence struct would be
    problematic.

v5: [ Maarten Lankhorst ] Updated for dma-bikeshed-fence and
    dma-buf-manager.

v6: [ Maarten Lankhorst ] I removed dma_fence_cancel_callback and some
    comments about checking if a fence fired or not. This is broken by
    design. waitqueue_active during destruction is now fatal, since the
    signaller should be holding a reference in enable_signalling until
    it signalled the fence. Pass the original dma_fence_cb along, and
    call __remove_wait in the dma_fence_callback handler, so that no
    cleanup needs to be performed.

v7: [ Maarten Lankhorst ] Set cb->func and only enable sw signaling if
    the fence wasn't signaled yet, for example for hardware fences that
    may choose to signal blindly.

v8: [ Maarten Lankhorst ] Tons of tiny fixes, moved __dma_fence_init to
    header and fixed include mess. dma-fence.h now includes dma-buf.h.
    All members are now initialized, so kmalloc can be used for
    allocating a dma-fence. More documentation added.

v9: Change compiler bitfields to flags, change return type of
    enable_signaling to bool. Rework dma_fence_wait. Added
    dma_fence_is_signaled and dma_fence_wait_timeout. s/dma// and change
    exports to non GPL. Added fence_is_signaled and
    fence_enable_sw_signaling calls, add ability to override the default
    wait operation.

v10: Remove event_queue, use a custom list, export try_to_wake_up from
    the scheduler. Remove fence lock and use a global spinlock instead;
    this should hopefully remove all the locking headaches I was having
    on trying to implement this. enable_signaling is called with this
    lock held.

v11: Use atomic ops for flags, lifting the need for some
    spin_lock_irqsaves. However I kept the guarantee that after
    fence_signal returns, it is guaranteed that enable_signaling has
    either been called to completion, or will not be called any more.

    Add contexts and seqno to the base fence implementation. This allows
    you to wait for fewer fences, by testing for seqno + signaled, and
    then only waiting on the later fence.

    Add FENCE_TRACE, FENCE_WARN, and FENCE_ERR. This makes debugging
    easier. A CONFIG_DEBUG_FENCE will be added to turn off the
    FENCE_TRACE spam, and another runtime option can turn it off at
    runtime.

v12: Add CONFIG_FENCE_TRACE. Add missing documentation for the
    fence->context and fence->seqno members.

v13: Fixup CONFIG_FENCE_TRACE kconfig description. Move
    fence_context_alloc to fence. Simplify fence_later. Kill the priv
    member of fence_cb.

v14: Remove priv argument from fence_add_callback, oops!

v15: Remove priv from documentation. Explicitly include linux/atomic.h.

v16: Add trace events. Import changes required by android syncpoints.

v17: Use wake_up_state instead of try_to_wake_up. (Colin Cross)
    Fix up commit description for seqno_fence. (Rob Clark)

v18: Rename release_fence to fence_release. Move to drivers/dma-buf/.
    Rename __fence_is_signaled and __fence_signal to *_locked. Rename
    __fence_init to fence_init. Make fence_default_wait return a signed
    long, and fix wait ops too.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Thierry Reding <thierry.reding@gmail.com> #use smp_mb__before_atomic()
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
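To make the signaller-side lifecycle above concrete, here is a rough
sketch. All my_* names are hypothetical illustrations, not part of this
patch; only fence_context_alloc(), fence_init(), fence_get(), fence_put()
and fence_signal() come from the API introduced here, and my_fence_ops is
assumed to be an ops table like the one sketched after struct fence_ops
below. kmalloc works because fence_init initializes all members:

  #include <linux/fence.h>
  #include <linux/slab.h>               /* kmalloc */

  struct my_ring {
          spinlock_t fence_lock;        /* shared by this ring's fences */
          unsigned context;             /* from fence_context_alloc() */
          unsigned seqno;
          struct fence *active;         /* one fence in flight, for brevity */
  };

  static void my_ring_init(struct my_ring *ring)
  {
          spin_lock_init(&ring->fence_lock);
          /* one context per execution ring that can run in parallel */
          ring->context = fence_context_alloc(1);
  }

  /* one fence per submission; callers must serialize submissions */
  static struct fence *my_ring_submit(struct my_ring *ring)
  {
          struct fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);

          if (!fence)
                  return NULL;

          fence_init(fence, &my_fence_ops, &ring->fence_lock,
                     ring->context, ++ring->seqno);
          ring->active = fence_get(fence);      /* ring's ref until the irq */
          /* ... kick the hw ... */
          return fence;                         /* caller's reference */
  }

  /* completion irq: wake all waiters, then drop the ring's reference */
  static void my_ring_complete_irq(struct my_ring *ring)
  {
          fence_signal(ring->active);
          fence_put(ring->active);
          ring->active = NULL;
  }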
Diffstat (limited to 'include/linux/fence.h')
-rw-r--r--  include/linux/fence.h | 343
1 file changed, 343 insertions, 0 deletions
diff --git a/include/linux/fence.h b/include/linux/fence.h
new file mode 100644
index 000000000000..b935cc650123
--- /dev/null
+++ b/include/linux/fence.h
@@ -0,0 +1,343 @@
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef __LINUX_FENCE_H
#define __LINUX_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>

struct fence;
struct fence_ops;
struct fence_cb;

/**
 * struct fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: fence_ops associated with this fence
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 *         can be compared to decide which fence would be signaled later.
 * @flags: A mask of FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @status: Optional, only valid if < 0, must be set before calling
 *          fence_signal, indicates that the fence has completed with an error.
 *
 * the flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
 * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * *) Since atomic bitops are used, this is not guaranteed to be the case.
 * Particularly, if the bit was set, but fence_signal was called right
 * before this bit was set, it would have been able to set the
 * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
 * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
 * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after fence_signal was called, any enable_signaling call will have either
 * been completed, or never called at all.
 */
struct fence {
        struct kref refcount;
        const struct fence_ops *ops;
        struct list_head cb_list;
        spinlock_t *lock;
        unsigned context, seqno;
        unsigned long flags;
        ktime_t timestamp;
        int status;
};

enum fence_flag_bits {
        FENCE_FLAG_SIGNALED_BIT,
        FENCE_FLAG_ENABLE_SIGNAL_BIT,
        FENCE_FLAG_USER_BITS, /* must always be last member */
};
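FENCE_FLAG_USER_BITS only marks where implementation-defined bits start;
what they mean is up to each driver. A small hypothetical sketch of how
an implementer might claim one:

  /* hypothetical driver-private flag, claiming the first user bit */
  #define MY_FENCE_FLAG_HW_QUEUED       FENCE_FLAG_USER_BITS

  static bool my_fence_is_queued(struct fence *fence)
  {
          /* fence->flags must be read and written with atomic bitops */
          return test_bit(MY_FENCE_FLAG_HW_QUEUED, &fence->flags);
  }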

typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);

/**
 * struct fence_cb - callback for fence_add_callback
 * @node: used by fence_add_callback to append this struct to fence::cb_list
 * @func: fence_func_t to call
 *
 * This struct will be initialized by fence_add_callback, additional
 * data can be passed along by embedding fence_cb in another struct.
 */
struct fence_cb {
        struct list_head node;
        fence_func_t func;
};
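Since the priv member was dropped from fence_cb (v13/v14 above),
per-callback state travels by embedding fence_cb in a larger struct and
recovering it with container_of(), as the kerneldoc above suggests. A
sketch with hypothetical my_* names:

  struct my_waiter {
          struct fence_cb cb;
          void *data;                   /* any per-callback state */
  };

  static void my_fence_cb(struct fence *fence, struct fence_cb *cb)
  {
          struct my_waiter *waiter = container_of(cb, struct my_waiter, cb);

          /* may run from the signaller's irq context: keep it short */
          my_handle_completion(waiter->data);   /* hypothetical handler */
  }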

/**
 * struct fence_ops - operations implemented for fence
 * @get_driver_name: returns the driver name.
 * @get_timeline_name: return the name of the context this fence belongs to.
 * @enable_signaling: enable software signaling of fence.
 * @signaled: [optional] peek whether the fence is signaled, can be null.
 * @wait: custom wait implementation, or fence_default_wait.
 * @release: [optional] called on destruction of fence, can be null
 * @fill_driver_data: [optional] callback to fill in free-form debug info
 *                    Returns amount of bytes filled, or -errno.
 * @fence_value_str: [optional] fills in the value of the fence as a string
 * @timeline_value_str: [optional] fills in the current value of the timeline
 *                      as a string
 *
 * Notes on enable_signaling:
 * For fence implementations that have the capability for hw->hw
 * signaling, they can implement this op to enable the necessary
 * irqs, or insert commands into cmdstream, etc. This is called
 * in the first wait() or add_callback() path to let the fence
 * implementation know that there is another driver waiting on
 * the signal (ie. hw->sw case).
 *
 * This function can be called from atomic context, but not
 * from irq context, so normal spinlocks can be used.
 *
 * A return value of false indicates the fence already passed,
 * or some failure occurred that made it impossible to enable
 * signaling. True indicates successful enabling.
 *
 * fence->status may be set in enable_signaling, but only when false is
 * returned.
 *
 * Calling fence_signal before enable_signaling is called allows
 * for a tiny race window in which enable_signaling is called during,
 * before, or after fence_signal. To fight this, it is recommended
 * that before enable_signaling returns true an extra reference is
 * taken on the fence, to be released when the fence is signaled.
 * This will mean fence_signal will still be called twice, but
 * the second time will be a noop since it was already signaled.
 *
 * Notes on signaled:
 * May set fence->status if returning true.
 *
 * Notes on wait:
 * Must not be NULL, set to fence_default_wait for default implementation.
 * the fence_default_wait implementation should work for any fence, as long
 * as enable_signaling works correctly.
 *
 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
 * timed out. Can also return other error values on custom implementations,
 * which should be treated as if the fence is signaled. For example a hardware
 * lockup could be reported like that.
 *
 * Notes on release:
 * Can be NULL, this function allows additional commands to run on
 * destruction of the fence. Can be called from irq context.
 * If pointer is set to NULL, kfree will get called instead.
 */

struct fence_ops {
        const char * (*get_driver_name)(struct fence *fence);
        const char * (*get_timeline_name)(struct fence *fence);
        bool (*enable_signaling)(struct fence *fence);
        bool (*signaled)(struct fence *fence);
        signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
        void (*release)(struct fence *fence);

        int (*fill_driver_data)(struct fence *fence, void *data, int size);
        void (*fence_value_str)(struct fence *fence, char *str, int size);
        void (*timeline_value_str)(struct fence *fence, char *str, int size);
};
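As a sketch of what a minimal implementation might look like, under the
assumption of hw that always raises a completion interrupt (all my_*
names are hypothetical; only fence_default_wait is provided by this
patch):

  static const char *my_get_driver_name(struct fence *fence)
  {
          return "my_driver";
  }

  static const char *my_get_timeline_name(struct fence *fence)
  {
          return "my_timeline";
  }

  static bool my_enable_signaling(struct fence *fence)
  {
          /*
           * This hypothetical hw always raises its completion irq, which
           * ends in fence_signal(), so there is nothing to switch on
           * here. Hw that enables irqs on demand should follow the
           * extra-reference recommendation in the notes above.
           */
          return true;
  }

  static const struct fence_ops my_fence_ops = {
          .get_driver_name = my_get_driver_name,
          .get_timeline_name = my_get_timeline_name,
          .enable_signaling = my_enable_signaling,
          .wait = fence_default_wait,   /* @wait must not be NULL */
          /* .release == NULL: the core falls back to kfree */
  };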

void fence_init(struct fence *fence, const struct fence_ops *ops,
                spinlock_t *lock, unsigned context, unsigned seqno);

void fence_release(struct kref *kref);
void fence_free(struct fence *fence);

/**
 * fence_get - increases refcount of the fence
 * @fence: [in] fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct fence *fence_get(struct fence *fence)
{
        if (fence)
                kref_get(&fence->refcount);
        return fence;
}

/**
 * fence_put - decreases refcount of the fence
 * @fence: [in] fence to reduce refcount of
 */
static inline void fence_put(struct fence *fence)
{
        if (fence)
                kref_put(&fence->refcount, fence_release);
}

int fence_signal(struct fence *fence);
int fence_signal_locked(struct fence *fence);
signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
                       fence_func_t func);
bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
void fence_enable_sw_signaling(struct fence *fence);
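A waiter typically pairs fence_add_callback with a fallback for the
already-signaled case; in the companion fence.c, fence_add_callback
returns -ENOENT then (an assumption stated here, since the return values
are not documented in this header). A sketch reusing the hypothetical
my_waiter type from above:

  static int my_wait_async(struct fence *fence, struct my_waiter *waiter)
  {
          int ret;

          ret = fence_add_callback(fence, &waiter->cb, my_fence_cb);
          if (ret == -ENOENT) {
                  /* already signaled: run the handler inline instead */
                  my_fence_cb(fence, &waiter->cb);
                  return 0;
          }
          return ret;   /* 0 on success, other errors passed through */
  }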

/**
 * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
 * @fence: [in] the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
 * haven't been called before.
 *
 * This function requires fence->lock to be held.
 */
static inline bool
fence_is_signaled_locked(struct fence *fence)
{
        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return true;

        if (fence->ops->signaled && fence->ops->signaled(fence)) {
                fence_signal_locked(fence);
                return true;
        }

        return false;
}

/**
 * fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: [in] the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
 * haven't been called before.
 *
 * It's recommended for seqno fences to call fence_signal when the
 * operation is complete, since it makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 */
static inline bool
fence_is_signaled(struct fence *fence)
{
        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return true;

        if (fence->ops->signaled && fence->ops->signaled(fence)) {
                fence_signal(fence);
                return true;
        }

        return false;
}

/**
 * fence_later - return the chronologically later fence
 * @f1: [in] the first fence from the same context
 * @f2: [in] the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
{
        if (WARN_ON(f1->context != f2->context))
                return NULL;

        /*
         * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
         * set if enable_signaling wasn't called, and enabling that here is
         * overkill.
         */
        if (f2->seqno - f1->seqno <= INT_MAX)
                return fence_is_signaled(f2) ? NULL : f2;
        else
                return fence_is_signaled(f1) ? NULL : f1;
}
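One way fence_later gets used, as the v11 notes above describe, is to
track a single fence per context and replace it whenever a later one
shows up. A hypothetical sketch:

  /* keep only the fence from this context that would signal last;
   * assumes *tracked is non-NULL and shares incoming's context */
  static void my_track_latest(struct fence **tracked, struct fence *incoming)
  {
          if (fence_later(*tracked, incoming) == incoming) {
                  fence_put(*tracked);
                  *tracked = fence_get(incoming);
          }
  }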

signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);


/**
 * fence_wait - sleep until the fence gets signaled
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 */
static inline signed long fence_wait(struct fence *fence, bool intr)
{
        signed long ret;

        /* Since fence_wait_timeout cannot timeout with
         * MAX_SCHEDULE_TIMEOUT, only valid return values are
         * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
         */
        ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

        return ret < 0 ? ret : 0;
}
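In a syscall path, the interruptible variant lets a pending signal abort
the wait; the usual pattern is to pass -ERESTARTSYS straight back so the
syscall can be restarted. A hypothetical sketch:

  static int my_ioctl_wait_fence(struct fence *fence)
  {
          signed long ret;

          ret = fence_wait(fence, true);        /* interruptible */
          if (ret < 0)
                  return ret;   /* -ERESTARTSYS: let the syscall restart */

          return 0;
  }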

unsigned fence_context_alloc(unsigned num);

#define FENCE_TRACE(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
                if (config_enabled(CONFIG_FENCE_TRACE))                 \
                        pr_info("f %u#%u: " fmt,                        \
                                __ff->context, __ff->seqno, ##args);    \
        } while (0)

#define FENCE_WARN(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
                pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno,    \
                        ##args);                                        \
        } while (0)

#define FENCE_ERR(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
                pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno,     \
                       ##args);                                         \
        } while (0)

#endif /* __LINUX_FENCE_H */