diff options
-rw-r--r-- | Documentation/DocBook/device-drivers.tmpl | 2 | ||||
-rw-r--r-- | MAINTAINERS | 2 | ||||
-rw-r--r-- | drivers/base/Kconfig | 9 | ||||
-rw-r--r-- | drivers/dma-buf/Makefile | 2 | ||||
-rw-r--r-- | drivers/dma-buf/fence.c | 431 | ||||
-rw-r--r-- | include/linux/fence.h | 343 | ||||
-rw-r--r-- | include/trace/events/fence.h | 128 |
7 files changed, 915 insertions, 2 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index ac61ebd92875..e634657efb52 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -129,6 +129,8 @@ X!Edrivers/base/interface.c | |||
129 | </sect1> | 129 | </sect1> |
130 | <sect1><title>Device Drivers DMA Management</title> | 130 | <sect1><title>Device Drivers DMA Management</title> |
131 | !Edrivers/dma-buf/dma-buf.c | 131 | !Edrivers/dma-buf/dma-buf.c |
132 | !Edrivers/dma-buf/fence.c | ||
133 | !Iinclude/linux/fence.h | ||
132 | !Iinclude/linux/reservation.h | 134 | !Iinclude/linux/reservation.h |
133 | !Edrivers/base/dma-coherent.c | 135 | !Edrivers/base/dma-coherent.c |
134 | !Edrivers/base/dma-mapping.c | 136 | !Edrivers/base/dma-mapping.c |
diff --git a/MAINTAINERS b/MAINTAINERS index 2eefee768d46..65c8f534b22f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2901,7 +2901,7 @@ L: linux-media@vger.kernel.org | |||
2901 | L: dri-devel@lists.freedesktop.org | 2901 | L: dri-devel@lists.freedesktop.org |
2902 | L: linaro-mm-sig@lists.linaro.org | 2902 | L: linaro-mm-sig@lists.linaro.org |
2903 | F: drivers/dma-buf/ | 2903 | F: drivers/dma-buf/ |
2904 | F: include/linux/dma-buf* include/linux/reservation.h | 2904 | F: include/linux/dma-buf* include/linux/reservation.h include/linux/fence.h |
2905 | F: Documentation/dma-buf-sharing.txt | 2905 | F: Documentation/dma-buf-sharing.txt |
2906 | T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git | 2906 | T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git |
2907 | 2907 | ||
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 23b8726962af..00e13ce5cbbd 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -208,6 +208,15 @@ config DMA_SHARED_BUFFER | |||
208 | APIs extension; the file's descriptor can then be passed on to other | 208 | APIs extension; the file's descriptor can then be passed on to other |
209 | driver. | 209 | driver. |
210 | 210 | ||
211 | config FENCE_TRACE | ||
212 | bool "Enable verbose FENCE_TRACE messages" | ||
213 | depends on DMA_SHARED_BUFFER | ||
214 | help | ||
215 | Enable the FENCE_TRACE printks. This will add extra | ||
216 | spam to the console log, but will make it easier to diagnose | ||
217 | lockup related problems for dma-buffers shared across multiple | ||
218 | devices. | ||
219 | |||
211 | config DMA_CMA | 220 | config DMA_CMA |
212 | bool "DMA Contiguous Memory Allocator" | 221 | bool "DMA Contiguous Memory Allocator" |
213 | depends on HAVE_DMA_CONTIGUOUS && CMA | 222 | depends on HAVE_DMA_CONTIGUOUS && CMA |
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 4a4f4c9bacd0..d7825bfe630e 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile | |||
@@ -1 +1 @@ | |||
obj-y := dma-buf.o reservation.o | obj-y := dma-buf.o fence.o reservation.o | ||
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c new file mode 100644 index 000000000000..948bf00d955e --- /dev/null +++ b/drivers/dma-buf/fence.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * Fence mechanism for dma-buf and to allow for asynchronous dma access | ||
3 | * | ||
4 | * Copyright (C) 2012 Canonical Ltd | ||
5 | * Copyright (C) 2012 Texas Instruments | ||
6 | * | ||
7 | * Authors: | ||
8 | * Rob Clark <robdclark@gmail.com> | ||
9 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License version 2 as published by | ||
13 | * the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/atomic.h> | ||
24 | #include <linux/fence.h> | ||
25 | |||
26 | #define CREATE_TRACE_POINTS | ||
27 | #include <trace/events/fence.h> | ||
28 | |||
29 | EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); | ||
30 | EXPORT_TRACEPOINT_SYMBOL(fence_emit); | ||
31 | |||
32 | /** | ||
33 | * fence context counter: each execution context should have its own | ||
34 | * fence context, this allows checking if fences belong to the same | ||
35 | * context or not. One device can have multiple separate contexts, | ||
36 | * and they're used if some engine can run independently of another. | ||
37 | */ | ||
38 | static atomic_t fence_context_counter = ATOMIC_INIT(0); | ||
39 | |||
40 | /** | ||
41 | * fence_context_alloc - allocate an array of fence contexts | ||
42 | * @num: [in] amount of contexts to allocate | ||
43 | * | ||
44 | * This function will return the first index of the number of fences allocated. | ||
45 | * The fence context is used for setting fence->context to a unique number. | ||
46 | */ | ||
47 | unsigned fence_context_alloc(unsigned num) | ||
48 | { | ||
49 | BUG_ON(!num); | ||
50 | return atomic_add_return(num, &fence_context_counter) - num; | ||
51 | } | ||
52 | EXPORT_SYMBOL(fence_context_alloc); | ||
53 | |||
54 | /** | ||
55 | * fence_signal_locked - signal completion of a fence | ||
56 | * @fence: the fence to signal | ||
57 | * | ||
58 | * Signal completion for software callbacks on a fence, this will unblock | ||
59 | * fence_wait() calls and run all the callbacks added with | ||
60 | * fence_add_callback(). Can be called multiple times, but since a fence | ||
61 | * can only go from unsignaled to signaled state, it will only be effective | ||
62 | * the first time. | ||
63 | * | ||
64 | * Unlike fence_signal, this function must be called with fence->lock held. | ||
65 | */ | ||
66 | int fence_signal_locked(struct fence *fence) | ||
67 | { | ||
68 | struct fence_cb *cur, *tmp; | ||
69 | int ret = 0; | ||
70 | |||
71 | if (WARN_ON(!fence)) | ||
72 | return -EINVAL; | ||
73 | |||
74 | if (!ktime_to_ns(fence->timestamp)) { | ||
75 | fence->timestamp = ktime_get(); | ||
76 | smp_mb__before_atomic(); | ||
77 | } | ||
78 | |||
79 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
80 | ret = -EINVAL; | ||
81 | |||
82 | /* | ||
83 | * we might have raced with the unlocked fence_signal, | ||
84 | * still run through all callbacks | ||
85 | */ | ||
86 | } else | ||
87 | trace_fence_signaled(fence); | ||
88 | |||
89 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | ||
90 | list_del_init(&cur->node); | ||
91 | cur->func(fence, cur); | ||
92 | } | ||
93 | return ret; | ||
94 | } | ||
95 | EXPORT_SYMBOL(fence_signal_locked); | ||
96 | |||
97 | /** | ||
98 | * fence_signal - signal completion of a fence | ||
99 | * @fence: the fence to signal | ||
100 | * | ||
101 | * Signal completion for software callbacks on a fence, this will unblock | ||
102 | * fence_wait() calls and run all the callbacks added with | ||
103 | * fence_add_callback(). Can be called multiple times, but since a fence | ||
104 | * can only go from unsignaled to signaled state, it will only be effective | ||
105 | * the first time. | ||
106 | */ | ||
107 | int fence_signal(struct fence *fence) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | |||
111 | if (!fence) | ||
112 | return -EINVAL; | ||
113 | |||
114 | if (!ktime_to_ns(fence->timestamp)) { | ||
115 | fence->timestamp = ktime_get(); | ||
116 | smp_mb__before_atomic(); | ||
117 | } | ||
118 | |||
119 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
120 | return -EINVAL; | ||
121 | |||
122 | trace_fence_signaled(fence); | ||
123 | |||
124 | if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { | ||
125 | struct fence_cb *cur, *tmp; | ||
126 | |||
127 | spin_lock_irqsave(fence->lock, flags); | ||
128 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | ||
129 | list_del_init(&cur->node); | ||
130 | cur->func(fence, cur); | ||
131 | } | ||
132 | spin_unlock_irqrestore(fence->lock, flags); | ||
133 | } | ||
134 | return 0; | ||
135 | } | ||
136 | EXPORT_SYMBOL(fence_signal); | ||
137 | |||
138 | /** | ||
139 | * fence_wait_timeout - sleep until the fence gets signaled | ||
140 | * or until timeout elapses | ||
141 | * @fence: [in] the fence to wait on | ||
142 | * @intr: [in] if true, do an interruptible wait | ||
143 | * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | ||
144 | * | ||
145 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the | ||
146 | * remaining timeout in jiffies on success. Other error values may be | ||
147 | * returned on custom implementations. | ||
148 | * | ||
149 | * Performs a synchronous wait on this fence. It is assumed the caller | ||
150 | * directly or indirectly (buf-mgr between reservation and committing) | ||
151 | * holds a reference to the fence, otherwise the fence might be | ||
152 | * freed before return, resulting in undefined behavior. | ||
153 | */ | ||
154 | signed long | ||
155 | fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) | ||
156 | { | ||
157 | signed long ret; | ||
158 | |||
159 | if (WARN_ON(timeout < 0)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | trace_fence_wait_start(fence); | ||
163 | ret = fence->ops->wait(fence, intr, timeout); | ||
164 | trace_fence_wait_end(fence); | ||
165 | return ret; | ||
166 | } | ||
167 | EXPORT_SYMBOL(fence_wait_timeout); | ||
168 | |||
169 | void fence_release(struct kref *kref) | ||
170 | { | ||
171 | struct fence *fence = | ||
172 | container_of(kref, struct fence, refcount); | ||
173 | |||
174 | trace_fence_destroy(fence); | ||
175 | |||
176 | BUG_ON(!list_empty(&fence->cb_list)); | ||
177 | |||
178 | if (fence->ops->release) | ||
179 | fence->ops->release(fence); | ||
180 | else | ||
181 | fence_free(fence); | ||
182 | } | ||
183 | EXPORT_SYMBOL(fence_release); | ||
184 | |||
185 | void fence_free(struct fence *fence) | ||
186 | { | ||
187 | kfree(fence); | ||
188 | } | ||
189 | EXPORT_SYMBOL(fence_free); | ||
190 | |||
191 | /** | ||
192 | * fence_enable_sw_signaling - enable signaling on fence | ||
193 | * @fence: [in] the fence to enable | ||
194 | * | ||
195 | * this will request for sw signaling to be enabled, to make the fence | ||
196 | * complete as soon as possible | ||
197 | */ | ||
198 | void fence_enable_sw_signaling(struct fence *fence) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | |||
202 | if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && | ||
203 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
204 | trace_fence_enable_signal(fence); | ||
205 | |||
206 | spin_lock_irqsave(fence->lock, flags); | ||
207 | |||
208 | if (!fence->ops->enable_signaling(fence)) | ||
209 | fence_signal_locked(fence); | ||
210 | |||
211 | spin_unlock_irqrestore(fence->lock, flags); | ||
212 | } | ||
213 | } | ||
214 | EXPORT_SYMBOL(fence_enable_sw_signaling); | ||
215 | |||
216 | /** | ||
217 | * fence_add_callback - add a callback to be called when the fence | ||
218 | * is signaled | ||
219 | * @fence: [in] the fence to wait on | ||
220 | * @cb: [in] the callback to register | ||
221 | * @func: [in] the function to call | ||
222 | * | ||
223 | * cb will be initialized by fence_add_callback, no initialization | ||
224 | * by the caller is required. Any number of callbacks can be registered | ||
225 | * to a fence, but a callback can only be registered to one fence at a time. | ||
226 | * | ||
227 | * Note that the callback can be called from an atomic context. If | ||
228 | * fence is already signaled, this function will return -ENOENT (and | ||
229 | * *not* call the callback) | ||
230 | * | ||
231 | * Add a software callback to the fence. Same restrictions apply to | ||
232 | * refcount as it does to fence_wait, however the caller doesn't need to | ||
233 | * keep a refcount to fence afterwards: when software access is enabled, | ||
234 | * the creator of the fence is required to keep the fence alive until | ||
235 | * after it signals with fence_signal. The callback itself can be called | ||
236 | * from irq context. | ||
237 | * | ||
238 | */ | ||
239 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | ||
240 | fence_func_t func) | ||
241 | { | ||
242 | unsigned long flags; | ||
243 | int ret = 0; | ||
244 | bool was_set; | ||
245 | |||
246 | if (WARN_ON(!fence || !func)) | ||
247 | return -EINVAL; | ||
248 | |||
249 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
250 | INIT_LIST_HEAD(&cb->node); | ||
251 | return -ENOENT; | ||
252 | } | ||
253 | |||
254 | spin_lock_irqsave(fence->lock, flags); | ||
255 | |||
256 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | ||
257 | |||
258 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
259 | ret = -ENOENT; | ||
260 | else if (!was_set) { | ||
261 | trace_fence_enable_signal(fence); | ||
262 | |||
263 | if (!fence->ops->enable_signaling(fence)) { | ||
264 | fence_signal_locked(fence); | ||
265 | ret = -ENOENT; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | if (!ret) { | ||
270 | cb->func = func; | ||
271 | list_add_tail(&cb->node, &fence->cb_list); | ||
272 | } else | ||
273 | INIT_LIST_HEAD(&cb->node); | ||
274 | spin_unlock_irqrestore(fence->lock, flags); | ||
275 | |||
276 | return ret; | ||
277 | } | ||
278 | EXPORT_SYMBOL(fence_add_callback); | ||
279 | |||
280 | /** | ||
281 | * fence_remove_callback - remove a callback from the signaling list | ||
282 | * @fence: [in] the fence to wait on | ||
283 | * @cb: [in] the callback to remove | ||
284 | * | ||
285 | * Remove a previously queued callback from the fence. This function returns | ||
286 | * true if the callback is succesfully removed, or false if the fence has | ||
287 | * already been signaled. | ||
288 | * | ||
289 | * *WARNING*: | ||
290 | * Cancelling a callback should only be done if you really know what you're | ||
291 | * doing, since deadlocks and race conditions could occur all too easily. For | ||
292 | * this reason, it should only ever be done on hardware lockup recovery, | ||
293 | * with a reference held to the fence. | ||
294 | */ | ||
295 | bool | ||
296 | fence_remove_callback(struct fence *fence, struct fence_cb *cb) | ||
297 | { | ||
298 | unsigned long flags; | ||
299 | bool ret; | ||
300 | |||
301 | spin_lock_irqsave(fence->lock, flags); | ||
302 | |||
303 | ret = !list_empty(&cb->node); | ||
304 | if (ret) | ||
305 | list_del_init(&cb->node); | ||
306 | |||
307 | spin_unlock_irqrestore(fence->lock, flags); | ||
308 | |||
309 | return ret; | ||
310 | } | ||
311 | EXPORT_SYMBOL(fence_remove_callback); | ||
312 | |||
313 | struct default_wait_cb { | ||
314 | struct fence_cb base; | ||
315 | struct task_struct *task; | ||
316 | }; | ||
317 | |||
318 | static void | ||
319 | fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
320 | { | ||
321 | struct default_wait_cb *wait = | ||
322 | container_of(cb, struct default_wait_cb, base); | ||
323 | |||
324 | wake_up_state(wait->task, TASK_NORMAL); | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * fence_default_wait - default sleep until the fence gets signaled | ||
329 | * or until timeout elapses | ||
330 | * @fence: [in] the fence to wait on | ||
331 | * @intr: [in] if true, do an interruptible wait | ||
332 | * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | ||
333 | * | ||
334 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the | ||
335 | * remaining timeout in jiffies on success. | ||
336 | */ | ||
337 | signed long | ||
338 | fence_default_wait(struct fence *fence, bool intr, signed long timeout) | ||
339 | { | ||
340 | struct default_wait_cb cb; | ||
341 | unsigned long flags; | ||
342 | signed long ret = timeout; | ||
343 | bool was_set; | ||
344 | |||
345 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
346 | return timeout; | ||
347 | |||
348 | spin_lock_irqsave(fence->lock, flags); | ||
349 | |||
350 | if (intr && signal_pending(current)) { | ||
351 | ret = -ERESTARTSYS; | ||
352 | goto out; | ||
353 | } | ||
354 | |||
355 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | ||
356 | |||
357 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
358 | goto out; | ||
359 | |||
360 | if (!was_set) { | ||
361 | trace_fence_enable_signal(fence); | ||
362 | |||
363 | if (!fence->ops->enable_signaling(fence)) { | ||
364 | fence_signal_locked(fence); | ||
365 | goto out; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | cb.base.func = fence_default_wait_cb; | ||
370 | cb.task = current; | ||
371 | list_add(&cb.base.node, &fence->cb_list); | ||
372 | |||
373 | while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { | ||
374 | if (intr) | ||
375 | __set_current_state(TASK_INTERRUPTIBLE); | ||
376 | else | ||
377 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
378 | spin_unlock_irqrestore(fence->lock, flags); | ||
379 | |||
380 | ret = schedule_timeout(ret); | ||
381 | |||
382 | spin_lock_irqsave(fence->lock, flags); | ||
383 | if (ret > 0 && intr && signal_pending(current)) | ||
384 | ret = -ERESTARTSYS; | ||
385 | } | ||
386 | |||
387 | if (!list_empty(&cb.base.node)) | ||
388 | list_del(&cb.base.node); | ||
389 | __set_current_state(TASK_RUNNING); | ||
390 | |||
391 | out: | ||
392 | spin_unlock_irqrestore(fence->lock, flags); | ||
393 | return ret; | ||
394 | } | ||
395 | EXPORT_SYMBOL(fence_default_wait); | ||
396 | |||
397 | /** | ||
398 | * fence_init - Initialize a custom fence. | ||
399 | * @fence: [in] the fence to initialize | ||
400 | * @ops: [in] the fence_ops for operations on this fence | ||
401 | * @lock: [in] the irqsafe spinlock to use for locking this fence | ||
402 | * @context: [in] the execution context this fence is run on | ||
403 | * @seqno: [in] a linear increasing sequence number for this context | ||
404 | * | ||
405 | * Initializes an allocated fence, the caller doesn't have to keep its | ||
406 | * refcount after committing with this fence, but it will need to hold a | ||
407 | * refcount again if fence_ops.enable_signaling gets called. This can | ||
408 | * be used for other implementing other types of fence. | ||
409 | * | ||
410 | * context and seqno are used for easy comparison between fences, allowing | ||
411 | * to check which fence is later by simply using fence_later. | ||
412 | */ | ||
413 | void | ||
414 | fence_init(struct fence *fence, const struct fence_ops *ops, | ||
415 | spinlock_t *lock, unsigned context, unsigned seqno) | ||
416 | { | ||
417 | BUG_ON(!lock); | ||
418 | BUG_ON(!ops || !ops->wait || !ops->enable_signaling || | ||
419 | !ops->get_driver_name || !ops->get_timeline_name); | ||
420 | |||
421 | kref_init(&fence->refcount); | ||
422 | fence->ops = ops; | ||
423 | INIT_LIST_HEAD(&fence->cb_list); | ||
424 | fence->lock = lock; | ||
425 | fence->context = context; | ||
426 | fence->seqno = seqno; | ||
427 | fence->flags = 0UL; | ||
428 | |||
429 | trace_fence_init(fence); | ||
430 | } | ||
431 | EXPORT_SYMBOL(fence_init); | ||
diff --git a/include/linux/fence.h b/include/linux/fence.h new file mode 100644 index 000000000000..b935cc650123 --- /dev/null +++ b/include/linux/fence.h | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Fence mechanism for dma-buf to allow for asynchronous dma access | ||
3 | * | ||
4 | * Copyright (C) 2012 Canonical Ltd | ||
5 | * Copyright (C) 2012 Texas Instruments | ||
6 | * | ||
7 | * Authors: | ||
8 | * Rob Clark <robdclark@gmail.com> | ||
9 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License version 2 as published by | ||
13 | * the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | */ | ||
20 | |||
21 | #ifndef __LINUX_FENCE_H | ||
22 | #define __LINUX_FENCE_H | ||
23 | |||
24 | #include <linux/err.h> | ||
25 | #include <linux/wait.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/bitops.h> | ||
28 | #include <linux/kref.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/printk.h> | ||
31 | |||
32 | struct fence; | ||
33 | struct fence_ops; | ||
34 | struct fence_cb; | ||
35 | |||
36 | /** | ||
37 | * struct fence - software synchronization primitive | ||
38 | * @refcount: refcount for this fence | ||
39 | * @ops: fence_ops associated with this fence | ||
40 | * @cb_list: list of all callbacks to call | ||
41 | * @lock: spin_lock_irqsave used for locking | ||
42 | * @context: execution context this fence belongs to, returned by | ||
43 | * fence_context_alloc() | ||
44 | * @seqno: the sequence number of this fence inside the execution context, | ||
45 | * can be compared to decide which fence would be signaled later. | ||
46 | * @flags: A mask of FENCE_FLAG_* defined below | ||
47 | * @timestamp: Timestamp when the fence was signaled. | ||
48 | * @status: Optional, only valid if < 0, must be set before calling | ||
49 | * fence_signal, indicates that the fence has completed with an error. | ||
50 | * | ||
51 | * the flags member must be manipulated and read using the appropriate | ||
52 | * atomic ops (bit_*), so taking the spinlock will not be needed most | ||
53 | * of the time. | ||
54 | * | ||
55 | * FENCE_FLAG_SIGNALED_BIT - fence is already signaled | ||
56 | * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* | ||
57 | * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the | ||
58 | * implementer of the fence for its own purposes. Can be used in different | ||
59 | * ways by different fence implementers, so do not rely on this. | ||
60 | * | ||
61 | * *) Since atomic bitops are used, this is not guaranteed to be the case. | ||
62 | * Particularly, if the bit was set, but fence_signal was called right | ||
63 | * before this bit was set, it would have been able to set the | ||
64 | * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. | ||
65 | * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting | ||
66 | * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that | ||
67 | * after fence_signal was called, any enable_signaling call will have either | ||
68 | * been completed, or never called at all. | ||
69 | */ | ||
70 | struct fence { | ||
71 | struct kref refcount; | ||
72 | const struct fence_ops *ops; | ||
73 | struct list_head cb_list; | ||
74 | spinlock_t *lock; | ||
75 | unsigned context, seqno; | ||
76 | unsigned long flags; | ||
77 | ktime_t timestamp; | ||
78 | int status; | ||
79 | }; | ||
80 | |||
81 | enum fence_flag_bits { | ||
82 | FENCE_FLAG_SIGNALED_BIT, | ||
83 | FENCE_FLAG_ENABLE_SIGNAL_BIT, | ||
84 | FENCE_FLAG_USER_BITS, /* must always be last member */ | ||
85 | }; | ||
86 | |||
87 | typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); | ||
88 | |||
89 | /** | ||
90 | * struct fence_cb - callback for fence_add_callback | ||
91 | * @node: used by fence_add_callback to append this struct to fence::cb_list | ||
92 | * @func: fence_func_t to call | ||
93 | * | ||
94 | * This struct will be initialized by fence_add_callback, additional | ||
95 | * data can be passed along by embedding fence_cb in another struct. | ||
96 | */ | ||
97 | struct fence_cb { | ||
98 | struct list_head node; | ||
99 | fence_func_t func; | ||
100 | }; | ||
101 | |||
102 | /** | ||
103 | * struct fence_ops - operations implemented for fence | ||
104 | * @get_driver_name: returns the driver name. | ||
105 | * @get_timeline_name: return the name of the context this fence belongs to. | ||
106 | * @enable_signaling: enable software signaling of fence. | ||
107 | * @signaled: [optional] peek whether the fence is signaled, can be null. | ||
108 | * @wait: custom wait implementation, or fence_default_wait. | ||
109 | * @release: [optional] called on destruction of fence, can be null | ||
110 | * @fill_driver_data: [optional] callback to fill in free-form debug info | ||
111 | * Returns amount of bytes filled, or -errno. | ||
112 | * @fence_value_str: [optional] fills in the value of the fence as a string | ||
113 | * @timeline_value_str: [optional] fills in the current value of the timeline | ||
114 | * as a string | ||
115 | * | ||
116 | * Notes on enable_signaling: | ||
117 | * For fence implementations that have the capability for hw->hw | ||
118 | * signaling, they can implement this op to enable the necessary | ||
119 | * irqs, or insert commands into cmdstream, etc. This is called | ||
120 | * in the first wait() or add_callback() path to let the fence | ||
121 | * implementation know that there is another driver waiting on | ||
122 | * the signal (ie. hw->sw case). | ||
123 | * | ||
124 | * This function can be called called from atomic context, but not | ||
125 | * from irq context, so normal spinlocks can be used. | ||
126 | * | ||
127 | * A return value of false indicates the fence already passed, | ||
128 | * or some failure occured that made it impossible to enable | ||
129 | * signaling. True indicates succesful enabling. | ||
130 | * | ||
131 | * fence->status may be set in enable_signaling, but only when false is | ||
132 | * returned. | ||
133 | * | ||
134 | * Calling fence_signal before enable_signaling is called allows | ||
135 | * for a tiny race window in which enable_signaling is called during, | ||
136 | * before, or after fence_signal. To fight this, it is recommended | ||
137 | * that before enable_signaling returns true an extra reference is | ||
138 | * taken on the fence, to be released when the fence is signaled. | ||
139 | * This will mean fence_signal will still be called twice, but | ||
140 | * the second time will be a noop since it was already signaled. | ||
141 | * | ||
142 | * Notes on signaled: | ||
143 | * May set fence->status if returning true. | ||
144 | * | ||
145 | * Notes on wait: | ||
146 | * Must not be NULL, set to fence_default_wait for default implementation. | ||
147 | * the fence_default_wait implementation should work for any fence, as long | ||
148 | * as enable_signaling works correctly. | ||
149 | * | ||
150 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was | ||
151 | * interrupted, and remaining jiffies if fence has signaled, or 0 if wait | ||
152 | * timed out. Can also return other error values on custom implementations, | ||
153 | * which should be treated as if the fence is signaled. For example a hardware | ||
154 | * lockup could be reported like that. | ||
155 | * | ||
156 | * Notes on release: | ||
157 | * Can be NULL, this function allows additional commands to run on | ||
158 | * destruction of the fence. Can be called from irq context. | ||
159 | * If pointer is set to NULL, kfree will get called instead. | ||
160 | */ | ||
161 | |||
162 | struct fence_ops { | ||
163 | const char * (*get_driver_name)(struct fence *fence); | ||
164 | const char * (*get_timeline_name)(struct fence *fence); | ||
165 | bool (*enable_signaling)(struct fence *fence); | ||
166 | bool (*signaled)(struct fence *fence); | ||
167 | signed long (*wait)(struct fence *fence, bool intr, signed long timeout); | ||
168 | void (*release)(struct fence *fence); | ||
169 | |||
170 | int (*fill_driver_data)(struct fence *fence, void *data, int size); | ||
171 | void (*fence_value_str)(struct fence *fence, char *str, int size); | ||
172 | void (*timeline_value_str)(struct fence *fence, char *str, int size); | ||
173 | }; | ||
174 | |||
175 | void fence_init(struct fence *fence, const struct fence_ops *ops, | ||
176 | spinlock_t *lock, unsigned context, unsigned seqno); | ||
177 | |||
178 | void fence_release(struct kref *kref); | ||
179 | void fence_free(struct fence *fence); | ||
180 | |||
181 | /** | ||
182 | * fence_get - increases refcount of the fence | ||
183 | * @fence: [in] fence to increase refcount of | ||
184 | * | ||
185 | * Returns the same fence, with refcount increased by 1. | ||
186 | */ | ||
187 | static inline struct fence *fence_get(struct fence *fence) | ||
188 | { | ||
189 | if (fence) | ||
190 | kref_get(&fence->refcount); | ||
191 | return fence; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * fence_put - decreases refcount of the fence | ||
196 | * @fence: [in] fence to reduce refcount of | ||
197 | */ | ||
198 | static inline void fence_put(struct fence *fence) | ||
199 | { | ||
200 | if (fence) | ||
201 | kref_put(&fence->refcount, fence_release); | ||
202 | } | ||
203 | |||
204 | int fence_signal(struct fence *fence); | ||
205 | int fence_signal_locked(struct fence *fence); | ||
206 | signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); | ||
207 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | ||
208 | fence_func_t func); | ||
209 | bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); | ||
210 | void fence_enable_sw_signaling(struct fence *fence); | ||
211 | |||
212 | /** | ||
213 | * fence_is_signaled_locked - Return an indication if the fence is signaled yet. | ||
214 | * @fence: [in] the fence to check | ||
215 | * | ||
216 | * Returns true if the fence was already signaled, false if not. Since this | ||
217 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
218 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | ||
219 | * haven't been called before. | ||
220 | * | ||
221 | * This function requires fence->lock to be held. | ||
222 | */ | ||
223 | static inline bool | ||
224 | fence_is_signaled_locked(struct fence *fence) | ||
225 | { | ||
226 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
227 | return true; | ||
228 | |||
229 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
230 | fence_signal_locked(fence); | ||
231 | return true; | ||
232 | } | ||
233 | |||
234 | return false; | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * fence_is_signaled - Return an indication if the fence is signaled yet. | ||
239 | * @fence: [in] the fence to check | ||
240 | * | ||
241 | * Returns true if the fence was already signaled, false if not. Since this | ||
242 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
243 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | ||
244 | * haven't been called before. | ||
245 | * | ||
246 | * It's recommended for seqno fences to call fence_signal when the | ||
247 | * operation is complete, it makes it possible to prevent issues from | ||
248 | * wraparound between time of issue and time of use by checking the return | ||
249 | * value of this function before calling hardware-specific wait instructions. | ||
250 | */ | ||
251 | static inline bool | ||
252 | fence_is_signaled(struct fence *fence) | ||
253 | { | ||
254 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
255 | return true; | ||
256 | |||
257 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
258 | fence_signal(fence); | ||
259 | return true; | ||
260 | } | ||
261 | |||
262 | return false; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * fence_later - return the chronologically later fence | ||
267 | * @f1: [in] the first fence from the same context | ||
268 | * @f2: [in] the second fence from the same context | ||
269 | * | ||
270 | * Returns NULL if both fences are signaled, otherwise the fence that would be | ||
271 | * signaled last. Both fences must be from the same context, since a seqno is | ||
272 | * not re-used across contexts. | ||
273 | */ | ||
274 | static inline struct fence *fence_later(struct fence *f1, struct fence *f2) | ||
275 | { | ||
276 | if (WARN_ON(f1->context != f2->context)) | ||
277 | return NULL; | ||
278 | |||
279 | /* | ||
280 | * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been | ||
281 | * set if enable_signaling wasn't called, and enabling that here is | ||
282 | * overkill. | ||
283 | */ | ||
284 | if (f2->seqno - f1->seqno <= INT_MAX) | ||
285 | return fence_is_signaled(f2) ? NULL : f2; | ||
286 | else | ||
287 | return fence_is_signaled(f1) ? NULL : f1; | ||
288 | } | ||
289 | |||
290 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); | ||
291 | |||
292 | |||
293 | /** | ||
294 | * fence_wait - sleep until the fence gets signaled | ||
295 | * @fence: [in] the fence to wait on | ||
296 | * @intr: [in] if true, do an interruptible wait | ||
297 | * | ||
298 | * This function will return -ERESTARTSYS if interrupted by a signal, | ||
299 | * or 0 if the fence was signaled. Other error values may be | ||
300 | * returned on custom implementations. | ||
301 | * | ||
302 | * Performs a synchronous wait on this fence. It is assumed the caller | ||
303 | * directly or indirectly holds a reference to the fence, otherwise the | ||
304 | * fence might be freed before return, resulting in undefined behavior. | ||
305 | */ | ||
306 | static inline signed long fence_wait(struct fence *fence, bool intr) | ||
307 | { | ||
308 | signed long ret; | ||
309 | |||
310 | /* Since fence_wait_timeout cannot timeout with | ||
311 | * MAX_SCHEDULE_TIMEOUT, only valid return values are | ||
312 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. | ||
313 | */ | ||
314 | ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); | ||
315 | |||
316 | return ret < 0 ? ret : 0; | ||
317 | } | ||
318 | |||
319 | unsigned fence_context_alloc(unsigned num); | ||
320 | |||
321 | #define FENCE_TRACE(f, fmt, args...) \ | ||
322 | do { \ | ||
323 | struct fence *__ff = (f); \ | ||
324 | if (config_enabled(CONFIG_FENCE_TRACE)) \ | ||
325 | pr_info("f %u#%u: " fmt, \ | ||
326 | __ff->context, __ff->seqno, ##args); \ | ||
327 | } while (0) | ||
328 | |||
329 | #define FENCE_WARN(f, fmt, args...) \ | ||
330 | do { \ | ||
331 | struct fence *__ff = (f); \ | ||
332 | pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ | ||
333 | ##args); \ | ||
334 | } while (0) | ||
335 | |||
336 | #define FENCE_ERR(f, fmt, args...) \ | ||
337 | do { \ | ||
338 | struct fence *__ff = (f); \ | ||
339 | pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ | ||
340 | ##args); \ | ||
341 | } while (0) | ||
342 | |||
343 | #endif /* __LINUX_FENCE_H */ | ||
diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h new file mode 100644 index 000000000000..98feb1b82896 --- /dev/null +++ b/include/trace/events/fence.h | |||
@@ -0,0 +1,128 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM fence | ||
3 | |||
4 | #if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_FENCE_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | struct fence; | ||
10 | |||
11 | TRACE_EVENT(fence_annotate_wait_on, | ||
12 | |||
13 | /* fence: the fence waiting on f1, f1: the fence to be waited on. */ | ||
14 | TP_PROTO(struct fence *fence, struct fence *f1), | ||
15 | |||
16 | TP_ARGS(fence, f1), | ||
17 | |||
18 | TP_STRUCT__entry( | ||
19 | __string(driver, fence->ops->get_driver_name(fence)) | ||
20 | __string(timeline, fence->ops->get_driver_name(fence)) | ||
21 | __field(unsigned int, context) | ||
22 | __field(unsigned int, seqno) | ||
23 | |||
24 | __string(waiting_driver, f1->ops->get_driver_name(f1)) | ||
25 | __string(waiting_timeline, f1->ops->get_timeline_name(f1)) | ||
26 | __field(unsigned int, waiting_context) | ||
27 | __field(unsigned int, waiting_seqno) | ||
28 | ), | ||
29 | |||
30 | TP_fast_assign( | ||
31 | __assign_str(driver, fence->ops->get_driver_name(fence)) | ||
32 | __assign_str(timeline, fence->ops->get_timeline_name(fence)) | ||
33 | __entry->context = fence->context; | ||
34 | __entry->seqno = fence->seqno; | ||
35 | |||
36 | __assign_str(waiting_driver, f1->ops->get_driver_name(f1)) | ||
37 | __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1)) | ||
38 | __entry->waiting_context = f1->context; | ||
39 | __entry->waiting_seqno = f1->seqno; | ||
40 | |||
41 | ), | ||
42 | |||
43 | TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ | ||
44 | "waits on driver=%s timeline=%s context=%u seqno=%u", | ||
45 | __get_str(driver), __get_str(timeline), __entry->context, | ||
46 | __entry->seqno, | ||
47 | __get_str(waiting_driver), __get_str(waiting_timeline), | ||
48 | __entry->waiting_context, __entry->waiting_seqno) | ||
49 | ); | ||
50 | |||
/*
 * Common event class for the single-fence trace events defined below:
 * each record carries the driver name, timeline name, context and
 * seqno, which together identify one fence.
 */
DECLARE_EVENT_CLASS(fence,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence),

	TP_STRUCT__entry(
		__string(driver, fence->ops->get_driver_name(fence))
		__string(timeline, fence->ops->get_timeline_name(fence))
		__field(unsigned int, context)
		__field(unsigned int, seqno)
	),

	TP_fast_assign(
		__assign_str(driver, fence->ops->get_driver_name(fence))
		__assign_str(timeline, fence->ops->get_timeline_name(fence))
		__entry->context = fence->context;
		__entry->seqno = fence->seqno;
	),

	TP_printk("driver=%s timeline=%s context=%u seqno=%u",
		  __get_str(driver), __get_str(timeline), __entry->context,
		  __entry->seqno)
);
75 | |||
/*
 * Instantiations of the "fence" event class above, one per point in a
 * fence's life cycle.  They are fired from the correspondingly-named
 * paths in drivers/dma-buf/fence.c (init, signal, enable-signaling,
 * the wait entry/exit pair, and final teardown).
 */
DEFINE_EVENT(fence, fence_emit,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

DEFINE_EVENT(fence, fence_init,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

DEFINE_EVENT(fence, fence_destroy,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

DEFINE_EVENT(fence, fence_enable_signal,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

DEFINE_EVENT(fence, fence_signaled,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

/* fence_wait_start/fence_wait_end bracket a blocking wait on the fence. */
DEFINE_EVENT(fence, fence_wait_start,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);

DEFINE_EVENT(fence, fence_wait_end,

	TP_PROTO(struct fence *fence),

	TP_ARGS(fence)
);
124 | |||
125 | #endif /* _TRACE_FENCE_H */ | ||
126 | |||
127 | /* This part must be outside protection */ | ||
128 | #include <trace/define_trace.h> | ||