author		Maarten Lankhorst <maarten.lankhorst@canonical.com>	2014-07-01 06:57:31 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>		2014-07-08 16:28:26 -0400
commit		0f0d8406fb9c3c5ed1b1609a0f51c504c5b37aea (patch)
tree		63dbd1e8f3f41406ea0715b0889d7bd6545301e2
parent		3aac4502fd3f80dcf7e65dbf6edd8676893c1f46 (diff)
android: convert sync to fence api, v6
Just to show it's easy.
Android syncpoints can be mapped to a timeline. This removes the need
to maintain a separate api for synchronization. I've left the android
trace events in place, but the core fence events should already be
sufficient for debugging.
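
To illustrate the mapping, here is a condensed sketch drawn from the
sync_pt_create() hunk below (both lines are excerpts from this patch, not
extra code): a timeline becomes a fence context, and each syncpoint becomes
a fence on that context, ordered by the timeline's running counter.

	/* at timeline creation: one fence context per timeline */
	obj->context = fence_context_alloc(1);

	/* at syncpoint creation: a fence on that context; ++obj->value
	 * is the point's position on the timeline */
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);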
v2:
- Call fence_remove_callback in sync_fence_free if not all fences have fired.
v3:
- Merge Colin Cross' bugfixes, and the android fence merge optimization (see the excerpt after this changelog).
v4:
- Merge with the upstream fixes.
v5:
- Fix small style issues pointed out by Thomas Hellstrom.
v6:
- Fix for updates to fence api.
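
For reference, the merge optimization mentioned under v3 works as follows
(excerpted from the sync_fence_merge() hunk below): each fence keeps its
points sorted by fence context, so merging is a single linear walk, and two
points on the same timeline collapse to whichever signals later, using a
wraparound-safe seqno comparison:

	if (pt_a->context < pt_b->context) {
		sync_fence_add_pt(fence, &i, pt_a);
		i_a++;
	} else if (pt_a->context > pt_b->context) {
		sync_fence_add_pt(fence, &i, pt_b);
		i_b++;
	} else {
		/* same timeline: keep the later of the two points */
		if (pt_a->seqno - pt_b->seqno <= INT_MAX)
			sync_fence_add_pt(fence, &i, pt_a);
		else
			sync_fence_add_pt(fence, &i, pt_b);
		i_a++;
		i_b++;
	}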
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: John Stultz <john.stultz@linaro.org>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/staging/android/Kconfig		|   1
-rw-r--r--	drivers/staging/android/Makefile	|   2
-rw-r--r--	drivers/staging/android/sw_sync.c	|   6
-rw-r--r--	drivers/staging/android/sync.c		| 913
-rw-r--r--	drivers/staging/android/sync.h		|  79
-rw-r--r--	drivers/staging/android/sync_debug.c	| 247
-rw-r--r--	drivers/staging/android/trace/sync.h	|  12
7 files changed, 609 insertions(+), 651 deletions(-)
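
For orientation, a rough usage sketch of the converted in-kernel API (the
timeline variable and the fd handling are illustrative only, error handling
omitted; the functions themselves are the ones exported below):

	struct sync_pt *pt = sync_pt_create(timeline, sizeof(struct sync_pt));
	struct sync_fence *fence = sync_fence_create("example", pt);
	int fd = get_unused_fd_flags(O_CLOEXEC);

	sync_fence_install(fence, fd);		/* hand the fence to userspace */
	...
	sync_timeline_signal(timeline);		/* timeline advances, fence can fire */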
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 99e484f845f2..51607e9aa049 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -88,6 +88,7 @@ config SYNC
 	bool "Synchronization framework"
 	default n
 	select ANON_INODES
+	select DMA_SHARED_BUFFER
 	---help---
 	  This option enables the framework for synchronization between multiple
 	  drivers. Sync implementations can take advantage of hardware
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 0a01e1914905..517ad5ffa429 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
 obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
 obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
-obj-$(CONFIG_SYNC)			+= sync.o
+obj-$(CONFIG_SYNC)			+= sync.o sync_debug.o
 obj-$(CONFIG_SW_SYNC)			+= sw_sync.o
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 12a136ec1cec..a76db3ff87cb 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -50,7 +50,7 @@ static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
 }
@@ -59,7 +59,7 @@ static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return sw_sync_cmp(obj->value, pt->value) >= 0;
 }
@@ -97,7 +97,6 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
 				 char *str, int size)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
 	snprintf(str, size, "%d", pt->value);
 }
 
@@ -157,7 +156,6 @@ static int sw_sync_open(struct inode *inode, struct file *file)
 static int sw_sync_release(struct inode *inode, struct file *file)
 {
 	struct sw_sync_timeline *obj = file->private_data;
-
 	sync_timeline_destroy(&obj->obj);
 	return 0;
 }
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 18174f7c871c..c9a0c2cdc81a 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -31,22 +31,13 @@
 #define CREATE_TRACE_POINTS
 #include "trace/sync.h"
 
-static void sync_fence_signal_pt(struct sync_pt *pt);
-static int _sync_pt_has_signaled(struct sync_pt *pt);
-static void sync_fence_free(struct kref *kref);
-static void sync_dump(void);
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
+static const struct fence_ops android_fence_ops;
+static const struct file_operations sync_fence_fops;
 
 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 					   int size, const char *name)
 {
 	struct sync_timeline *obj;
-	unsigned long flags;
 
 	if (size < sizeof(struct sync_timeline))
 		return NULL;
@@ -57,17 +48,14 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 
 	kref_init(&obj->kref);
 	obj->ops = ops;
+	obj->context = fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
 	INIT_LIST_HEAD(&obj->child_list_head);
-	spin_lock_init(&obj->child_list_lock);
-
 	INIT_LIST_HEAD(&obj->active_list_head);
-	spin_lock_init(&obj->active_list_lock);
+	spin_lock_init(&obj->child_list_lock);
 
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	sync_timeline_debug_add(obj);
 
 	return obj;
 }
@@ -77,11 +65,8 @@ static void sync_timeline_free(struct kref *kref)
 {
 	struct sync_timeline *obj =
 		container_of(kref, struct sync_timeline, kref);
-	unsigned long flags;
 
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_del(&obj->sync_timeline_list);
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	sync_timeline_debug_remove(obj);
 
 	if (obj->ops->release_obj)
 		obj->ops->release_obj(obj);
@@ -89,6 +74,16 @@ static void sync_timeline_free(struct kref *kref)
 	kfree(obj);
 }
 
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+	kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+	kref_put(&obj->kref, sync_timeline_free);
+}
+
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
 	obj->destroyed = true;
@@ -102,75 +97,33 @@ void sync_timeline_destroy(struct sync_timeline *obj)
 	 * signal any children that their parent is going away.
 	 */
 	sync_timeline_signal(obj);
-
-	kref_put(&obj->kref, sync_timeline_free);
+	sync_timeline_put(obj);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
 
-static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
-{
-	unsigned long flags;
-
-	pt->parent = obj;
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_timeline_remove_pt(struct sync_pt *pt)
-{
-	struct sync_timeline *obj = pt->parent;
-	unsigned long flags;
-
-	spin_lock_irqsave(&obj->active_list_lock, flags);
-	if (!list_empty(&pt->active_list))
-		list_del_init(&pt->active_list);
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	if (!list_empty(&pt->child_list))
-		list_del_init(&pt->child_list);
-
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
 void sync_timeline_signal(struct sync_timeline *obj)
 {
 	unsigned long flags;
 	LIST_HEAD(signaled_pts);
-	struct list_head *pos, *n;
+	struct sync_pt *pt, *next;
 
 	trace_sync_timeline(obj);
 
-	spin_lock_irqsave(&obj->active_list_lock, flags);
-
-	list_for_each_safe(pos, n, &obj->active_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, active_list);
+	spin_lock_irqsave(&obj->child_list_lock, flags);
 
-		if (_sync_pt_has_signaled(pt)) {
-			list_del_init(pos);
-			list_add(&pt->signaled_list, &signaled_pts);
-			kref_get(&pt->fence->kref);
-		}
+	list_for_each_entry_safe(pt, next, &obj->active_list_head,
+				 active_list) {
+		if (fence_is_signaled_locked(&pt->base))
+			list_del(&pt->active_list);
 	}
 
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
-	list_for_each_safe(pos, n, &signaled_pts) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, signaled_list);
-
-		list_del_init(pos);
-		sync_fence_signal_pt(pt);
-		kref_put(&pt->fence->kref, sync_fence_free);
-	}
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 }
 EXPORT_SYMBOL(sync_timeline_signal);
 
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
 {
+	unsigned long flags;
 	struct sync_pt *pt;
 
 	if (size < sizeof(struct sync_pt))
@@ -180,87 +133,28 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
 	if (pt == NULL)
 		return NULL;
 
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	sync_timeline_get(obj);
+	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
+		   obj->context, ++obj->value);
+	list_add_tail(&pt->child_list, &obj->child_list_head);
 	INIT_LIST_HEAD(&pt->active_list);
-	kref_get(&parent->kref);
-	sync_timeline_add_pt(parent, pt);
-
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 	return pt;
 }
 EXPORT_SYMBOL(sync_pt_create);
 
 void sync_pt_free(struct sync_pt *pt)
 {
-	if (pt->parent->ops->free_pt)
-		pt->parent->ops->free_pt(pt);
-
-	sync_timeline_remove_pt(pt);
-
-	kref_put(&pt->parent->kref, sync_timeline_free);
-
-	kfree(pt);
+	fence_put(&pt->base);
 }
 EXPORT_SYMBOL(sync_pt_free);
 
-/* call with pt->parent->active_list_lock held */
-static int _sync_pt_has_signaled(struct sync_pt *pt)
-{
-	int old_status = pt->status;
-
-	if (!pt->status)
-		pt->status = pt->parent->ops->has_signaled(pt);
-
-	if (!pt->status && pt->parent->destroyed)
-		pt->status = -ENOENT;
-
-	if (pt->status != old_status)
-		pt->timestamp = ktime_get();
-
-	return pt->status;
-}
-
-static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
-{
-	return pt->parent->ops->dup(pt);
-}
-
-/* Adds a sync pt to the active queue.  Called when added to a fence */
-static void sync_pt_activate(struct sync_pt *pt)
-{
-	struct sync_timeline *obj = pt->parent;
-	unsigned long flags;
-	int err;
-
-	spin_lock_irqsave(&obj->active_list_lock, flags);
-
-	err = _sync_pt_has_signaled(pt);
-	if (err != 0)
-		goto out;
-
-	list_add_tail(&pt->active_list, &obj->active_list_head);
-
-out:
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file);
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
-			     unsigned long arg);
-
-
-static const struct file_operations sync_fence_fops = {
-	.release = sync_fence_release,
-	.poll = sync_fence_poll,
-	.unlocked_ioctl = sync_fence_ioctl,
-	.compat_ioctl = sync_fence_ioctl,
-};
-
-static struct sync_fence *sync_fence_alloc(const char *name)
+static struct sync_fence *sync_fence_alloc(int size, const char *name)
 {
 	struct sync_fence *fence;
-	unsigned long flags;
 
-	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+	fence = kzalloc(size, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
 
@@ -272,16 +166,8 @@ static struct sync_fence *sync_fence_alloc(const char *name)
 	kref_init(&fence->kref);
 	strlcpy(fence->name, name, sizeof(fence->name));
 
-	INIT_LIST_HEAD(&fence->pt_list_head);
-	INIT_LIST_HEAD(&fence->waiter_list_head);
-	spin_lock_init(&fence->waiter_list_lock);
-
 	init_waitqueue_head(&fence->wq);
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
 	return fence;
 
 err:
@@ -289,120 +175,42 @@ err:
 	return NULL;
 }
 
-/* TODO: implement a create which takes more that one sync_pt */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 {
+	struct sync_fence_cb *check;
 	struct sync_fence *fence;
 
-	if (pt->fence)
-		return NULL;
-
-	fence = sync_fence_alloc(name);
-	if (fence == NULL)
-		return NULL;
+	check = container_of(cb, struct sync_fence_cb, cb);
+	fence = check->fence;
 
-	pt->fence = fence;
-	list_add(&pt->pt_list, &fence->pt_list_head);
-	sync_pt_activate(pt);
-
-	/*
-	 * signal the fence in case pt was activated before
-	 * sync_pt_activate(pt) was called
-	 */
-	sync_fence_signal_pt(pt);
-
-	return fence;
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
-{
-	struct list_head *pos;
-
-	list_for_each(pos, &src->pt_list_head) {
-		struct sync_pt *orig_pt =
-			container_of(pos, struct sync_pt, pt_list);
-		struct sync_pt *new_pt = sync_pt_dup(orig_pt);
-
-		if (new_pt == NULL)
-			return -ENOMEM;
-
-		new_pt->fence = dst;
-		list_add(&new_pt->pt_list, &dst->pt_list_head);
-	}
-
-	return 0;
+	if (atomic_dec_and_test(&fence->status))
+		wake_up_all(&fence->wq);
 }
 
-static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
-{
-	struct list_head *src_pos, *dst_pos, *n;
-
-	list_for_each(src_pos, &src->pt_list_head) {
-		struct sync_pt *src_pt =
-			container_of(src_pos, struct sync_pt, pt_list);
-		bool collapsed = false;
-
-		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
-			struct sync_pt *dst_pt =
-				container_of(dst_pos, struct sync_pt, pt_list);
-			/* collapse two sync_pts on the same timeline
-			 * to a single sync_pt that will signal at
-			 * the later of the two
-			 */
-			if (dst_pt->parent == src_pt->parent) {
-				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
-						 == -1) {
-					struct sync_pt *new_pt =
-						sync_pt_dup(src_pt);
-					if (new_pt == NULL)
-						return -ENOMEM;
-
-					new_pt->fence = dst;
-					list_replace(&dst_pt->pt_list,
-						     &new_pt->pt_list);
-					sync_pt_free(dst_pt);
-				}
-				collapsed = true;
-				break;
-			}
-		}
-
-		if (!collapsed) {
-			struct sync_pt *new_pt = sync_pt_dup(src_pt);
-
-			if (new_pt == NULL)
-				return -ENOMEM;
-
-			new_pt->fence = dst;
-			list_add(&new_pt->pt_list, &dst->pt_list_head);
-		}
-	}
-
-	return 0;
-}
-
-static void sync_fence_detach_pts(struct sync_fence *fence)
+/* TODO: implement a create which takes more that one sync_pt */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
 {
-	struct list_head *pos, *n;
+	struct sync_fence *fence;
 
-	list_for_each_safe(pos, n, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
+	if (fence == NULL)
+		return NULL;
 
-		sync_timeline_remove_pt(pt);
-	}
-}
+	fence->num_fences = 1;
+	atomic_set(&fence->status, 1);
 
-static void sync_fence_free_pts(struct sync_fence *fence)
-{
-	struct list_head *pos, *n;
+	fence_get(&pt->base);
+	fence->cbs[0].sync_pt = &pt->base;
+	fence->cbs[0].fence = fence;
+	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
+			       fence_check_cb_func))
+		atomic_dec(&fence->status);
 
-	list_for_each_safe(pos, n, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+	sync_fence_debug_add(fence);
 
-		sync_pt_free(pt);
-	}
+	return fence;
 }
+EXPORT_SYMBOL(sync_fence_create);
 
 struct sync_fence *sync_fence_fdget(int fd)
 {
@@ -434,197 +242,155 @@ void sync_fence_install(struct sync_fence *fence, int fd)
 }
 EXPORT_SYMBOL(sync_fence_install);
 
-static int sync_fence_get_status(struct sync_fence *fence)
+static void sync_fence_add_pt(struct sync_fence *fence,
+			      int *i, struct fence *pt)
 {
-	struct list_head *pos;
-	int status = 1;
-
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
-		int pt_status = pt->status;
-
-		if (pt_status < 0) {
-			status = pt_status;
-			break;
-		} else if (status == 1) {
-			status = pt_status;
-		}
-	}
+	fence->cbs[*i].sync_pt = pt;
+	fence->cbs[*i].fence = fence;
 
-	return status;
+	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
+		fence_get(pt);
+		(*i)++;
+	}
 }
 
 struct sync_fence *sync_fence_merge(const char *name,
 				    struct sync_fence *a, struct sync_fence *b)
 {
+	int num_fences = a->num_fences + b->num_fences;
 	struct sync_fence *fence;
-	struct list_head *pos;
-	int err;
+	int i, i_a, i_b;
+	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
 
-	fence = sync_fence_alloc(name);
+	fence = sync_fence_alloc(size, name);
 	if (fence == NULL)
 		return NULL;
 
-	err = sync_fence_copy_pts(fence, a);
-	if (err < 0)
-		goto err;
+	atomic_set(&fence->status, num_fences);
 
-	err = sync_fence_merge_pts(fence, b);
-	if (err < 0)
-		goto err;
+	/*
+	 * Assume sync_fence a and b are both ordered and have no
+	 * duplicates with the same context.
+	 *
+	 * If a sync_fence can only be created with sync_fence_merge
+	 * and sync_fence_create, this is a reasonable assumption.
+	 */
+	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+		struct fence *pt_a = a->cbs[i_a].sync_pt;
+		struct fence *pt_b = b->cbs[i_b].sync_pt;
+
+		if (pt_a->context < pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_a);
+
+			i_a++;
+		} else if (pt_a->context > pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_b);
 
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
-		sync_pt_activate(pt);
+			i_b++;
+		} else {
+			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+				sync_fence_add_pt(fence, &i, pt_a);
+			else
+				sync_fence_add_pt(fence, &i, pt_b);
+
+			i_a++;
+			i_b++;
+		}
 	}
 
-	/*
-	 * signal the fence in case one of it's pts were activated before
-	 * they were activated
-	 */
-	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
-					      struct sync_pt,
-					      pt_list));
+	for (; i_a < a->num_fences; i_a++)
+		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
 
+	for (; i_b < b->num_fences; i_b++)
+		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+
+	if (num_fences > i)
+		atomic_sub(num_fences - i, &fence->status);
+	fence->num_fences = i;
+
+	sync_fence_debug_add(fence);
 	return fence;
-err:
-	sync_fence_free_pts(fence);
-	kfree(fence);
-	return NULL;
 }
 EXPORT_SYMBOL(sync_fence_merge);
 
-static void sync_fence_signal_pt(struct sync_pt *pt)
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+			  int wake_flags, void *key)
 {
-	LIST_HEAD(signaled_waiters);
-	struct sync_fence *fence = pt->fence;
-	struct list_head *pos;
-	struct list_head *n;
-	unsigned long flags;
-	int status;
-
-	status = sync_fence_get_status(fence);
-
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	/*
-	 * this should protect against two threads racing on the signaled
-	 * false -> true transition
-	 */
-	if (status && !fence->status) {
-		list_for_each_safe(pos, n, &fence->waiter_list_head)
-			list_move(pos, &signaled_waiters);
-
-		fence->status = status;
-	} else {
-		status = 0;
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	struct sync_fence_waiter *wait;
 
-	if (status) {
-		list_for_each_safe(pos, n, &signaled_waiters) {
-			struct sync_fence_waiter *waiter =
-				container_of(pos, struct sync_fence_waiter,
-					     waiter_list);
+	wait = container_of(curr, struct sync_fence_waiter, work);
+	list_del_init(&wait->work.task_list);
 
-			list_del(pos);
-			waiter->callback(fence, waiter);
-		}
-		wake_up(&fence->wq);
-	}
+	wait->callback(wait->work.private, wait);
+	return 1;
 }
 
 int sync_fence_wait_async(struct sync_fence *fence,
 			  struct sync_fence_waiter *waiter)
 {
+	int err = atomic_read(&fence->status);
 	unsigned long flags;
-	int err = 0;
 
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+	if (err < 0)
+		return err;
 
-	if (fence->status) {
-		err = fence->status;
-		goto out;
-	}
+	if (!err)
+		return 1;
 
-	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
-out:
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
+	waiter->work.private = fence;
 
-	return err;
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	err = atomic_read(&fence->status);
+	if (err > 0)
+		__add_wait_queue_tail(&fence->wq, &waiter->work);
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
+
+	if (err < 0)
+		return err;
+
+	return !err;
 }
 EXPORT_SYMBOL(sync_fence_wait_async);
 
 int sync_fence_cancel_async(struct sync_fence *fence,
 			    struct sync_fence_waiter *waiter)
 {
-	struct list_head *pos;
-	struct list_head *n;
 	unsigned long flags;
-	int ret = -ENOENT;
+	int ret = 0;
 
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	/*
-	 * Make sure waiter is still in waiter_list because it is possible for
-	 * the waiter to be removed from the list while the callback is still
-	 * pending.
-	 */
-	list_for_each_safe(pos, n, &fence->waiter_list_head) {
-		struct sync_fence_waiter *list_waiter =
-			container_of(pos, struct sync_fence_waiter,
-				     waiter_list);
-		if (list_waiter == waiter) {
-			list_del(pos);
-			ret = 0;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	if (!list_empty(&waiter->work.task_list))
+		list_del_init(&waiter->work.task_list);
+	else
+		ret = -ENOENT;
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(sync_fence_cancel_async);
 
-static bool sync_fence_check(struct sync_fence *fence)
-{
-	/*
-	 * Make sure that reads to fence->status are ordered with the
-	 * wait queue event triggering
-	 */
-	smp_rmb();
-	return fence->status != 0;
-}
-
 int sync_fence_wait(struct sync_fence *fence, long timeout)
 {
-	int err = 0;
-	struct sync_pt *pt;
-
-	trace_sync_wait(fence, 1);
-	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
-		trace_sync_pt(pt);
+	long ret;
+	int i;
 
-	if (timeout > 0) {
+	if (timeout < 0)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
 		timeout = msecs_to_jiffies(timeout);
-		err = wait_event_interruptible_timeout(fence->wq,
-						       sync_fence_check(fence),
-						       timeout);
-	} else if (timeout < 0) {
-		err = wait_event_interruptible(fence->wq,
-					       sync_fence_check(fence));
-	}
-	trace_sync_wait(fence, 0);
 
-	if (err < 0)
-		return err;
-
-	if (fence->status < 0) {
-		pr_info("fence error %d on [%p]\n", fence->status, fence);
-		sync_dump();
-		return fence->status;
-	}
+	trace_sync_wait(fence, 1);
+	for (i = 0; i < fence->num_fences; ++i)
+		trace_sync_pt(fence->cbs[i].sync_pt);
+	ret = wait_event_interruptible_timeout(fence->wq,
+					       atomic_read(&fence->status) <= 0,
+					       timeout);
+	trace_sync_wait(fence, 0);
 
-	if (fence->status == 0) {
-		if (timeout > 0) {
+	if (ret < 0)
+		return ret;
+	else if (ret == 0) {
+		if (timeout) {
 			pr_info("fence timeout on [%p] after %dms\n", fence,
 				jiffies_to_msecs(timeout));
 			sync_dump();
@@ -632,15 +398,136 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
 		return -ETIME;
 	}
 
-	return 0;
+	ret = atomic_read(&fence->status);
+	if (ret) {
+		pr_info("fence error %ld on [%p]\n", ret, fence);
+		sync_dump();
+	}
+	return ret;
 }
 EXPORT_SYMBOL(sync_fence_wait);
 
+static const char *android_fence_get_driver_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->ops->driver_name;
+}
+
+static const char *android_fence_get_timeline_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->name;
+}
+
+static void android_fence_release(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	unsigned long flags;
+
+	spin_lock_irqsave(fence->lock, flags);
+	list_del(&pt->child_list);
+	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+		list_del(&pt->active_list);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	if (parent->ops->free_pt)
+		parent->ops->free_pt(pt);
+
+	sync_timeline_put(parent);
+	fence_free(&pt->base);
+}
+
+static bool android_fence_signaled(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	int ret;
+
+	ret = parent->ops->has_signaled(pt);
+	if (ret < 0)
+		fence->status = ret;
+	return ret;
+}
+
+static bool android_fence_enable_signaling(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (android_fence_signaled(fence))
+		return false;
+
+	list_add_tail(&pt->active_list, &parent->active_list_head);
+	return true;
+}
+
+static int android_fence_fill_driver_data(struct fence *fence,
+					  void *data, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->fill_driver_data)
+		return 0;
+	return parent->ops->fill_driver_data(pt, data, size);
+}
+
+static void android_fence_value_str(struct fence *fence,
+				    char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->pt_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->pt_value_str(pt, str, size);
+}
+
+static void android_fence_timeline_value_str(struct fence *fence,
+					     char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->timeline_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->timeline_value_str(parent, str, size);
+}
+
+static const struct fence_ops android_fence_ops = {
+	.get_driver_name = android_fence_get_driver_name,
+	.get_timeline_name = android_fence_get_timeline_name,
+	.enable_signaling = android_fence_enable_signaling,
+	.signaled = android_fence_signaled,
+	.wait = fence_default_wait,
+	.release = android_fence_release,
+	.fill_driver_data = android_fence_fill_driver_data,
+	.fence_value_str = android_fence_value_str,
+	.timeline_value_str = android_fence_timeline_value_str,
+};
+
 static void sync_fence_free(struct kref *kref)
 {
 	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+	int i, status = atomic_read(&fence->status);
 
-	sync_fence_free_pts(fence);
+	for (i = 0; i < fence->num_fences; ++i) {
+		if (status)
+			fence_remove_callback(fence->cbs[i].sync_pt,
+					      &fence->cbs[i].cb);
+		fence_put(fence->cbs[i].sync_pt);
+	}
 
 	kfree(fence);
 }
@@ -648,44 +535,25 @@ static void sync_fence_free(struct kref *kref)
 static int sync_fence_release(struct inode *inode, struct file *file)
 {
 	struct sync_fence *fence = file->private_data;
-	unsigned long flags;
-
-	/*
-	 * We need to remove all ways to access this fence before droping
-	 * our ref.
-	 *
-	 * start with its membership in the global fence list
-	 */
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_del(&fence->sync_fence_list);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
 
-	/*
-	 * remove its pts from their parents so that sync_timeline_signal()
-	 * can't reference the fence.
-	 */
-	sync_fence_detach_pts(fence);
+	sync_fence_debug_remove(fence);
 
 	kref_put(&fence->kref, sync_fence_free);
-
 	return 0;
 }
 
 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
 {
 	struct sync_fence *fence = file->private_data;
+	int status;
 
 	poll_wait(file, &fence->wq, wait);
 
-	/*
-	 * Make sure that reads to fence->status are ordered with the
-	 * wait queue event triggering
-	 */
-	smp_rmb();
+	status = atomic_read(&fence->status);
 
-	if (fence->status == 1)
+	if (!status)
 		return POLLIN;
-	else if (fence->status < 0)
+	else if (status < 0)
 		return POLLERR;
 	else
 		return 0;
@@ -750,7 +618,7 @@ err_put_fd:
 	return err;
 }
 
-static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+static int sync_fill_pt_info(struct fence *fence, void *data, int size)
 {
 	struct sync_pt_info *info = data;
 	int ret;
@@ -760,20 +628,24 @@ static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
 
 	info->len = sizeof(struct sync_pt_info);
 
-	if (pt->parent->ops->fill_driver_data) {
-		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
-							size - sizeof(*info));
+	if (fence->ops->fill_driver_data) {
+		ret = fence->ops->fill_driver_data(fence, info->driver_data,
+						   size - sizeof(*info));
 		if (ret < 0)
 			return ret;
 
 		info->len += ret;
 	}
 
-	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
-	strlcpy(info->driver_name, pt->parent->ops->driver_name,
+	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+		sizeof(info->obj_name));
+	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	info->status = pt->status;
-	info->timestamp_ns = ktime_to_ns(pt->timestamp);
+	if (fence_is_signaled(fence))
+		info->status = fence->status >= 0 ? 1 : fence->status;
+	else
+		info->status = 0;
+	info->timestamp_ns = ktime_to_ns(fence->timestamp);
 
 	return info->len;
 }
@@ -782,10 +654,9 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
 					 unsigned long arg)
 {
 	struct sync_fence_info_data *data;
-	struct list_head *pos;
 	__u32 size;
 	__u32 len = 0;
-	int ret;
+	int ret, i;
 
 	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
 		return -EFAULT;
@@ -801,12 +672,14 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
 		return -ENOMEM;
 
 	strlcpy(data->name, fence->name, sizeof(data->name));
-	data->status = fence->status;
+	data->status = atomic_read(&fence->status);
+	if (data->status >= 0)
+		data->status = !data->status;
+
 	len = sizeof(struct sync_fence_info_data);
 
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
+	for (i = 0; i < fence->num_fences; ++i) {
+		struct fence *pt = fence->cbs[i].sync_pt;
 
 		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
 
@@ -833,7 +706,6 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
 	struct sync_fence *fence = file->private_data;
-
 	switch (cmd) {
 	case SYNC_IOC_WAIT:
 		return sync_fence_ioctl_wait(fence, arg);
@@ -849,181 +721,10 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
 	}
 }
 
-#ifdef CONFIG_DEBUG_FS
-static const char *sync_status_str(int status)
-{
-	if (status > 0)
-		return "signaled";
-	else if (status == 0)
-		return "active";
-	else
-		return "error";
-}
-
-static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
-{
-	int status = pt->status;
-
-	seq_printf(s, "  %s%spt %s",
-		   fence ? pt->parent->name : "",
-		   fence ? "_" : "",
-		   sync_status_str(status));
-	if (pt->status) {
-		struct timeval tv = ktime_to_timeval(pt->timestamp);
-
-		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
-	}
-
-	if (pt->parent->ops->timeline_value_str &&
-	    pt->parent->ops->pt_value_str) {
-		char value[64];
-
-		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
-		seq_printf(s, ": %s", value);
-		if (fence) {
-			pt->parent->ops->timeline_value_str(pt->parent, value,
-							    sizeof(value));
-			seq_printf(s, " / %s", value);
-		}
-	} else if (pt->parent->ops->print_pt) {
-		seq_puts(s, ": ");
-		pt->parent->ops->print_pt(s, pt);
-	}
-
-	seq_puts(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
-	struct list_head *pos;
-	unsigned long flags;
-
-	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
-	if (obj->ops->timeline_value_str) {
-		char value[64];
-
-		obj->ops->timeline_value_str(obj, value, sizeof(value));
-		seq_printf(s, ": %s", value);
-	} else if (obj->ops->print_obj) {
-		seq_puts(s, ": ");
-		obj->ops->print_obj(s, obj);
-	}
-
-	seq_puts(s, "\n");
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_for_each(pos, &obj->child_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, child_list);
-		sync_print_pt(s, pt, false);
-	}
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
-{
-	struct list_head *pos;
-	unsigned long flags;
-
-	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
-		   sync_status_str(fence->status));
-
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
-		sync_print_pt(s, pt, true);
-	}
-
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	list_for_each(pos, &fence->waiter_list_head) {
-		struct sync_fence_waiter *waiter =
-			container_of(pos, struct sync_fence_waiter,
-				     waiter_list);
-
-		seq_printf(s, "waiter %pF\n", waiter->callback);
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
-	unsigned long flags;
-	struct list_head *pos;
-
-	seq_puts(s, "objs:\n--------------\n");
-
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_for_each(pos, &sync_timeline_list_head) {
-		struct sync_timeline *obj =
-			container_of(pos, struct sync_timeline,
-				     sync_timeline_list);
-
-		sync_print_obj(s, obj);
-		seq_puts(s, "\n");
-	}
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
-	seq_puts(s, "fences:\n--------------\n");
-
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_for_each(pos, &sync_fence_list_head) {
-		struct sync_fence *fence =
-			container_of(pos, struct sync_fence, sync_fence_list);
-
-		sync_print_fence(s, fence);
-		seq_puts(s, "\n");
-	}
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-	return 0;
-}
-
-static int sync_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_debugfs_fops = {
-	.open = sync_debugfs_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct file_operations sync_fence_fops = {
+	.release = sync_fence_release,
+	.poll = sync_fence_poll,
+	.unlocked_ioctl = sync_fence_ioctl,
+	.compat_ioctl = sync_fence_ioctl,
 };
 
-static __init int sync_debugfs_init(void)
-{
-	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
-	return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-static void sync_dump(void)
-{
-	struct seq_file s = {
-		.buf = sync_dump_buf,
-		.size = sizeof(sync_dump_buf) - 1,
-	};
-	int i;
-
-	sync_debugfs_show(&s, NULL);
-
-	for (i = 0; i < s.count; i += DUMP_CHUNK) {
-		if ((s.count - i) > DUMP_CHUNK) {
-			char c = s.buf[i + DUMP_CHUNK];
-
-			s.buf[i + DUMP_CHUNK] = 0;
-			pr_cont("%s", s.buf + i);
-			s.buf[i + DUMP_CHUNK] = c;
-		} else {
-			s.buf[s.count] = 0;
-			pr_cont("%s", s.buf + i);
-		}
-	}
-}
-#else
-static void sync_dump(void)
-{
-}
-#endif
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index eaf57cccf626..66b0f431f63e 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
+#include <linux/fence.h>
 
 #include "uapi/sync.h"
 
@@ -40,8 +41,6 @@ struct sync_fence;
  *	   -1 if a will signal before b
  * @free_pt: called before sync_pt is freed
  * @release_obj: called before sync_timeline is freed
- * @print_obj: deprecated
- * @print_pt: deprecated
  * @fill_driver_data: write implementation specific driver data to data.
  *		      should return an error if there is not enough room
  *		      as specified by size.  This information is returned
@@ -67,13 +66,6 @@ struct sync_timeline_ops {
 	/* optional */
 	void (*release_obj)(struct sync_timeline *sync_timeline);
 
-	/* deprecated */
-	void (*print_obj)(struct seq_file *s,
-			  struct sync_timeline *sync_timeline);
-
-	/* deprecated */
-	void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
-
 	/* optional */
 	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
 
@@ -104,19 +96,21 @@
 
 	/* protected by child_list_lock */
 	bool destroyed;
+	int context, value;
 
 	struct list_head	child_list_head;
 	spinlock_t		child_list_lock;
 
 	struct list_head	active_list_head;
-	spinlock_t		active_list_lock;
 
+#ifdef CONFIG_DEBUG_FS
 	struct list_head	sync_timeline_list;
+#endif
 };
 
 /**
  * struct sync_pt - sync point
- * @parent: sync_timeline to which this sync_pt belongs
+ * @fence: base fence class
  * @child_list: membership in sync_timeline.child_list_head
  * @active_list: membership in sync_timeline.active_list_head
  * @signaled_list: membership in temporary signaled_list on stack
@@ -127,19 +121,22 @@ struct sync_timeline {
  *	     signaled or error.
  */
 struct sync_pt {
-	struct sync_timeline		*parent;
-	struct list_head	child_list;
+	struct fence base;
 
+	struct list_head	child_list;
 	struct list_head	active_list;
-	struct list_head	signaled_list;
-
-	struct sync_fence	*fence;
-	struct list_head	pt_list;
+};
 
-	/* protected by parent->active_list_lock */
-	int			status;
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+	return container_of(pt->base.lock, struct sync_timeline,
+			    child_list_lock);
+}
 
-	ktime_t			timestamp;
+struct sync_fence_cb {
+	struct fence_cb cb;
+	struct fence *sync_pt;
+	struct sync_fence *fence;
 };
 
 /**
@@ -149,9 +146,7 @@ struct sync_pt {
  * @name:	name of sync_fence. Useful for debugging
  * @pt_list_head:	list of sync_pts in the fence.  immutable once fence
  *			is created
- * @waiter_list_head:	list of asynchronous waiters on this fence
- * @waiter_list_lock:	lock protecting @waiter_list_head and @status
- * @status:		1: signaled, 0:active, <0: error
+ * @status:	0: signaled, >0:active, <0: error
  *
  * @wq:			wait queue for fence signaling
  * @sync_fence_list:	membership in global fence list
@@ -160,17 +155,15 @@ struct sync_fence { | |||
160 | struct file *file; | 155 | struct file *file; |
161 | struct kref kref; | 156 | struct kref kref; |
162 | char name[32]; | 157 | char name[32]; |
163 | 158 | #ifdef CONFIG_DEBUG_FS | |
164 | /* this list is immutable once the fence is created */ | 159 | struct list_head sync_fence_list; |
165 | struct list_head pt_list_head; | 160 | #endif |
166 | 161 | int num_fences; | |
167 | struct list_head waiter_list_head; | ||
168 | spinlock_t waiter_list_lock; /* also protects status */ | ||
169 | int status; | ||
170 | 162 | ||
171 | wait_queue_head_t wq; | 163 | wait_queue_head_t wq; |
164 | atomic_t status; | ||
172 | 165 | ||
173 | struct list_head sync_fence_list; | 166 | struct sync_fence_cb cbs[]; |
174 | }; | 167 | }; |
175 | 168 | ||
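Two shape changes in struct sync_fence deserve a note: @status is now an atomic_t (per the "0: signaled, >0: active" doc text, apparently a countdown of still-pending fences), and per-fence callback bookkeeping moved into the flexible array member cbs[], sized by num_fences, so the slots share one allocation with the fence. A hedged userspace sketch of that single-allocation pattern (fence_blob and its fields are hypothetical names, not the patch's code):

    #include <stdlib.h>

    struct cb_slot {
            void *sync_pt;                  /* one slot per backing fence */
    };

    struct fence_blob {
            int num_fences;
            struct cb_slot cbs[];           /* flexible array member */
    };

    /* Header and all callback slots come from a single allocation, so
     * the slots live and die with the fence itself. */
    static struct fence_blob *fence_blob_alloc(int num_fences)
    {
            struct fence_blob *f;

            f = calloc(1, sizeof(*f) + num_fences * sizeof(f->cbs[0]));
            if (f)
                    f->num_fences = num_fences;
            return f;
    }

    int main(void)
    {
            struct fence_blob *f = fence_blob_alloc(2);

            free(f);
            return 0;
    }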
176 | struct sync_fence_waiter; | 169 | struct sync_fence_waiter; |
@@ -184,14 +177,14 @@ typedef void (*sync_callback_t)(struct sync_fence *fence, | |||
184 | * @callback_data: pointer to pass to @callback | 177 | * @callback_data: pointer to pass to @callback |
185 | */ | 178 | */ |
186 | struct sync_fence_waiter { | 179 | struct sync_fence_waiter { |
187 | struct list_head waiter_list; | 180 | wait_queue_t work; |
188 | 181 | sync_callback_t callback; | |
189 | sync_callback_t callback; | ||
190 | }; | 182 | }; |
191 | 183 | ||
192 | static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, | 184 | static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, |
193 | sync_callback_t callback) | 185 | sync_callback_t callback) |
194 | { | 186 | { |
187 | INIT_LIST_HEAD(&waiter->work.task_list); | ||
195 | waiter->callback = callback; | 188 | waiter->callback = callback; |
196 | } | 189 | } |
197 | 190 | ||
@@ -341,4 +334,22 @@ int sync_fence_cancel_async(struct sync_fence *fence, | |||
341 | */ | 334 | */ |
342 | int sync_fence_wait(struct sync_fence *fence, long timeout); | 335 | int sync_fence_wait(struct sync_fence *fence, long timeout); |
343 | 336 | ||
337 | #ifdef CONFIG_DEBUG_FS | ||
338 | |||
339 | extern void sync_timeline_debug_add(struct sync_timeline *obj); | ||
340 | extern void sync_timeline_debug_remove(struct sync_timeline *obj); | ||
341 | extern void sync_fence_debug_add(struct sync_fence *fence); | ||
342 | extern void sync_fence_debug_remove(struct sync_fence *fence); | ||
343 | extern void sync_dump(void); | ||
344 | |||
345 | #else | ||
346 | # define sync_timeline_debug_add(obj) | ||
347 | # define sync_timeline_debug_remove(obj) | ||
348 | # define sync_fence_debug_add(fence) | ||
349 | # define sync_fence_debug_remove(fence) | ||
350 | # define sync_dump() | ||
351 | #endif | ||
352 | int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, | ||
353 | int wake_flags, void *key); | ||
354 | |||
344 | #endif /* _LINUX_SYNC_H */ | 355 | #endif /* _LINUX_SYNC_H */ |
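One hygiene note on the CONFIG_DEBUG_FS stubs a few lines up: they are defined as bare empty macros, which compiles, but the usual kernel idiom for statement-like stubs is do { } while (0) (or a static inline). That form avoids -Wempty-body warnings at "if (err) sync_dump();" call sites and turns a forgotten semicolon into a compile error instead of a silent no-op. A small compilable illustration:

    #include <stdio.h>

    /* Define CONFIG_DEBUG_FS to simulate the debug build. */
    #ifdef CONFIG_DEBUG_FS
    static void sync_dump(void) { puts("dump"); }
    #else
    /* Bare "#define sync_dump()" would expand the call below to an
     * empty statement "if (err) ;"; do-while keeps the stub a real
     * single statement in every position. */
    # define sync_dump() do { } while (0)
    #endif

    int main(void)
    {
            int err = 1;

            if (err)
                    sync_dump();
            else
                    puts("no dump");
            return 0;
    }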
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c new file mode 100644 index 000000000000..a9f7fd506da3 --- /dev/null +++ b/drivers/staging/android/sync_debug.c | |||
@@ -0,0 +1,247 @@ | |||
1 | /* | ||
2 | * drivers/staging/android/sync_debug.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/debugfs.h> | ||
18 | #include <linux/export.h> | ||
19 | #include <linux/file.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/poll.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/anon_inodes.h> | ||
28 | #include "sync.h" | ||
29 | |||
30 | #ifdef CONFIG_DEBUG_FS | ||
31 | |||
32 | static LIST_HEAD(sync_timeline_list_head); | ||
33 | static DEFINE_SPINLOCK(sync_timeline_list_lock); | ||
34 | static LIST_HEAD(sync_fence_list_head); | ||
35 | static DEFINE_SPINLOCK(sync_fence_list_lock); | ||
36 | |||
37 | void sync_timeline_debug_add(struct sync_timeline *obj) | ||
38 | { | ||
39 | unsigned long flags; | ||
40 | |||
41 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
42 | list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); | ||
43 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
44 | } | ||
45 | |||
46 | void sync_timeline_debug_remove(struct sync_timeline *obj) | ||
47 | { | ||
48 | unsigned long flags; | ||
49 | |||
50 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
51 | list_del(&obj->sync_timeline_list); | ||
52 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
53 | } | ||
54 | |||
55 | void sync_fence_debug_add(struct sync_fence *fence) | ||
56 | { | ||
57 | unsigned long flags; | ||
58 | |||
59 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
60 | list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); | ||
61 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
62 | } | ||
63 | |||
64 | void sync_fence_debug_remove(struct sync_fence *fence) | ||
65 | { | ||
66 | unsigned long flags; | ||
67 | |||
68 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
69 | list_del(&fence->sync_fence_list); | ||
70 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
71 | } | ||
72 | |||
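These four debugfs helpers share one pattern: global bookkeeping lists guarded by spin_lock_irqsave(), since timelines and fences can be added and removed from atomic context while the debugfs reader walks the same lists. A rough userspace analogue of the pattern (a pthread mutex stands in for the spinlock; disabling interrupts has no userspace counterpart):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
            struct node *prev, *next;
            const char *name;
    };

    /* Circular list head plus one lock, mirroring
     * sync_timeline_list_head / sync_timeline_list_lock. */
    static struct node head = { &head, &head, "head" };
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void debug_add(struct node *n)       /* ~list_add_tail */
    {
            pthread_mutex_lock(&list_lock);
            n->prev = head.prev;
            n->next = &head;
            head.prev->next = n;
            head.prev = n;
            pthread_mutex_unlock(&list_lock);
    }

    static void debug_remove(struct node *n)    /* ~list_del */
    {
            pthread_mutex_lock(&list_lock);
            n->prev->next = n->next;
            n->next->prev = n->prev;
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            struct node a = { NULL, NULL, "timeline_a" };

            debug_add(&a);
            for (struct node *p = head.next; p != &head; p = p->next)
                    printf("obj: %s\n", p->name);
            debug_remove(&a);
            return 0;
    }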
73 | static const char *sync_status_str(int status) | ||
74 | { | ||
75 | if (status == 0) | ||
76 | return "signaled"; | ||
77 | else if (status > 0) | ||
78 | return "active"; | ||
79 | else | ||
80 | return "error"; | ||
81 | } | ||
82 | |||
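sync_status_str() encodes the new status convention, which inverts the old one (the old sync.h comment read "1: signaled, 0: active"): zero now means signaled, and a positive value means active, presumably the number of backing fences still pending, decremented as each one signals (that countdown lives in sync.c, not in this file). A tiny self-contained demo of the mapping:

    #include <stdio.h>

    static const char *status_str(int status)
    {
            if (status == 0)
                    return "signaled";
            else if (status > 0)
                    return "active";
            return "error";
    }

    int main(void)
    {
            /* status counts still-pending fences; each signal decrements,
             * and a negative value records an error. */
            for (int status = 2; status >= -1; status--)
                    printf("%2d -> %s\n", status, status_str(status));
            return 0;
    }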
83 | static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) | ||
84 | { | ||
85 | int status = 1; | ||
86 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
87 | |||
88 | if (fence_is_signaled_locked(&pt->base)) | ||
89 | status = pt->base.status; | ||
90 | |||
91 | seq_printf(s, " %s%spt %s", | ||
92 | fence ? parent->name : "", | ||
93 | fence ? "_" : "", | ||
94 | sync_status_str(status)); | ||
95 | |||
96 | if (status <= 0) { | ||
97 | struct timeval tv = ktime_to_timeval(pt->base.timestamp); | ||
98 | seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); | ||
99 | } | ||
100 | |||
101 | if (parent->ops->timeline_value_str && | ||
102 | parent->ops->pt_value_str) { | ||
103 | char value[64]; | ||
104 | parent->ops->pt_value_str(pt, value, sizeof(value)); | ||
105 | seq_printf(s, ": %s", value); | ||
106 | if (fence) { | ||
107 | parent->ops->timeline_value_str(parent, value, | ||
108 | sizeof(value)); | ||
109 | seq_printf(s, " / %s", value); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | seq_puts(s, "\n"); | ||
114 | } | ||
115 | |||
116 | static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) | ||
117 | { | ||
118 | struct list_head *pos; | ||
119 | unsigned long flags; | ||
120 | |||
121 | seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); | ||
122 | |||
123 | if (obj->ops->timeline_value_str) { | ||
124 | char value[64]; | ||
125 | obj->ops->timeline_value_str(obj, value, sizeof(value)); | ||
126 | seq_printf(s, ": %s", value); | ||
127 | } | ||
128 | |||
129 | seq_puts(s, "\n"); | ||
130 | |||
131 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
132 | list_for_each(pos, &obj->child_list_head) { | ||
133 | struct sync_pt *pt = | ||
134 | container_of(pos, struct sync_pt, child_list); | ||
135 | sync_print_pt(s, pt, false); | ||
136 | } | ||
137 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | ||
138 | } | ||
139 | |||
140 | static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) | ||
141 | { | ||
142 | wait_queue_t *pos; | ||
143 | unsigned long flags; | ||
144 | int i; | ||
145 | |||
146 | seq_printf(s, "[%p] %s: %s\n", fence, fence->name, | ||
147 | sync_status_str(atomic_read(&fence->status))); | ||
148 | |||
149 | for (i = 0; i < fence->num_fences; ++i) { | ||
150 | struct sync_pt *pt = | ||
151 | container_of(fence->cbs[i].sync_pt, | ||
152 | struct sync_pt, base); | ||
153 | |||
154 | sync_print_pt(s, pt, true); | ||
155 | } | ||
156 | |||
157 | spin_lock_irqsave(&fence->wq.lock, flags); | ||
158 | list_for_each_entry(pos, &fence->wq.task_list, task_list) { | ||
159 | struct sync_fence_waiter *waiter; | ||
160 | |||
161 | if (pos->func != &sync_fence_wake_up_wq) | ||
162 | continue; | ||
163 | |||
164 | waiter = container_of(pos, struct sync_fence_waiter, work); | ||
165 | |||
166 | seq_printf(s, "waiter %pF\n", waiter->callback); | ||
167 | } | ||
168 | spin_unlock_irqrestore(&fence->wq.lock, flags); | ||
169 | } | ||
170 | |||
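In sync_print_fence() above, the pos->func comparison does real work: the fence's wait queue can also hold ordinary poll waiters, and only entries whose wake function is sync_fence_wake_up_wq wrap a sync_fence_waiter, so only those are safe to container_of() back to one. A compact userspace model of filtering queue entries by their wake-function pointer (types and names are simplified stand-ins):

    #include <stdio.h>

    /* Simplified wait-queue entry: like the kernel's wait_queue_t, it
     * carries its wake function as a plain function pointer. */
    struct wq_entry {
            int (*func)(struct wq_entry *);
            const char *label;
    };

    static int sync_fence_wake(struct wq_entry *e) { (void)e; return 1; }
    static int default_wake(struct wq_entry *e)    { (void)e; return 0; }

    int main(void)
    {
            struct wq_entry queue[] = {
                    { default_wake,    "poll() waiter" },
                    { sync_fence_wake, "sync_fence_waiter" },
                    { default_wake,    "read() waiter" },
            };

            /* Only entries installed by the sync layer use our wake
             * function, so the pointer identifies them unambiguously. */
            for (size_t i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
                    if (queue[i].func != sync_fence_wake)
                            continue;
                    printf("waiter: %s\n", queue[i].label);
            }
            return 0;
    }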
171 | static int sync_debugfs_show(struct seq_file *s, void *unused) | ||
172 | { | ||
173 | unsigned long flags; | ||
174 | struct list_head *pos; | ||
175 | |||
176 | seq_puts(s, "objs:\n--------------\n"); | ||
177 | |||
178 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
179 | list_for_each(pos, &sync_timeline_list_head) { | ||
180 | struct sync_timeline *obj = | ||
181 | container_of(pos, struct sync_timeline, | ||
182 | sync_timeline_list); | ||
183 | |||
184 | sync_print_obj(s, obj); | ||
185 | seq_puts(s, "\n"); | ||
186 | } | ||
187 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
188 | |||
189 | seq_puts(s, "fences:\n--------------\n"); | ||
190 | |||
191 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
192 | list_for_each(pos, &sync_fence_list_head) { | ||
193 | struct sync_fence *fence = | ||
194 | container_of(pos, struct sync_fence, sync_fence_list); | ||
195 | |||
196 | sync_print_fence(s, fence); | ||
197 | seq_puts(s, "\n"); | ||
198 | } | ||
199 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static int sync_debugfs_open(struct inode *inode, struct file *file) | ||
204 | { | ||
205 | return single_open(file, sync_debugfs_show, inode->i_private); | ||
206 | } | ||
207 | |||
208 | static const struct file_operations sync_debugfs_fops = { | ||
209 | .open = sync_debugfs_open, | ||
210 | .read = seq_read, | ||
211 | .llseek = seq_lseek, | ||
212 | .release = single_release, | ||
213 | }; | ||
214 | |||
215 | static __init int sync_debugfs_init(void) | ||
216 | { | ||
217 | debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); | ||
218 | return 0; | ||
219 | } | ||
220 | late_initcall(sync_debugfs_init); | ||
221 | |||
222 | #define DUMP_CHUNK 256 | ||
223 | static char sync_dump_buf[64 * 1024]; | ||
224 | void sync_dump(void) | ||
225 | { | ||
226 | struct seq_file s = { | ||
227 | .buf = sync_dump_buf, | ||
228 | .size = sizeof(sync_dump_buf) - 1, | ||
229 | }; | ||
230 | int i; | ||
231 | |||
232 | sync_debugfs_show(&s, NULL); | ||
233 | |||
234 | for (i = 0; i < s.count; i += DUMP_CHUNK) { | ||
235 | if ((s.count - i) > DUMP_CHUNK) { | ||
236 | char c = s.buf[i + DUMP_CHUNK]; | ||
237 | s.buf[i + DUMP_CHUNK] = 0; | ||
238 | pr_cont("%s", s.buf + i); | ||
239 | s.buf[i + DUMP_CHUNK] = c; | ||
240 | } else { | ||
241 | s.buf[s.count] = 0; | ||
242 | pr_cont("%s", s.buf + i); | ||
243 | } | ||
244 | } | ||
245 | } | ||
246 | |||
247 | #endif | ||
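sync_dump() has to push up to 64 KiB through pr_cont(), which cannot take arbitrarily long strings, so it emits the buffer in DUMP_CHUNK-sized pieces: borrow one byte for a temporary NUL, print the chunk, restore the byte. The same in-place chunking in portable C (chunk size shrunk so the demo is visible):

    #include <stdio.h>
    #include <string.h>

    #define DUMP_CHUNK 8                    /* 256 in the driver */

    static void dump_in_chunks(char *buf, size_t count)
    {
            for (size_t i = 0; i < count; i += DUMP_CHUNK) {
                    if (count - i > DUMP_CHUNK) {
                            /* Borrow a byte for the NUL, then restore it. */
                            char c = buf[i + DUMP_CHUNK];

                            buf[i + DUMP_CHUNK] = '\0';
                            fputs(buf + i, stdout);
                            buf[i + DUMP_CHUNK] = c;
                    } else {
                            buf[count] = '\0';
                            fputs(buf + i, stdout);
                    }
            }
    }

    int main(void)
    {
            char msg[] = "objs: timeline_0: 5 / 7; fences: [deadbeef]";

            dump_in_chunks(msg, strlen(msg));
            putchar('\n');
            return 0;
    }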
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h index 95462359ba57..77edb977a7bf 100644 --- a/drivers/staging/android/trace/sync.h +++ b/drivers/staging/android/trace/sync.h | |||
@@ -45,7 +45,7 @@ TRACE_EVENT(sync_wait, | |||
45 | 45 | ||
46 | TP_fast_assign( | 46 | TP_fast_assign( |
47 | __assign_str(name, fence->name); | 47 | __assign_str(name, fence->name); |
48 | __entry->status = fence->status; | 48 | __entry->status = atomic_read(&fence->status); |
49 | __entry->begin = begin; | 49 | __entry->begin = begin; |
50 | ), | 50 | ), |
51 | 51 | ||
@@ -54,19 +54,19 @@ TRACE_EVENT(sync_wait, | |||
54 | ); | 54 | ); |
55 | 55 | ||
56 | TRACE_EVENT(sync_pt, | 56 | TRACE_EVENT(sync_pt, |
57 | TP_PROTO(struct sync_pt *pt), | 57 | TP_PROTO(struct fence *pt), |
58 | 58 | ||
59 | TP_ARGS(pt), | 59 | TP_ARGS(pt), |
60 | 60 | ||
61 | TP_STRUCT__entry( | 61 | TP_STRUCT__entry( |
62 | __string(timeline, pt->parent->name) | 62 | __string(timeline, pt->ops->get_timeline_name(pt)) |
63 | __array(char, value, 32) | 63 | __array(char, value, 32) |
64 | ), | 64 | ), |
65 | 65 | ||
66 | TP_fast_assign( | 66 | TP_fast_assign( |
67 | __assign_str(timeline, pt->parent->name); | 67 | __assign_str(timeline, pt->ops->get_timeline_name(pt)); |
68 | if (pt->parent->ops->pt_value_str) { | 68 | if (pt->ops->fence_value_str) { |
69 | pt->parent->ops->pt_value_str(pt, __entry->value, | 69 | pt->ops->fence_value_str(pt, __entry->value, |
70 | sizeof(__entry->value)); | 70 | sizeof(__entry->value)); |
71 | } else { | 71 | } else { |
72 | __entry->value[0] = '\0'; | 72 | __entry->value[0] = '\0'; |