author     Maarten Lankhorst <maarten.lankhorst@canonical.com>   2014-07-01 06:58:00 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>       2014-07-08 16:41:08 -0400
commit     3c3b177a9369b26890ced004867fb32708e8ef5b (patch)
tree       d7f1840cd62b8c0d427ea56b203485c2e27bb29e /drivers/dma-buf
parent     04a5faa8cbe5a8eaf152cb88959ba6360c26e702 (diff)
reservation: add support for read-only access using rcu
This adds some extra functions to deal with RCU.

reservation_object_get_fences_rcu() will obtain the list of shared
and exclusive fences without obtaining the ww_mutex.

reservation_object_wait_timeout_rcu() will wait on all fences of the
reservation_object, without obtaining the ww_mutex.

reservation_object_test_signaled_rcu() will test whether all fences of
the reservation_object are signaled, without obtaining the ww_mutex.
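[Editor's note: a minimal caller-side sketch of the new read paths, not part of this
patch; it assumes a valid struct reservation_object *obj and abbreviates error
handling. The snapshot returns referenced fences that the caller must drop.]

	struct fence *excl, **shared;
	unsigned shared_count, i;
	int ret;
	long lret;

	/* Snapshot all fence pointers without taking the ww_mutex. */
	ret = reservation_object_get_fences_rcu(obj, &excl,
						&shared_count, &shared);
	if (ret)
		return ret;	/* -ENOMEM is the only failure mode */

	/* ... inspect the snapshot ... */

	for (i = 0; i < shared_count; ++i)
		fence_put(shared[i]);
	kfree(shared);
	if (excl)
		fence_put(excl);

	/* Or simply block until everything signals; like fence_wait_timeout(),
	 * this returns the remaining timeout, 0 on timeout, or a negative
	 * error if interrupted. */
	lret = reservation_object_wait_timeout_rcu(obj, true, true,
						   MAX_SCHEDULE_TIMEOUT);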
reservation_object_get_excl() and reservation_object_get_list() require
the reservation object to be held; updating them requires
write_seqcount_begin/end. If only the exclusive fence is needed,
rcu_dereference() followed by fence_get_rcu() can be used; if the
shared fences are needed as well, it is recommended to use the
supplied functions.
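[Editor's note: a sketch of that lock-free exclusive-fence access, again not part of
the patch; obj is an assumed reservation object.]

	struct fence *fence;

	rcu_read_lock();
	fence = rcu_dereference(obj->fence_excl);
	/* fence_get_rcu() returns NULL if the refcount already hit zero,
	 * in which case the pointer is stale and must not be used. */
	if (fence)
		fence = fence_get_rcu(fence);
	rcu_read_unlock();

	if (fence) {
		/* we hold a reference now; use the fence, then drop it */
		fence_put(fence);
	}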
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/dma-buf.c       47
-rw-r--r--  drivers/dma-buf/fence.c          2
-rw-r--r--  drivers/dma-buf/reservation.c  336
3 files changed, 347 insertions(+), 38 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cb8379dfeed5..f3014c448e1e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -137,7 +137,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct reservation_object_list *fobj;
 	struct fence *fence_excl;
 	unsigned long events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -151,14 +151,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-	ww_mutex_lock(&resv->lock, NULL);
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
 
-	fobj = resv->fence;
-	if (!fobj)
-		goto out;
-
-	shared_count = fobj->shared_count;
-	fence_excl = resv->fence_excl;
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
 
 	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
@@ -176,14 +182,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_add_callback(fence_excl, &dcb->cb,
+			if (!fence_get_rcu(fence_excl)) {
+				/* force a recheck */
+				events &= ~pevents;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			} else if (!fence_add_callback(fence_excl, &dcb->cb,
 						dma_buf_poll_cb)) {
 				events &= ~pevents;
+				fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
+				fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -205,13 +217,26 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = fobj->shared[i];
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
+			if (!fence_get_rcu(fence)) {
+				/*
+				 * fence refcount dropped to zero, this means
+				 * that fobj has been freed
+				 *
+				 * call dma_buf_poll_cb and force a recheck!
+				 */
+				events &= ~POLLOUT;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+				break;
+			}
 			if (!fence_add_callback(fence, &dcb->cb,
 						dma_buf_poll_cb)) {
+				fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
+			fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */
@@ -220,7 +245,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	}
 
 out:
-	ww_mutex_unlock(&resv->lock);
+	rcu_read_unlock();
 	return events;
 }
 
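[Editor's note: the seqcount/RCU read pattern the poll path now follows, distilled
from the hunks above as a standalone sketch; this is not additional patch code.]

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	shared_count = fobj ? fobj->shared_count : 0;
	fence_excl = rcu_dereference(resv->fence_excl);

	/* A concurrent writer bumped the seqcount: the snapshot may mix
	 * old and new pointers, so throw it away and try again. */
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	/* ... take references with fence_get_rcu() before leaving the
	 * RCU read-side critical section ... */
	rcu_read_unlock();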
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 948bf00d955e..4222cb2aa96a 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(fence_release);
 
 void fence_free(struct fence *fence)
 {
-	kfree(fence);
+	kfree_rcu(fence, rcu);
 }
 EXPORT_SYMBOL(fence_free);
 
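[Editor's note: kfree_rcu() defers the actual kfree() until an RCU grace period has
elapsed, so readers still holding a fence pointer inside rcu_read_lock() never see
freed memory. It needs a struct rcu_head member named 'rcu' in struct fence; that
header change is part of this series but falls outside the diffstat above, which is
limited to drivers/dma-buf. A sketch of the assumed layout:]

struct fence {
	struct kref refcount;
	const struct fence_ops *ops;
	struct rcu_head rcu;	/* consumed by kfree_rcu(fence, rcu) */
	/* ... remaining members unchanged ... */
};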
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index e6166723a9ae..3c97c8fa8d02 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -38,6 +38,11 @@
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
 /*
  * Reserve space to add a shared fence to a reservation_object,
  * must be called with obj->lock held.
@@ -82,27 +87,37 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 {
 	u32 i;
 
+	fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
 	for (i = 0; i < fobj->shared_count; ++i) {
-		if (fobj->shared[i]->context == fence->context) {
-			struct fence *old_fence = fobj->shared[i];
+		struct fence *old_fence;
 
-			fence_get(fence);
+		old_fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
 
-			fobj->shared[i] = fence;
+		if (old_fence->context == fence->context) {
+			/* memory barrier is added by write_seqcount_begin */
+			RCU_INIT_POINTER(fobj->shared[i], fence);
+			write_seqcount_end(&obj->seq);
+			preempt_enable();
 
 			fence_put(old_fence);
 			return;
 		}
 	}
 
-	fence_get(fence);
-	fobj->shared[fobj->shared_count] = fence;
 	/*
-	 * make the new fence visible before incrementing
-	 * fobj->shared_count
+	 * memory barrier is added by write_seqcount_begin,
+	 * fobj->shared_count is protected by this lock too
 	 */
-	smp_wmb();
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
 	fobj->shared_count++;
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 }
 
 static void
@@ -112,11 +127,12 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 			struct fence *fence)
 {
 	unsigned i;
+	struct fence *old_fence = NULL;
 
 	fence_get(fence);
 
 	if (!old) {
-		fobj->shared[0] = fence;
+		RCU_INIT_POINTER(fobj->shared[0], fence);
 		fobj->shared_count = 1;
 		goto done;
 	}
@@ -130,19 +146,38 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count = old->shared_count;
 
 	for (i = 0; i < old->shared_count; ++i) {
-		if (fence && old->shared[i]->context == fence->context) {
-			fence_put(old->shared[i]);
-			fobj->shared[i] = fence;
-			fence = NULL;
+		struct fence *check;
+
+		check = rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj));
+
+		if (!old_fence && check->context == fence->context) {
+			old_fence = check;
+			RCU_INIT_POINTER(fobj->shared[i], fence);
 		} else
-			fobj->shared[i] = old->shared[i];
+			RCU_INIT_POINTER(fobj->shared[i], check);
+	}
+	if (!old_fence) {
+		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+		fobj->shared_count++;
 	}
-	if (fence)
-		fobj->shared[fobj->shared_count++] = fence;
 
 done:
-	obj->fence = fobj;
-	kfree(old);
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/*
+	 * RCU_INIT_POINTER can be used here,
+	 * seqcount provides the necessary barriers
+	 */
+	RCU_INIT_POINTER(obj->fence, fobj);
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	if (old)
+		kfree_rcu(old, rcu);
+
+	if (old_fence)
+		fence_put(old_fence);
 }
 
 /*
@@ -158,7 +193,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	obj->staged = NULL;
 
 	if (!fobj) {
-		BUG_ON(old->shared_count == old->shared_max);
+		BUG_ON(old->shared_count >= old->shared_max);
 		reservation_object_add_shared_inplace(obj, old, fence);
 	} else
 		reservation_object_add_shared_replace(obj, old, fobj, fence);
@@ -168,26 +203,275 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
-	struct fence *old_fence = obj->fence_excl;
+	struct fence *old_fence = reservation_object_get_excl(obj);
 	struct reservation_object_list *old;
 	u32 i = 0;
 
 	old = reservation_object_get_list(obj);
-	if (old) {
+	if (old)
 		i = old->shared_count;
-		old->shared_count = 0;
-	}
 
 	if (fence)
 		fence_get(fence);
 
-	obj->fence_excl = fence;
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
+	if (old)
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 
 	/* inplace update, no shared fences */
 	while (i--)
-		fence_put(old->shared[i]);
+		fence_put(rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj)));
 
 	if (old_fence)
 		fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
+
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct fence ***pshared)
+{
+	unsigned shared_count = 0;
+	unsigned retry = 1;
+	struct fence **shared = NULL, *fence_excl = NULL;
+	int ret = 0;
+
+	while (retry) {
+		struct reservation_object_list *fobj;
+		unsigned seq;
+
+		seq = read_seqcount_begin(&obj->seq);
+
+		rcu_read_lock();
+
+		fobj = rcu_dereference(obj->fence);
+		if (fobj) {
+			struct fence **nshared;
+			size_t sz = sizeof(*shared) * fobj->shared_max;
+
+			nshared = krealloc(shared, sz,
+					   GFP_NOWAIT | __GFP_NOWARN);
+			if (!nshared) {
+				rcu_read_unlock();
+				nshared = krealloc(shared, sz, GFP_KERNEL);
+				if (nshared) {
+					shared = nshared;
+					continue;
+				}
+
+				ret = -ENOMEM;
+				shared_count = 0;
+				break;
+			}
+			shared = nshared;
+			memcpy(shared, fobj->shared, sz);
+			shared_count = fobj->shared_count;
+		} else
+			shared_count = 0;
+		fence_excl = rcu_dereference(obj->fence_excl);
+
+		retry = read_seqcount_retry(&obj->seq, seq);
+		if (retry)
+			goto unlock;
+
+		if (!fence_excl || fence_get_rcu(fence_excl)) {
+			unsigned i;
+
+			for (i = 0; i < shared_count; ++i) {
+				if (fence_get_rcu(shared[i]))
+					continue;
+
+				/* uh oh, refcount failed, abort and retry */
+				while (i--)
+					fence_put(shared[i]);
+
+				if (fence_excl) {
+					fence_put(fence_excl);
+					fence_excl = NULL;
+				}
+
+				retry = 1;
+				break;
+			}
+		} else
+			retry = 1;
+
+unlock:
+		rcu_read_unlock();
+	}
+	*pshared_count = shared_count;
+	if (shared_count)
+		*pshared = shared;
+	else {
+		*pshared = NULL;
+		kfree(shared);
+	}
+	*pfence_excl = fence_excl;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout)
+{
+	struct fence *fence;
+	unsigned seq, shared_count, i = 0;
+	long ret = timeout;
+
+retry:
+	fence = NULL;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (wait_all) {
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+				continue;
+
+			if (!fence_get_rcu(lfence))
+				goto unlock_retry;
+
+			if (fence_is_signaled(lfence)) {
+				fence_put(lfence);
+				continue;
+			}
+
+			fence = lfence;
+			break;
+		}
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl &&
+		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
+			if (!fence_get_rcu(fence_excl))
+				goto unlock_retry;
+
+			if (fence_is_signaled(fence_excl))
+				fence_put(fence_excl);
+			else
+				fence = fence_excl;
+		}
+	}
+
+	rcu_read_unlock();
+	if (fence) {
+		ret = fence_wait_timeout(fence, intr, ret);
+		fence_put(fence);
+		if (ret > 0 && wait_all && (i + 1 < shared_count))
+			goto retry;
+	}
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+
+
+static inline int
+reservation_object_test_signaled_single(struct fence *passed_fence)
+{
+	struct fence *fence, *lfence = passed_fence;
+	int ret = 1;
+
+	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		int ret;
+
+		fence = fence_get_rcu(lfence);
+		if (!fence)
+			return -1;
+
+		ret = !!fence_is_signaled(fence);
+		fence_put(fence);
+	}
+	return ret;
+}
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all)
+{
+	unsigned seq, shared_count;
+	int ret = true;
+
+retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (test_all) {
+		unsigned i;
+
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
+
+			ret = reservation_object_test_signaled_single(fence);
+			if (ret < 0)
+				goto unlock_retry;
+			else if (!ret)
+				break;
+		}
+
+		/*
+		 * There could be a read_seqcount_retry here, but nothing cares
+		 * about whether it's the old or newer fence pointers that are
+		 * signaled. That race could still have happened after checking
+		 * read_seqcount_retry. If you care, use ww_mutex_lock.
+		 */
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl) {
+			ret = reservation_object_test_signaled_single(fence_excl);
+			if (ret < 0)
+				goto unlock_retry;
+		}
+	}
+
+	rcu_read_unlock();
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);