author     Chris Wilson <chris@chris-wilson.co.uk>    2019-08-14 14:24:01 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>    2019-08-16 07:40:58 -0400
commit     b016cd6ed4b772759804e0d6082bd1f5ca63b8ee (patch)
tree       278f8b1426b40316f5744cd6155ec51ecc9dd6c7
parent     dc2e1e5b279966affbd11ff7cfef52eb634ca2c9 (diff)
dma-buf: Restore seqlock around dma_resv updates
This reverts
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while keeping the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190814182401.25009-1-chris@chris-wilson.co.uk
-rw-r--r--  drivers/dma-buf/dma-buf.c                         |  31
-rw-r--r--  drivers/dma-buf/dma-resv.c                        | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |   7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c          |  24
-rw-r--r--  include/linux/dma-resv.h                          | 113
5 files changed, 175 insertions(+), 109 deletions(-)
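The read side restored by this patch is the classic seqlock pattern: sample obj->seq, dereference the exclusive fence and shared fence list under rcu_read_lock(), and start over if a writer bumped the sequence in the meantime. The following is a minimal illustrative sketch of that pattern, modelled on the dma_buf_poll() hunk below; example_read_fences() is a hypothetical helper, not part of this patch.

/*
 * Illustrative only: read a consistent snapshot of the fences, retrying
 * whenever a concurrent writer changes them under us.
 */
static void example_read_fences(struct dma_resv *resv)
{
        struct dma_resv_list *fobj;
        struct dma_fence *fence_excl;
        unsigned int shared_count, seq;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        shared_count = fobj ? fobj->shared_count : 0;
        fence_excl = rcu_dereference(resv->fence_excl);

        if (read_seqcount_retry(&resv->seq, seq)) {
                /* a writer raced with us, drop the RCU lock and retry */
                rcu_read_unlock();
                goto retry;
        }

        /* fence_excl and fobj->shared[0..shared_count) are now consistent */

        rcu_read_unlock();
}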
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b3400d6524ab..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
         struct dma_resv_list *fobj;
         struct dma_fence *fence_excl;
         __poll_t events;
-        unsigned shared_count;
+        unsigned shared_count, seq;
 
         dmabuf = file->private_data;
         if (!dmabuf || !dmabuf->resv)
@@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
         if (!events)
                 return 0;
 
+retry:
+        seq = read_seqcount_begin(&resv->seq);
         rcu_read_lock();
-        dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
+
+        fobj = rcu_dereference(resv->fence);
+        if (fobj)
+                shared_count = fobj->shared_count;
+        else
+                shared_count = 0;
+        fence_excl = rcu_dereference(resv->fence_excl);
+        if (read_seqcount_retry(&resv->seq, seq)) {
+                rcu_read_unlock();
+                goto retry;
+        }
+
         if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                 __poll_t pevents = EPOLLIN;
@@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
         struct dma_resv *robj;
         struct dma_resv_list *fobj;
         struct dma_fence *fence;
+        unsigned seq;
         int count = 0, attach_count, shared_count, i;
         size_t size = 0;
 
@@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
                            buf_obj->name ?: "");
 
                 robj = buf_obj->resv;
-                rcu_read_lock();
-                dma_resv_fences(robj, &fence, &fobj, &shared_count);
-                rcu_read_unlock();
+                while (true) {
+                        seq = read_seqcount_begin(&robj->seq);
+                        rcu_read_lock();
+                        fobj = rcu_dereference(robj->fence);
+                        shared_count = fobj ? fobj->shared_count : 0;
+                        fence = rcu_dereference(robj->fence_excl);
+                        if (!read_seqcount_retry(&robj->seq, seq))
+                                break;
+                        rcu_read_unlock();
+                }
 
                 if (fence)
                         seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f5142683c851..42a8f3f11681 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -49,6 +49,12 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -96,6 +102,9 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 void dma_resv_init(struct dma_resv *obj)
 {
         ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+        __seqcount_init(&obj->seq, reservation_seqcount_string,
+                        &reservation_seqcount_class);
         RCU_INIT_POINTER(obj->fence, NULL);
         RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -225,6 +234,9 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
         fobj = dma_resv_get_list(obj);
         count = fobj->shared_count;
 
+        preempt_disable();
+        write_seqcount_begin(&obj->seq);
+
         for (i = 0; i < count; ++i) {
 
                 old = rcu_dereference_protected(fobj->shared[i],
@@ -242,6 +254,9 @@ replace:
         RCU_INIT_POINTER(fobj->shared[i], fence);
         /* pointer update must be visible before we extend the shared_count */
         smp_store_mb(fobj->shared_count, count);
+
+        write_seqcount_end(&obj->seq);
+        preempt_enable();
         dma_fence_put(old);
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -269,10 +284,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
         dma_fence_get(fence);
 
         preempt_disable();
-        rcu_assign_pointer(obj->fence_excl, fence);
-        /* pointer update must be visible before we modify the shared_count */
+        write_seqcount_begin(&obj->seq);
+        /* write_seqcount_begin provides the necessary memory barrier */
+        RCU_INIT_POINTER(obj->fence_excl, fence);
         if (old)
-                smp_store_mb(old->shared_count, 0);
+                old->shared_count = 0;
+        write_seqcount_end(&obj->seq);
         preempt_enable();
 
         /* inplace update, no shared fences */
@@ -295,15 +312,17 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
         struct dma_resv_list *src_list, *dst_list;
         struct dma_fence *old, *new;
-        unsigned int i, shared_count;
+        unsigned i;
 
         dma_resv_assert_held(dst);
 
         rcu_read_lock();
+        src_list = rcu_dereference(src->fence);
 
 retry:
-        dma_resv_fences(src, &new, &src_list, &shared_count);
-        if (shared_count) {
+        if (src_list) {
+                unsigned shared_count = src_list->shared_count;
+
                 rcu_read_unlock();
 
                 dst_list = dma_resv_list_alloc(shared_count);
@@ -311,14 +330,14 @@ retry:
                         return -ENOMEM;
 
                 rcu_read_lock();
-                dma_resv_fences(src, &new, &src_list, &shared_count);
-                if (!src_list || shared_count > dst_list->shared_max) {
+                src_list = rcu_dereference(src->fence);
+                if (!src_list || src_list->shared_count > shared_count) {
                         kfree(dst_list);
                         goto retry;
                 }
 
                 dst_list->shared_count = 0;
-                for (i = 0; i < shared_count; ++i) {
+                for (i = 0; i < src_list->shared_count; ++i) {
                         struct dma_fence *fence;
 
                         fence = rcu_dereference(src_list->shared[i]);
@@ -328,6 +347,7 @@ retry:
 
                 if (!dma_fence_get_rcu(fence)) {
                         dma_resv_list_free(dst_list);
+                        src_list = rcu_dereference(src->fence);
                         goto retry;
                 }
 
@@ -342,18 +362,18 @@ retry:
                 dst_list = NULL;
         }
 
-        if (new && !dma_fence_get_rcu(new)) {
-                dma_resv_list_free(dst_list);
-                goto retry;
-        }
+        new = dma_fence_get_rcu_safe(&src->fence_excl);
         rcu_read_unlock();
 
         src_list = dma_resv_get_list(dst);
         old = dma_resv_get_excl(dst);
 
         preempt_disable();
-        rcu_assign_pointer(dst->fence_excl, new);
-        rcu_assign_pointer(dst->fence, dst_list);
+        write_seqcount_begin(&dst->seq);
+        /* write_seqcount_begin provides the necessary memory barrier */
+        RCU_INIT_POINTER(dst->fence_excl, new);
+        RCU_INIT_POINTER(dst->fence, dst_list);
+        write_seqcount_end(&dst->seq);
         preempt_enable();
 
         dma_resv_list_free(src_list);
@@ -388,18 +408,19 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 
         do {
                 struct dma_resv_list *fobj;
-                unsigned int i;
+                unsigned int i, seq;
                 size_t sz = 0;
 
-                i = 0;
+                shared_count = i = 0;
 
                 rcu_read_lock();
-                dma_resv_fences(obj, &fence_excl, &fobj,
-                                &shared_count);
+                seq = read_seqcount_begin(&obj->seq);
 
+                fence_excl = rcu_dereference(obj->fence_excl);
                 if (fence_excl && !dma_fence_get_rcu(fence_excl))
                         goto unlock;
 
+                fobj = rcu_dereference(obj->fence);
                 if (fobj)
                         sz += sizeof(*shared) * fobj->shared_max;
 
@@ -427,6 +448,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
                         break;
                 }
                 shared = nshared;
+                shared_count = fobj ? fobj->shared_count : 0;
                 for (i = 0; i < shared_count; ++i) {
                         shared[i] = rcu_dereference(fobj->shared[i]);
                         if (!dma_fence_get_rcu(shared[i]))
@@ -434,7 +456,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
                 }
         }
 
-        if (i != shared_count) {
+        if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
                 while (i--)
                         dma_fence_put(shared[i]);
                 dma_fence_put(fence_excl);
@@ -478,17 +500,18 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
                                bool wait_all, bool intr,
                                unsigned long timeout)
 {
-        struct dma_resv_list *fobj;
         struct dma_fence *fence;
-        unsigned shared_count;
+        unsigned seq, shared_count;
         long ret = timeout ? timeout : 1;
         int i;
 
 retry:
+        shared_count = 0;
+        seq = read_seqcount_begin(&obj->seq);
         rcu_read_lock();
         i = -1;
 
-        dma_resv_fences(obj, &fence, &fobj, &shared_count);
+        fence = rcu_dereference(obj->fence_excl);
         if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                 if (!dma_fence_get_rcu(fence))
                         goto unlock_retry;
@@ -503,6 +526,11 @@ retry:
         }
 
         if (wait_all) {
+                struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+                if (fobj)
+                        shared_count = fobj->shared_count;
+
                 for (i = 0; !fence && i < shared_count; ++i) {
                         struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
508 536
@@ -525,6 +553,11 @@ retry:
525 553
526 rcu_read_unlock(); 554 rcu_read_unlock();
527 if (fence) { 555 if (fence) {
556 if (read_seqcount_retry(&obj->seq, seq)) {
557 dma_fence_put(fence);
558 goto retry;
559 }
560
528 ret = dma_fence_wait_timeout(fence, intr, ret); 561 ret = dma_fence_wait_timeout(fence, intr, ret);
529 dma_fence_put(fence); 562 dma_fence_put(fence);
530 if (ret > 0 && wait_all && (i + 1 < shared_count)) 563 if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -567,19 +600,23 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-        struct dma_resv_list *fobj;
-        struct dma_fence *fence_excl;
-        unsigned shared_count;
+        unsigned seq, shared_count;
         int ret;
 
         rcu_read_lock();
 retry:
         ret = true;
+        shared_count = 0;
+        seq = read_seqcount_begin(&obj->seq);
 
-        dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
         if (test_all) {
                 unsigned i;
 
+                struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+                if (fobj)
+                        shared_count = fobj->shared_count;
+
                 for (i = 0; i < shared_count; ++i) {
                         struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
@@ -589,14 +626,24 @@ retry:
                         else if (!ret)
                                 break;
                 }
-        }
 
-        if (!shared_count && fence_excl) {
-                ret = dma_resv_test_signaled_single(fence_excl);
-                if (ret < 0)
+                if (read_seqcount_retry(&obj->seq, seq))
                         goto retry;
         }
 
+        if (!shared_count) {
+                struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+                if (fence_excl) {
+                        ret = dma_resv_test_signaled_single(fence_excl);
+                        if (ret < 0)
+                                goto retry;
+
+                        if (read_seqcount_retry(&obj->seq, seq))
+                                goto retry;
+                }
+        }
+
         rcu_read_unlock();
         return ret;
 }
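On the write side, every update above wraps the RCU pointer stores in a seqcount write section with preemption disabled; that is what forces the lockless readers to retry. A condensed, illustrative sketch of that pattern follows; example_set_excl_fence() is hypothetical, and unlike the real dma_resv_add_excl_fence() above it elides dropping the references to the displaced shared fences.

/*
 * Illustrative only: publish a new exclusive fence and invalidate the
 * shared list inside a seqcount write section.
 */
static void example_set_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
        struct dma_fence *old_fence = dma_resv_get_excl(obj);
        struct dma_resv_list *old_list = dma_resv_get_list(obj);

        dma_resv_assert_held(obj);      /* writers are still serialized by obj->lock */

        if (fence)
                dma_fence_get(fence);

        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old_list)
                old_list->shared_count = 0;
        write_seqcount_end(&obj->seq);
        preempt_enable();

        /* dropping references to the displaced shared fences is elided here */
        dma_fence_put(old_fence);
}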
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bc4ec6b20a87..76e3516484e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -251,7 +251,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
         new->shared_max = old->shared_max;
         new->shared_count = k;
 
-        rcu_assign_pointer(resv->fence, new);
+        /* Install the new fence list, seqcount provides the barriers */
+        preempt_disable();
+        write_seqcount_begin(&resv->seq);
+        RCU_INIT_POINTER(resv->fence, new);
+        write_seqcount_end(&resv->seq);
+        preempt_enable();
 
         /* Drop the references to the removed fences or move them to ef_list */
         for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index a2aff1d8290e..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -83,8 +83,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_busy *args = data;
         struct drm_i915_gem_object *obj;
         struct dma_resv_list *list;
-        unsigned int i, shared_count;
-        struct dma_fence *excl;
+        unsigned int seq;
         int err;
 
         err = -ENOENT;
@@ -110,18 +109,29 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
          * to report the overall busyness. This is what the wait-ioctl does.
          *
          */
-        dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
+retry:
+        seq = raw_read_seqcount(&obj->base.resv->seq);
 
         /* Translate the exclusive fence to the READ *and* WRITE engine */
-        args->busy = busy_check_writer(excl);
+        args->busy =
+                busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
 
         /* Translate shared fences to READ set of engines */
-        for (i = 0; i < shared_count; ++i) {
-                struct dma_fence *fence = rcu_dereference(list->shared[i]);
+        list = rcu_dereference(obj->base.resv->fence);
+        if (list) {
+                unsigned int shared_count = list->shared_count, i;
 
-                args->busy |= busy_check_reader(fence);
+                for (i = 0; i < shared_count; ++i) {
+                        struct dma_fence *fence =
+                                rcu_dereference(list->shared[i]);
+
+                        args->busy |= busy_check_reader(fence);
+                }
         }
 
+        if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+                goto retry;
+
         err = 0;
 out:
         rcu_read_unlock();
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 38f2802afabb..ee50d10f052b 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,6 +46,8 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -69,6 +71,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
         struct ww_mutex lock;
+        seqcount_t seq;
 
         struct dma_fence __rcu *fence_excl;
         struct dma_resv_list __rcu *fence;
@@ -78,24 +81,6 @@ struct dma_resv {
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
 /**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
-        return rcu_dereference_protected(obj->fence_excl,
-                                         dma_resv_held(obj));
-}
-
-/**
  * dma_resv_get_list - get the reservation object's
  * shared fence list, with update-side lock held
  * @obj: the reservation object
@@ -110,53 +95,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
-                                   struct dma_fence **excl,
-                                   struct dma_resv_list **list,
-                                   u32 *shared_count)
-{
-        do {
-                *excl = rcu_dereference(obj->fence_excl);
-                *list = rcu_dereference(obj->fence);
-                *shared_count = *list ? (*list)->shared_count : 0;
-                smp_rmb(); /* See dma_resv_add_excl_fence */
-        } while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-        struct dma_fence *fence;
-
-        if (!rcu_access_pointer(obj->fence_excl))
-                return NULL;
-
-        rcu_read_lock();
-        fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-        rcu_read_unlock();
-
-        return fence;
-}
-
-/**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
@@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
         ww_mutex_unlock(&obj->lock);
 }
 
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+        return rcu_dereference_protected(obj->fence_excl,
+                                         dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments it's
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+        struct dma_fence *fence;
+
+        if (!rcu_access_pointer(obj->fence_excl))
+                return NULL;
+
+        rcu_read_lock();
+        fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+        rcu_read_unlock();
+
+        return fence;
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
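Readers that only need a reference to the exclusive fence can use the dma_resv_get_excl_rcu() helper restored above on its own: dma_fence_get_rcu_safe() re-checks that the pointer it took a reference on is still the installed fence, so no seqcount loop is required for that case. A small illustrative caller, assuming the hypothetical name example_excl_signaled():

/* Illustrative only: report whether the current exclusive fence has signaled. */
static bool example_excl_signaled(struct dma_resv *resv)
{
        struct dma_fence *fence = dma_resv_get_excl_rcu(resv);
        bool signaled = true;

        if (fence) {
                signaled = dma_fence_is_signaled(fence);
                dma_fence_put(fence);
        }

        return signaled;
}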