aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2016-08-29 03:08:31 -0400
committerSumit Semwal <sumit.semwal@linaro.org>2016-10-12 10:27:14 -0400
commit1cec20f0ea0e3bc617aed47e0936f17386c131f9 (patch)
treed70211f8dd4f56e064775ff746b46447fff3cbb7
parentfedf54132d2410c3949036e3f611ab8dd9dbe89e (diff)
dma-buf: Restart reservation_object_wait_timeout_rcu() after writes
In order to be completely generic, we have to double check the read seqlock after acquiring a reference to the fence. If the driver is allocating fences from a SLAB_DESTROY_BY_RCU, or similar freelist, then within an RCU grace period a fence may be freed and reallocated. The RCU read side critical section does not prevent this reallocation, instead we have to inspect the reservation's seqlock to double check if the fences have been reassigned as we were acquiring our reference. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Christian König <christian.koenig@amd.com> Cc: Alex Deucher <alexander.deucher@amd.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: linux-media@vger.kernel.org Cc: dri-devel@lists.freedesktop.org Cc: linaro-mm-sig@lists.linaro.org Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org> Link: http://patchwork.freedesktop.org/patch/msgid/20160829070834.22296-8-chris@chris-wilson.co.uk
-rw-r--r--drivers/dma-buf/reservation.c11
1 file changed, 5 insertions, 6 deletions
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index ba3e25dab95b..648e5d0325f9 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -388,9 +388,6 @@ retry:
388 if (fobj) 388 if (fobj)
389 shared_count = fobj->shared_count; 389 shared_count = fobj->shared_count;
390 390
391 if (read_seqcount_retry(&obj->seq, seq))
392 goto unlock_retry;
393
394 for (i = 0; i < shared_count; ++i) { 391 for (i = 0; i < shared_count; ++i) {
395 struct fence *lfence = rcu_dereference(fobj->shared[i]); 392 struct fence *lfence = rcu_dereference(fobj->shared[i]);
396 393
@@ -413,9 +410,6 @@ retry:
413 if (!shared_count) { 410 if (!shared_count) {
414 struct fence *fence_excl = rcu_dereference(obj->fence_excl); 411 struct fence *fence_excl = rcu_dereference(obj->fence_excl);
415 412
416 if (read_seqcount_retry(&obj->seq, seq))
417 goto unlock_retry;
418
419 if (fence_excl && 413 if (fence_excl &&
420 !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { 414 !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
421 if (!fence_get_rcu(fence_excl)) 415 if (!fence_get_rcu(fence_excl))
@@ -430,6 +424,11 @@ retry:
430 424
431 rcu_read_unlock(); 425 rcu_read_unlock();
432 if (fence) { 426 if (fence) {
427 if (read_seqcount_retry(&obj->seq, seq)) {
428 fence_put(fence);
429 goto retry;
430 }
431
433 ret = fence_wait_timeout(fence, intr, ret); 432 ret = fence_wait_timeout(fence, intr, ret);
434 fence_put(fence); 433 fence_put(fence);
435 if (ret > 0 && wait_all && (i + 1 < shared_count)) 434 if (ret > 0 && wait_all && (i + 1 < shared_count))