diff options
author | Christian König <christian.koenig@amd.com> | 2019-03-29 14:30:23 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2019-04-12 12:28:17 -0400 |
commit | 3d2aca8c8620346abdba96c6300d2c0b90a1d0cc (patch) | |
tree | f994e5ccf90f901233b4e9ef0752c2759ccf3fb0 | |
parent | 1afeb3144344e303547a211ff29d1e18ba9a9994 (diff) |
drm/amdgpu: fix old fence check in amdgpu_fence_emit
We don't hold a reference to the old fence, so it can go away
any time we are waiting for it to signal.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 24 |
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce..4dee2326b29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, | |||
136 | { | 136 | { |
137 | struct amdgpu_device *adev = ring->adev; | 137 | struct amdgpu_device *adev = ring->adev; |
138 | struct amdgpu_fence *fence; | 138 | struct amdgpu_fence *fence; |
139 | struct dma_fence *old, **ptr; | 139 | struct dma_fence __rcu **ptr; |
140 | uint32_t seq; | 140 | uint32_t seq; |
141 | int r; | ||
141 | 142 | ||
142 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); | 143 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
143 | if (fence == NULL) | 144 | if (fence == NULL) |
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, | |||
153 | seq, flags | AMDGPU_FENCE_FLAG_INT); | 154 | seq, flags | AMDGPU_FENCE_FLAG_INT); |
154 | 155 | ||
155 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; | 156 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; |
157 | if (unlikely(rcu_dereference_protected(*ptr, 1))) { | ||
158 | struct dma_fence *old; | ||
159 | |||
160 | rcu_read_lock(); | ||
161 | old = dma_fence_get_rcu_safe(ptr); | ||
162 | rcu_read_unlock(); | ||
163 | |||
164 | if (old) { | ||
165 | r = dma_fence_wait(old, false); | ||
166 | dma_fence_put(old); | ||
167 | if (r) | ||
168 | return r; | ||
169 | } | ||
170 | } | ||
171 | |||
156 | /* This function can't be called concurrently anyway, otherwise | 172 | /* This function can't be called concurrently anyway, otherwise |
157 | * emitting the fence would mess up the hardware ring buffer. | 173 | * emitting the fence would mess up the hardware ring buffer. |
158 | */ | 174 | */ |
159 | old = rcu_dereference_protected(*ptr, 1); | ||
160 | if (old && !dma_fence_is_signaled(old)) { | ||
161 | DRM_INFO("rcu slot is busy\n"); | ||
162 | dma_fence_wait(old, false); | ||
163 | } | ||
164 | |||
165 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); | 175 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); |
166 | 176 | ||
167 | *f = &fence->base; | 177 | *f = &fence->base; |