author     Chris Wilson <chris@chris-wilson.co.uk>    2019-01-30 05:55:17 -0500
committer  Alex Deucher <alexander.deucher@amd.com>   2019-01-30 12:52:44 -0500
commit     6e11ea9de9576a644045ffdc2067c09bc2012eda (patch)
tree       1c7ef1ef1d2562f15a966848cf4e538ae85cebc8
parent     2f10d823739680d2477ce34437e8a08a53117f40 (diff)
drm/amdgpu: Transfer fences to dmabuf importer
amdgpu only uses shared-fences internally, but dmabuf importers rely on
implicit write hazard tracking via the reservation_object.fence_excl. For
example, the importer uses the write hazard for timing a page flip to only
occur after the exporter has finished flushing its write into the surface.
As such, on exporting a dmabuf, we must either flush all outstanding fences
(for we do not know which are writes and should have been exclusive) or
alternatively create a new exclusive fence that is the composite of all the
existing shared fences, and so will only be signaled when all earlier fences
are signaled (ensuring that we cannot be signaled before the completion of
any earlier write).

v2: reservation_object is already locked by amdgpu_bo_reserve()
v3: Replace looping with get_fences_rcu and special case the promotion
    of a single shared fence directly to an exclusive fence, bypassing
    the fence array.
v4: Drop the fence array ref after assigning to reservation_object

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107341
Testcase: igt/amd_prime/amd-to-i915
References: 8e94a46c1770 ("drm/amdgpu: Attach exclusive fence to prime exported bo's. (v5)")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Reviewed-by: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
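[As a minimal importer-side illustration (not part of this patch; the helper
name wait_for_exporter_writes is hypothetical), the implicit sync described
above amounts to taking a reference on the exclusive fence and waiting on it
before using the buffer, e.g. before programming a page flip, using the same
era's reservation_object API:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Hypothetical importer helper: wait for all exporter writes that have
 * been folded into the exclusive fence before touching the buffer. */
static int wait_for_exporter_writes(struct reservation_object *resv)
{
	struct dma_fence *excl;
	long r = 0;

	/* Takes a reference on the current exclusive fence, if any. */
	excl = reservation_object_get_excl_rcu(resv);
	if (excl) {
		/* Interruptible wait until the (composite) fence signals. */
		r = dma_fence_wait(excl, true);
		dma_fence_put(excl);
	}
	return r < 0 ? r : 0;
}]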
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 59 +++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
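
[For reference, a standalone sketch (hypothetical helper name; it mirrors the
fence-array path added above) of the composition step: dma_fence_array_create()
with signal_on_any == false produces a fence that signals only once every
member fence has signaled, which is what lets it stand in as the exclusive
write fence. On success the array takes ownership of both the fence references
and the kmalloc'ed fences array, which is why the patch only drops them by
hand on the failure path.

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Hypothetical helper: fold @count fences (ownership transferred on
 * success) into one composite fence that signals only when all of them
 * have signaled. Returns NULL on allocation failure, in which case the
 * caller still owns @fences and must release them itself. */
static struct dma_fence *compose_write_fence(struct dma_fence **fences,
					     unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), /* fresh context */
				       0,     /* first seqno in that context */
				       false  /* signal when ALL, not ANY */);
	if (!array)
		return NULL;

	return &array->base; /* caller drops this reference when done */
}

Allocating a fresh fence context for the array keeps its seqno ordering
independent of the contexts of the member fences it wraps.]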