author     Christian König <christian.koenig@amd.com>  2016-11-17 06:24:48 -0500
committer  Alex Deucher <alexander.deucher@amd.com>    2016-11-23 15:08:45 -0500
commit     f45dc74c93241ad0125fbc08c48b2ebe20f2f472 (patch)
tree       788ff3465ccc33684d596236b8a316a7ad069507 /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent     c3af1258a7c784fba6863357b94e31a6824aaff9 (diff)
drm/amdgpu: improve AMDGPU_GEM_CREATE_VRAM_CLEARED handling (v2)
Drop this whole reserve/unreserve dance, just lock the reservation object
manually when creating the BO.

v2: rebase on dma_fence renaming

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
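In short, the new scheme in amdgpu_bo_create_restricted() boils down to the
condensed sketch below (simplified from the patch that follows; unrelated
setup and error paths are omitted):

	/* When no external reservation object is passed in, initialize the
	 * BO's own ttm_resv and trylock it.  The trylock cannot contend,
	 * since nobody else can know about this BO yet, hence the WARN_ON().
	 */
	if (!resv) {
		bool locked;

		reservation_object_init(&bo->tbo.ttm_resv);
		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
		WARN_ON(!locked);
	}

	/* Hand whichever reservation object applies to ttm_bo_init(), so the
	 * BO comes back already reserved ...
	 */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
			&amdgpu_ttm_bo_destroy);

	/* ... which lets the optional VRAM clear fence the BO directly,
	 * without the old amdgpu_bo_reserve()/amdgpu_bo_unreserve() pair.
	 * Only the internally taken lock is dropped again at the end.
	 */
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);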
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 611da3bd2981..bf79b73e1538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -371,36 +371,36 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 
 	amdgpu_fill_placement_to_bo(bo, placement);
 	/* Kernel allocation are uninterruptible */
+
+	if (!resv) {
+		bool locked;
+
+		reservation_object_init(&bo->tbo.ttm_resv);
+		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
+		WARN_ON(!locked);
+	}
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
-	if (unlikely(r != 0)) {
+			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
+			&amdgpu_ttm_bo_destroy);
+	if (unlikely(r != 0))
 		return r;
-	}
 
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_bo_reserve(bo, false);
-		if (unlikely(r != 0))
-			goto fail_free;
-
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (unlikely(r != 0))
-			goto fail_unreserve;
-
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
 
 		amdgpu_bo_fence(bo, fence, false);
-		amdgpu_bo_unreserve(bo);
 		dma_fence_put(bo->tbo.moving);
 		bo->tbo.moving = dma_fence_get(fence);
 		dma_fence_put(fence);
 	}
+	if (!resv)
+		ww_mutex_unlock(&bo->tbo.resv->lock);
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
@@ -408,8 +408,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	return 0;
 
 fail_unreserve:
-	amdgpu_bo_unreserve(bo);
-fail_free:
+	ww_mutex_unlock(&bo->tbo.resv->lock);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
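For context (not part of this patch): a driver-internal allocation that
relies on AMDGPU_GEM_CREATE_VRAM_CLEARED could look roughly like the sketch
below. The amdgpu_bo_create() prototype assumed here is the one from this era
of the driver, adev/size are placeholders, and error handling is abbreviated.

	struct amdgpu_bo *bo;
	int r;

	/* Hypothetical caller: ask for a zero-filled VRAM buffer.  With this
	 * patch the clear is fenced against the BO's own reservation object,
	 * which amdgpu_bo_create_restricted() now locks internally when the
	 * caller passes resv == NULL.
	 */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
			     NULL, NULL, &bo);
	if (r)
		return r;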