diff options
author | Christian König <christian.koenig@amd.com> | 2017-10-26 11:54:12 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-04 16:41:44 -0500 |
commit | 40575732b6a2ad6430ad8056dabe4cdc01224534 (patch) | |
tree | e261091ea93e08800f4dad59ccc8333efc3a0a84 /drivers/gpu/drm/amd/amdgpu | |
parent | c5835bbb1195996c6c5bf2bf2cdbae08c936fc93 (diff) |
drm/amdgpu: don't use ttm_bo_move_ttm in amdgpu_ttm_bind v2
Just allocate the GART space and fill it.
This prevents forcing the BO to be idle.
v2: don't unbind/bind at all, just fill the allocated GART space
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 21 |
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 793f41dd4ee4..44983d16bf61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -878,9 +878,11 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
878 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) | 878 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) |
879 | { | 879 | { |
880 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | 880 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
881 | struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; | ||
881 | struct ttm_mem_reg tmp; | 882 | struct ttm_mem_reg tmp; |
882 | struct ttm_placement placement; | 883 | struct ttm_placement placement; |
883 | struct ttm_place placements; | 884 | struct ttm_place placements; |
885 | uint64_t flags; | ||
884 | int r; | 886 | int r; |
885 | 887 | ||
886 | if (bo->mem.mem_type != TTM_PL_TT || | 888 | if (bo->mem.mem_type != TTM_PL_TT || |
@@ -902,14 +904,21 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) | |||
902 | if (unlikely(r)) | 904 | if (unlikely(r)) |
903 | return r; | 905 | return r; |
904 | 906 | ||
905 | r = ttm_bo_move_ttm(bo, true, false, &tmp); | 907 | flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); |
906 | if (unlikely(r)) | 908 | gtt->offset = (u64)tmp.start << PAGE_SHIFT; |
909 | r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages, | ||
910 | bo->ttm->pages, gtt->ttm.dma_address, flags); | ||
911 | if (unlikely(r)) { | ||
907 | ttm_bo_mem_put(bo, &tmp); | 912 | ttm_bo_mem_put(bo, &tmp); |
908 | else | 913 | return r; |
909 | bo->offset = (bo->mem.start << PAGE_SHIFT) + | 914 | } |
910 | bo->bdev->man[bo->mem.mem_type].gpu_offset; | ||
911 | 915 | ||
912 | return r; | 916 | ttm_bo_mem_put(bo, &bo->mem); |
917 | bo->mem = tmp; | ||
918 | bo->offset = (bo->mem.start << PAGE_SHIFT) + | ||
919 | bo->bdev->man[bo->mem.mem_type].gpu_offset; | ||
920 | |||
921 | return 0; | ||
913 | } | 922 | } |
914 | 923 | ||
915 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) | 924 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) |