diff options
author | Christian König <christian.koenig@amd.com> | 2017-10-27 08:17:09 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-04 16:41:32 -0500 |
commit | 3da917b6c6843ad0162e9768c40a83b6c4448646 (patch) | |
tree | c5982ec901b7e976dd905e9caa875f187fa6112b /drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |
parent | 34a4d2bf06b3ab92024b8e26d6049411369d1f1a (diff) |
drm/amdgpu: nuke amdgpu_ttm_is_bound() v2
Rename amdgpu_gtt_mgr_is_allocated() to amdgpu_gtt_mgr_has_gart_addr() and use
that instead.
v2: rename the function as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 24 |
1 files changed, 9 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6a724f9f1b86..3d02c2dd06e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, | |||
282 | { | 282 | { |
283 | uint64_t addr = 0; | 283 | uint64_t addr = 0; |
284 | 284 | ||
285 | if (mem->mem_type != TTM_PL_TT || | 285 | if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) { |
286 | amdgpu_gtt_mgr_is_allocated(mem)) { | ||
287 | addr = mm_node->start << PAGE_SHIFT; | 286 | addr = mm_node->start << PAGE_SHIFT; |
288 | addr += bo->bdev->man[mem->mem_type].gpu_offset; | 287 | addr += bo->bdev->man[mem->mem_type].gpu_offset; |
289 | } | 288 | } |
@@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, | |||
369 | * dst to window 1 | 368 | * dst to window 1 |
370 | */ | 369 | */ |
371 | if (src->mem->mem_type == TTM_PL_TT && | 370 | if (src->mem->mem_type == TTM_PL_TT && |
372 | !amdgpu_gtt_mgr_is_allocated(src->mem)) { | 371 | !amdgpu_gtt_mgr_has_gart_addr(src->mem)) { |
373 | r = amdgpu_map_buffer(src->bo, src->mem, | 372 | r = amdgpu_map_buffer(src->bo, src->mem, |
374 | PFN_UP(cur_size + src_page_offset), | 373 | PFN_UP(cur_size + src_page_offset), |
375 | src_node_start, 0, ring, | 374 | src_node_start, 0, ring, |
@@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, | |||
383 | } | 382 | } |
384 | 383 | ||
385 | if (dst->mem->mem_type == TTM_PL_TT && | 384 | if (dst->mem->mem_type == TTM_PL_TT && |
386 | !amdgpu_gtt_mgr_is_allocated(dst->mem)) { | 385 | !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) { |
387 | r = amdgpu_map_buffer(dst->bo, dst->mem, | 386 | r = amdgpu_map_buffer(dst->bo, dst->mem, |
388 | PFN_UP(cur_size + dst_page_offset), | 387 | PFN_UP(cur_size + dst_page_offset), |
389 | dst_node_start, 1, ring, | 388 | dst_node_start, 1, ring, |
@@ -861,8 +860,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
861 | bo_mem->mem_type == AMDGPU_PL_OA) | 860 | bo_mem->mem_type == AMDGPU_PL_OA) |
862 | return -EINVAL; | 861 | return -EINVAL; |
863 | 862 | ||
864 | if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) | 863 | if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { |
864 | gtt->offset = AMDGPU_BO_INVALID_OFFSET; | ||
865 | return 0; | 865 | return 0; |
866 | } | ||
866 | 867 | ||
867 | spin_lock(&gtt->adev->gtt_list_lock); | 868 | spin_lock(&gtt->adev->gtt_list_lock); |
868 | flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | 869 | flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); |
@@ -882,23 +883,16 @@ error_gart_bind: | |||
882 | return r; | 883 | return r; |
883 | } | 884 | } |
884 | 885 | ||
885 | bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) | ||
886 | { | ||
887 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
888 | |||
889 | return gtt && !list_empty(&gtt->list); |||
890 | } | ||
891 | |||
892 | int amdgpu_ttm_bind(struct ttm_buffer_object *bo) | 886 | int amdgpu_ttm_bind(struct ttm_buffer_object *bo) |
893 | { | 887 | { |
894 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | 888 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
895 | struct ttm_tt *ttm = bo->ttm; | ||
896 | struct ttm_mem_reg tmp; | 889 | struct ttm_mem_reg tmp; |
897 | struct ttm_placement placement; | 890 | struct ttm_placement placement; |
898 | struct ttm_place placements; | 891 | struct ttm_place placements; |
899 | int r; | 892 | int r; |
900 | 893 | ||
901 | if (!ttm || amdgpu_ttm_is_bound(ttm)) | 894 | if (bo->mem.mem_type != TTM_PL_TT || |
895 | amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) | ||
902 | return 0; | 896 | return 0; |
903 | 897 | ||
904 | tmp = bo->mem; | 898 | tmp = bo->mem; |
@@ -959,7 +953,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) | |||
959 | if (gtt->userptr) | 953 | if (gtt->userptr) |
960 | amdgpu_ttm_tt_unpin_userptr(ttm); | 954 | amdgpu_ttm_tt_unpin_userptr(ttm); |
961 | 955 | ||
962 | if (!amdgpu_ttm_is_bound(ttm)) | 956 | if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) |
963 | return 0; | 957 | return 0; |
964 | 958 | ||
965 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ | 959 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ |