diff options
author | Christian König <christian.koenig@amd.com> | 2017-10-16 10:50:32 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-04 16:41:33 -0500 |
commit | c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d (patch) | |
tree | 1f084782b78db4224fc55dacf8246816b36508ab /drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |
parent | 3da917b6c6843ad0162e9768c40a83b6c4448646 (diff) |
drm/amdgpu: move GART recovery into GTT manager v2
The GTT manager handles the GART address space anyway, so it is
completely pointless to keep the same information around twice.
v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 |
1 files changed, 15 insertions, 36 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3d02c2dd06e5..34dbe7afb600 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -689,7 +689,6 @@ struct amdgpu_ttm_tt { | |||
689 | struct list_head guptasks; | 689 | struct list_head guptasks; |
690 | atomic_t mmu_invalidations; | 690 | atomic_t mmu_invalidations; |
691 | uint32_t last_set_pages; | 691 | uint32_t last_set_pages; |
692 | struct list_head list; | ||
693 | }; | 692 | }; |
694 | 693 | ||
695 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) | 694 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) |
@@ -865,21 +864,14 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
865 | return 0; | 864 | return 0; |
866 | } | 865 | } |
867 | 866 | ||
868 | spin_lock(&gtt->adev->gtt_list_lock); | ||
869 | flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | 867 | flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); |
870 | gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; | 868 | gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; |
871 | r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, | 869 | r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, |
872 | ttm->pages, gtt->ttm.dma_address, flags); | 870 | ttm->pages, gtt->ttm.dma_address, flags); |
873 | 871 | ||
874 | if (r) { | 872 | if (r) |
875 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", | 873 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
876 | ttm->num_pages, gtt->offset); | 874 | ttm->num_pages, gtt->offset); |
877 | goto error_gart_bind; | ||
878 | } | ||
879 | |||
880 | list_add_tail(&gtt->list, &gtt->adev->gtt_list); | ||
881 | error_gart_bind: | ||
882 | spin_unlock(>t->adev->gtt_list_lock); | ||
883 | return r; | 875 | return r; |
884 | } | 876 | } |
885 | 877 | ||
@@ -920,29 +912,23 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo) | |||
920 | return r; | 912 | return r; |
921 | } | 913 | } |
922 | 914 | ||
923 | int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) | 915 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) |
924 | { | 916 | { |
925 | struct amdgpu_ttm_tt *gtt, *tmp; | 917 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
926 | struct ttm_mem_reg bo_mem; | 918 | struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm; |
927 | uint64_t flags; | 919 | uint64_t flags; |
928 | int r; | 920 | int r; |
929 | 921 | ||
930 | bo_mem.mem_type = TTM_PL_TT; | 922 | if (!gtt) |
931 | spin_lock(&adev->gtt_list_lock); | 923 | return 0; |
932 | list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) { | 924 | |
933 | flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem); | 925 | flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
934 | r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, | 926 | r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, |
935 | gtt->ttm.ttm.pages, gtt->ttm.dma_address, | 927 | gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags); |
936 | flags); | 928 | if (r) |
937 | if (r) { | 929 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", |
938 | spin_unlock(&adev->gtt_list_lock); | 930 | gtt->ttm.ttm.num_pages, gtt->offset); |
939 | DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", | 931 | return r; |
940 | gtt->ttm.ttm.num_pages, gtt->offset); | ||
941 | return r; | ||
942 | } | ||
943 | } | ||
944 | spin_unlock(&adev->gtt_list_lock); | ||
945 | return 0; | ||
946 | } | 932 | } |
947 | 933 | ||
948 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) | 934 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) |
@@ -957,16 +943,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) | |||
957 | return 0; | 943 | return 0; |
958 | 944 | ||
959 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ | 945 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ |
960 | spin_lock(>t->adev->gtt_list_lock); | ||
961 | r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); | 946 | r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); |
962 | if (r) { | 947 | if (r) |
963 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", | 948 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", |
964 | gtt->ttm.ttm.num_pages, gtt->offset); | 949 | gtt->ttm.ttm.num_pages, gtt->offset); |
965 | goto error_unbind; | ||
966 | } | ||
967 | list_del_init(&gtt->list); | ||
968 | error_unbind: | ||
969 | spin_unlock(>t->adev->gtt_list_lock); | ||
970 | return r; | 950 | return r; |
971 | } | 951 | } |
972 | 952 | ||
@@ -1003,7 +983,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, | |||
1003 | kfree(gtt); | 983 | kfree(gtt); |
1004 | return NULL; | 984 | return NULL; |
1005 | } | 985 | } |
1006 | INIT_LIST_HEAD(&gtt->list); | ||
1007 | return &gtt->ttm.ttm; | 986 | return &gtt->ttm.ttm; |
1008 | } | 987 | } |
1009 | 988 | ||