about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorRex Zhu <Rex.Zhu@amd.com>2018-10-15 05:08:38 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-11-05 14:21:48 -0500
commit1e256e2762211c02078c31f839a9b243f62efd5e (patch)
treeda9dc4eb0fbfee937e8ed2812ccf32d60de9ca61 /drivers
parent20bedfe0c13a2e221301e3c889b2a4c48374f78a (diff)
drm/amdgpu: Refine CSA related functions
There are no functional changes. Use function arguments for SRIOV-specific variables that were hardcoded in those functions, so we can share those functions in bare-metal. Reviewed-by: Monk Liu <Monk.Liu@amd.com> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h8
4 files changed, 29 insertions, 22 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 416a67672f3d..0bf13d69efbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1656 1656
1657 /* right after GMC hw init, we create CSA */ 1657 /* right after GMC hw init, we create CSA */
1658 if (amdgpu_sriov_vf(adev)) { 1658 if (amdgpu_sriov_vf(adev)) {
1659 r = amdgpu_allocate_static_csa(adev); 1659 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1660 AMDGPU_GEM_DOMAIN_VRAM,
1661 AMDGPU_CSA_SIZE);
1660 if (r) { 1662 if (r) {
1661 DRM_ERROR("allocate CSA failed %d\n", r); 1663 DRM_ERROR("allocate CSA failed %d\n", r);
1662 return r; 1664 return r;
@@ -1890,7 +1892,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1890 1892
1891 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1893 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1892 amdgpu_ucode_free_bo(adev); 1894 amdgpu_ucode_free_bo(adev);
1893 amdgpu_free_static_csa(adev); 1895 amdgpu_free_static_csa(&adev->virt.csa_obj);
1894 amdgpu_device_wb_fini(adev); 1896 amdgpu_device_wb_fini(adev);
1895 amdgpu_device_vram_scratch_fini(adev); 1897 amdgpu_device_vram_scratch_fini(adev);
1896 } 1898 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8f6ff9f895c8..9b3164c0f861 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
978 } 978 }
979 979
980 if (amdgpu_sriov_vf(adev)) { 980 if (amdgpu_sriov_vf(adev)) {
981 r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); 981 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
982
983 r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
984 &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
982 if (r) 985 if (r)
983 goto error_vm; 986 goto error_vm;
984 } 987 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9ff16b790c92..f71bc6feea7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
41 return RREG32_NO_KIQ(0xc040) == 0xffffffff; 41 return RREG32_NO_KIQ(0xc040) == 0xffffffff;
42} 42}
43 43
44int amdgpu_allocate_static_csa(struct amdgpu_device *adev) 44int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
45 u32 domain, uint32_t size)
45{ 46{
46 int r; 47 int r;
47 void *ptr; 48 void *ptr;
48 49
49 r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, 50 r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
50 AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, 51 domain, bo,
51 NULL, &ptr); 52 NULL, &ptr);
52 if (r) 53 if (!bo)
53 return r; 54 return -ENOMEM;
54 55
55 memset(ptr, 0, AMDGPU_CSA_SIZE); 56 memset(ptr, 0, size);
56 return 0; 57 return 0;
57} 58}
58 59
59void amdgpu_free_static_csa(struct amdgpu_device *adev) { 60void amdgpu_free_static_csa(struct amdgpu_bo **bo)
60 amdgpu_bo_free_kernel(&adev->virt.csa_obj, 61{
61 NULL, 62 amdgpu_bo_free_kernel(bo, NULL, NULL);
62 NULL);
63} 63}
64 64
65/* 65/*
@@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
69 * package to support SRIOV gfx preemption. 69 * package to support SRIOV gfx preemption.
70 */ 70 */
71int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, 71int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
72 struct amdgpu_bo_va **bo_va) 72 struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
73 uint64_t csa_addr, uint32_t size)
73{ 74{
74 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
75 struct ww_acquire_ctx ticket; 75 struct ww_acquire_ctx ticket;
76 struct list_head list; 76 struct list_head list;
77 struct amdgpu_bo_list_entry pd; 77 struct amdgpu_bo_list_entry pd;
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
80 80
81 INIT_LIST_HEAD(&list); 81 INIT_LIST_HEAD(&list);
82 INIT_LIST_HEAD(&csa_tv.head); 82 INIT_LIST_HEAD(&csa_tv.head);
83 csa_tv.bo = &adev->virt.csa_obj->tbo; 83 csa_tv.bo = &bo->tbo;
84 csa_tv.shared = true; 84 csa_tv.shared = true;
85 85
86 list_add(&csa_tv.head, &list); 86 list_add(&csa_tv.head, &list);
@@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
92 return r; 92 return r;
93 } 93 }
94 94
95 *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); 95 *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
96 if (!*bo_va) { 96 if (!*bo_va) {
97 ttm_eu_backoff_reservation(&ticket, &list); 97 ttm_eu_backoff_reservation(&ticket, &list);
98 DRM_ERROR("failed to create bo_va for static CSA\n"); 98 DRM_ERROR("failed to create bo_va for static CSA\n");
@@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
100 } 100 }
101 101
102 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, 102 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
103 AMDGPU_CSA_SIZE); 103 size);
104 if (r) { 104 if (r) {
105 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); 105 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
106 amdgpu_vm_bo_rmv(adev, *bo_va); 106 amdgpu_vm_bo_rmv(adev, *bo_va);
@@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
108 return r; 108 return r;
109 } 109 }
110 110
111 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE, 111 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
112 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | 112 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
113 AMDGPU_PTE_EXECUTABLE); 113 AMDGPU_PTE_EXECUTABLE);
114 114
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f1a6a50d9444..09a7ebe964d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -280,10 +280,12 @@ struct amdgpu_vm;
280 280
281uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); 281uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
282bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 282bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
283int amdgpu_allocate_static_csa(struct amdgpu_device *adev); 283int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
284 u32 domain, uint32_t size);
284int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, 285int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
285 struct amdgpu_bo_va **bo_va); 286 struct amdgpu_bo *bo,
286void amdgpu_free_static_csa(struct amdgpu_device *adev); 287 struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size);
288void amdgpu_free_static_csa(struct amdgpu_bo **bo);
287void amdgpu_virt_init_setting(struct amdgpu_device *adev); 289void amdgpu_virt_init_setting(struct amdgpu_device *adev);
288uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 290uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
289void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); 291void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);