aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
diff options
context:
space:
mode:
authorRex Zhu <Rex.Zhu@amd.com>2018-10-15 05:08:38 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-11-05 14:21:48 -0500
commit1e256e2762211c02078c31f839a9b243f62efd5e (patch)
treeda9dc4eb0fbfee937e8ed2812ccf32d60de9ca61 /drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
parent20bedfe0c13a2e221301e3c889b2a4c48374f78a (diff)
drm/amdgpu: Refine CSA related functions
There are no functional changes. Use function arguments for the SRIOV-specific variables which were hardcoded in those functions, so we can share those functions in bare-metal. Reviewed-by: Monk Liu <Monk.Liu@amd.com> Signed-off-by: Rex Zhu <Rex.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c32
1 file changed, 16 insertions, 16 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9ff16b790c92..f71bc6feea7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
41 return RREG32_NO_KIQ(0xc040) == 0xffffffff; 41 return RREG32_NO_KIQ(0xc040) == 0xffffffff;
42} 42}
43 43
44int amdgpu_allocate_static_csa(struct amdgpu_device *adev) 44int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
45 u32 domain, uint32_t size)
45{ 46{
46 int r; 47 int r;
47 void *ptr; 48 void *ptr;
48 49
49 r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, 50 r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
50 AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, 51 domain, bo,
51 NULL, &ptr); 52 NULL, &ptr);
52 if (r) 53 if (!bo)
53 return r; 54 return -ENOMEM;
54 55
55 memset(ptr, 0, AMDGPU_CSA_SIZE); 56 memset(ptr, 0, size);
56 return 0; 57 return 0;
57} 58}
58 59
59void amdgpu_free_static_csa(struct amdgpu_device *adev) { 60void amdgpu_free_static_csa(struct amdgpu_bo **bo)
60 amdgpu_bo_free_kernel(&adev->virt.csa_obj, 61{
61 NULL, 62 amdgpu_bo_free_kernel(bo, NULL, NULL);
62 NULL);
63} 63}
64 64
65/* 65/*
@@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
69 * package to support SRIOV gfx preemption. 69 * package to support SRIOV gfx preemption.
70 */ 70 */
71int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, 71int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
72 struct amdgpu_bo_va **bo_va) 72 struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
73 uint64_t csa_addr, uint32_t size)
73{ 74{
74 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
75 struct ww_acquire_ctx ticket; 75 struct ww_acquire_ctx ticket;
76 struct list_head list; 76 struct list_head list;
77 struct amdgpu_bo_list_entry pd; 77 struct amdgpu_bo_list_entry pd;
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
80 80
81 INIT_LIST_HEAD(&list); 81 INIT_LIST_HEAD(&list);
82 INIT_LIST_HEAD(&csa_tv.head); 82 INIT_LIST_HEAD(&csa_tv.head);
83 csa_tv.bo = &adev->virt.csa_obj->tbo; 83 csa_tv.bo = &bo->tbo;
84 csa_tv.shared = true; 84 csa_tv.shared = true;
85 85
86 list_add(&csa_tv.head, &list); 86 list_add(&csa_tv.head, &list);
@@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
92 return r; 92 return r;
93 } 93 }
94 94
95 *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); 95 *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
96 if (!*bo_va) { 96 if (!*bo_va) {
97 ttm_eu_backoff_reservation(&ticket, &list); 97 ttm_eu_backoff_reservation(&ticket, &list);
98 DRM_ERROR("failed to create bo_va for static CSA\n"); 98 DRM_ERROR("failed to create bo_va for static CSA\n");
@@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
100 } 100 }
101 101
102 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, 102 r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
103 AMDGPU_CSA_SIZE); 103 size);
104 if (r) { 104 if (r) {
105 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); 105 DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
106 amdgpu_vm_bo_rmv(adev, *bo_va); 106 amdgpu_vm_bo_rmv(adev, *bo_va);
@@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
108 return r; 108 return r;
109 } 109 }
110 110
111 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE, 111 r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
112 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | 112 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
113 AMDGPU_PTE_EXECUTABLE); 113 AMDGPU_PTE_EXECUTABLE);
114 114