Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  |  25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 8a081e162d13..89208456d360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
  * address within META_DATA init package to support SRIOV gfx preemption.
  */
 
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo_va **bo_va)
 {
-	int r;
-	struct amdgpu_bo_va *bo_va;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
 	struct ttm_validate_buffer csa_tv;
+	int r;
 
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-	if (!bo_va) {
+	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+	if (!*bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for static CSA\n");
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, bo_va);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 
 	if (r) {
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, bo_va);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
 		return r;
 	}
 
-	vm->csa_bo_va = bo_va;
 	ttm_eu_backoff_reservation(&ticket, &list);
 	return 0;
 }
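
For reference, a minimal caller sketch under the new signature: amdgpu_map_static_csa() no longer stashes the mapping in vm->csa_bo_va, so the caller now receives the bo_va through the added out-parameter and keeps it itself. The example_open_csa() wrapper and the fpriv->csa_va field below are assumptions for illustration only, not part of this patch.

/*
 * Illustrative caller sketch (not part of this patch): the bo_va is now
 * returned through the out-parameter instead of being stored in
 * vm->csa_bo_va by amdgpu_map_static_csa() itself.  The fpriv->csa_va
 * field used here is an assumed storage location for the mapping.
 */
static int example_open_csa(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* Map the static CSA into this client's VM and keep the bo_va. */
	r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
	if (r)
		return r;

	/* fpriv->csa_va can later be passed to amdgpu_vm_bo_rmv() on teardown. */
	return 0;
}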