Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  92
1 file changed, 0 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f71bc6feea7a..cca794a1f8ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-	addr -= AMDGPU_VA_RESERVED_SIZE;
-	addr = amdgpu_gmc_sign_extend(addr);
-
-	return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
-			       u32 domain, uint32_t size)
-{
-	int r;
-	void *ptr;
-
-	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
-				    domain, bo,
-				    NULL, &ptr);
-	if (!bo)
-		return -ENOMEM;
-
-	memset(ptr, 0, size);
-	return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_bo **bo)
-{
-	amdgpu_bo_free_kernel(bo, NULL, NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
-			  uint64_t csa_addr, uint32_t size)
-{
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.shared = true;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
-	}
-
-	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
-	}
-
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				size);
-	if (r) {
-		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
-
-	if (r) {
-		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */
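
As context for the functions removed above: the comment block on amdgpu_map_static_csa() describes the intended call sequence — allocate the CSA BO once, then map it at amdgpu_csa_vaddr() into each VM at init time so GFX command submissions can reference that address from the META_DATA init package. A minimal sketch of such a caller follows. It is not part of this diff: the SRIOV gate via amdgpu_sriov_vf(), the one-page CSA size, the AMDGPU_GMC_HOLE_MASK masking, and the csa_obj/csa_va field names are assumptions modeled on how the driver uses these helpers elsewhere; in the driver proper the allocation happens once at device init while the mapping is per-VM, and both are collapsed into one function here only for brevity.

/*
 * Illustrative sketch only (assumed names, not from this diff): one
 * possible caller of the static-CSA helpers during per-file VM init.
 */
static int example_csa_setup(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv)
{
	uint64_t csa_addr;
	int r;

	/* The static CSA only exists to support SRIOV gfx preemption. */
	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* Back the CSA with a single zeroed page (size is an assumption). */
	r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
				       AMDGPU_GEM_DOMAIN_VRAM, PAGE_SIZE);
	if (r)
		return r;

	/*
	 * amdgpu_csa_vaddr() returns a sign-extended address; mask the
	 * extension off before handing it to the VM mapping code.
	 */
	csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

	/* Map the reserved VA range into this file's VM. */
	r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
				  &fpriv->csa_va, csa_addr, PAGE_SIZE);
	if (r)
		amdgpu_free_static_csa(&adev->virt.csa_obj);

	return r;
}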