Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e7dfb7b44b4b..b832651d2137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -24,6 +24,18 @@
24#include "amdgpu.h" 24#include "amdgpu.h"
25#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */ 25#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
26 26
27uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
28{
29 uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
30
31 addr -= AMDGPU_VA_RESERVED_SIZE;
32
33 if (addr >= AMDGPU_VA_HOLE_START)
34 addr |= AMDGPU_VA_HOLE_END;
35
36 return addr;
37}
38
27bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) 39bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
28{ 40{
29 /* By now all MMIO pages except mailbox are blocked */ 41 /* By now all MMIO pages except mailbox are blocked */
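
The hunk above adds amdgpu_csa_vaddr(): it places the CSA reservation at the
very top of the GPU VA space and, when that address lands at or beyond the
hole between the two canonical ranges, sign-extends it with
AMDGPU_VA_HOLE_END. A minimal userspace sketch of the same arithmetic
follows; the hole constants mirror this era's amdgpu.h, while the
reserved-size and max_pfn values are assumptions chosen for illustration.

#include <stdio.h>
#include <inttypes.h>

#define GPU_PAGE_SHIFT   12                     /* AMDGPU_GPU_PAGE_SHIFT */
#define VA_RESERVED_SIZE (1ULL << 20)           /* assumed 1 MiB reservation */
#define VA_HOLE_START    0x0000800000000000ULL  /* AMDGPU_VA_HOLE_START */
#define VA_HOLE_END      0xffff800000000000ULL  /* AMDGPU_VA_HOLE_END */
#define VA_HOLE_MASK     0x0000ffffffffffffULL  /* AMDGPU_VA_HOLE_MASK */

static uint64_t csa_vaddr(uint64_t max_pfn)
{
	uint64_t addr = max_pfn << GPU_PAGE_SHIFT;  /* one past the last VA */

	addr -= VA_RESERVED_SIZE;                   /* CSA sits just below it */

	/* High addresses must be sign-extended to be canonical. */
	if (addr >= VA_HOLE_START)
		addr |= VA_HOLE_END;

	return addr;
}

int main(void)
{
	uint64_t va = csa_vaddr(1ULL << 36);        /* 48-bit VA space */

	printf("canonical CSA VA: %#018" PRIx64 "\n", va);
	printf("hw page-table VA: %#018" PRIx64 "\n", va & VA_HOLE_MASK);
	return 0;
}

Built with any C compiler, this prints 0xfffffffffff00000 for the canonical
address and 0x0000fffffff00000 once the hole mask strips the sign extension.
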
@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 
 /*
  * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
  */
-
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo_va **bo_va)
 {
+	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
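
The rewritten comment says amdgpu_map_static_csa() should run during VM
creation; concretely, the SR-IOV open path is the expected caller. A hedged,
non-buildable context sketch, where the fpriv->vm and fpriv->csa_va field
names are assumptions based on this era's amdgpu_kms.c rather than part of
this patch:

	/* In amdgpu_driver_open_kms(), after amdgpu_vm_init() succeeds: */
	if (amdgpu_sriov_vf(adev)) {
		/* Map the static CSA so SRIOV gfx preemption has its
		 * per-VM metadata address. */
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;	/* tear the fresh VM back down */
	}
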
@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 
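
One invariant worth spelling out: csa_addr is computed as
amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK because amdgpu_vm_alloc_pts()
and amdgpu_vm_bo_map() work with the unextended hardware address; masking
the sign-extended canonical form recovers it exactly. A standalone check of
that round trip, reusing the illustrative constants from the first sketch:

#include <assert.h>
#include <stdint.h>

#define VA_HOLE_START 0x0000800000000000ULL
#define VA_HOLE_END   0xffff800000000000ULL
#define VA_HOLE_MASK  0x0000ffffffffffffULL

/* Sign-extend a 48-bit hardware VA into its canonical 64-bit form. */
static uint64_t canonicalize(uint64_t hw_va)
{
	return hw_va >= VA_HOLE_START ? hw_va | VA_HOLE_END : hw_va;
}

int main(void)
{
	uint64_t hw_va = 0x0000fffffff00000ULL;	/* CSA example from above */

	/* Masking the canonical VA recovers the hardware VA exactly. */
	assert((canonicalize(hw_va) & VA_HOLE_MASK) == hw_va);
	assert(canonicalize(hw_va) == 0xfffffffffff00000ULL);
	return 0;
}
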