author      Rex Zhu <Rex.Zhu@amd.com>                   2018-10-19 01:35:48 -0400
committer   Alex Deucher <alexander.deucher@amd.com>    2018-11-05 14:21:49 -0500
commit      7946340fa38965705f79273cef0cdc477239bf2d
tree        402ac5a7dddc2c55a6dba61988f6535e840489b6 /drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
parent      1e256e2762211c02078c31f839a9b243f62efd5e
drm/amdgpu: Move csa related code to separate file
On bare metal we also need to reserve a CSA for preemption,
so move the CSA-related code out of the SR-IOV file.
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
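
For reference, here is a minimal user-space sketch of the address arithmetic in the removed amdgpu_csa_vaddr(): the CSA is placed at the top of the GPU virtual address space, just below a reserved window, and the resulting 48-bit address is sign-extended into canonical 64-bit form. The constants (page shift, reserved-window size, hole boundaries) and the max_pfn value are illustrative assumptions, not the driver's authoritative definitions.

/*
 * Stand-alone sketch of the math in the removed amdgpu_csa_vaddr().
 * All constants are illustrative assumptions, not the driver's
 * authoritative definitions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT   12                      /* assumed 4 KiB GPU pages */
#define VA_RESERVED_SIZE (1ULL << 20)            /* assumed 1 MiB reserved window */
#define GMC_HOLE_START   0x0000800000000000ULL   /* assumed canonical-hole bounds */
#define GMC_HOLE_END     0xffff800000000000ULL

/* Mirrors amdgpu_gmc_sign_extend(): fold addresses at or above the
 * hole into the upper canonical half of the 64-bit address space. */
static uint64_t sign_extend(uint64_t addr)
{
	if (addr >= GMC_HOLE_START)
		addr |= GMC_HOLE_END;
	return addr;
}

int main(void)
{
	uint64_t max_pfn = 1ULL << 36;  /* assumed 48-bit VA space */
	uint64_t addr = max_pfn << GPU_PAGE_SHIFT;

	/* The CSA starts at the base of the reserved window at the
	 * top of the VA range. */
	addr -= VA_RESERVED_SIZE;
	printf("CSA vaddr: 0x%016" PRIx64 "\n", sign_extend(addr));
	return 0;
}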
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--    drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c    92
1 file changed, 0 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f71bc6feea7a..cca794a1f8ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
 
 #include "amdgpu.h"
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
-	addr -= AMDGPU_VA_RESERVED_SIZE;
-	addr = amdgpu_gmc_sign_extend(addr);
-
-	return addr;
-}
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
-			       u32 domain, uint32_t size)
-{
-	int r;
-	void *ptr;
-
-	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
-				    domain, bo,
-				    NULL, &ptr);
-	if (!bo)
-		return -ENOMEM;
-
-	memset(ptr, 0, size);
-	return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_bo **bo)
-{
-	amdgpu_bo_free_kernel(bo, NULL, NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
-			  uint64_t csa_addr, uint32_t size)
-{
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.shared = true;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
-	}
-
-	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
-	}
-
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				size);
-	if (r) {
-		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
-
-	if (r) {
-		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
-	}
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
-}
-
 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 {
 	/* enable virtual display */