author		Christian König <christian.koenig@amd.com>	2017-12-18 10:53:03 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2017-12-27 11:33:52 -0500
commit		620f774f4687d86c420152309eefb0ef0fcc7e51 (patch)
tree		1fe01558fe35ae905eebd299b55e57abc31d8ff7 /drivers/gpu/drm/amd/amdgpu
parent		df2869abd92b740af141ee2eb081bfc69bd80877 (diff)
drm/amdgpu: separate VMID and PASID handling
Move both into the new files amdgpu_ids.[ch]. No functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
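
For orientation, the renames carried by the move, as reconstructed from the hunks below (the old names disappear from amdgpu_vm.[ch], the new ones are declared in amdgpu_ids.h):

    amdgpu_vm_alloc_pasid()          -> amdgpu_pasid_alloc()
    amdgpu_vm_free_pasid()           -> amdgpu_pasid_free()
    amdgpu_vm_grab_id()              -> amdgpu_vmid_grab()
    amdgpu_vm_reset_id()             -> amdgpu_vmid_reset()
    amdgpu_vm_reset_all_ids()        -> amdgpu_vmid_reset_all()
    amdgpu_vm_had_gpu_reset()        -> amdgpu_vmid_had_gpu_reset()
    amdgpu_vm_alloc_reserved_vmid()  -> amdgpu_vmid_alloc_reserved()
    amdgpu_vm_free_reserved_vmid()   -> amdgpu_vmid_free_reserved()
    struct amdgpu_vm_id              -> struct amdgpu_vmid
    struct amdgpu_vm_id_manager      -> struct amdgpu_vmid_mgr
    AMDGPU_NUM_VM                    -> AMDGPU_NUM_VMID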
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
 drivers/gpu/drm/amd/amdgpu/Makefile               |   3
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |   4
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c |   4
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c            |   3
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c           | 459
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h           |  91
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c           |   6
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c            | 422
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h            |  44
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c             |   2
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c             |   2
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c             |   2
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c             |   2
13 files changed, 579 insertions(+), 465 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index d8da12c114b1..d6e5b7273853 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
         amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
         amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
         amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-        amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
+        amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \
+        amdgpu_ids.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 1e3e9be7d77e..1ae149456c9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -169,8 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
         .get_vmem_size = get_vmem_size,
         .get_gpu_clock_counter = get_gpu_clock_counter,
         .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-        .alloc_pasid = amdgpu_vm_alloc_pasid,
-        .free_pasid = amdgpu_vm_free_pasid,
+        .alloc_pasid = amdgpu_pasid_alloc,
+        .free_pasid = amdgpu_pasid_free,
         .program_sh_mem_settings = kgd_program_sh_mem_settings,
         .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
         .init_pipeline = kgd_init_pipeline,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 056929b8ccd0..e9b436bc8dcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,8 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
         .get_vmem_size = get_vmem_size,
         .get_gpu_clock_counter = get_gpu_clock_counter,
         .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-        .alloc_pasid = amdgpu_vm_alloc_pasid,
-        .free_pasid = amdgpu_vm_free_pasid,
+        .alloc_pasid = amdgpu_pasid_alloc,
+        .free_pasid = amdgpu_pasid_free,
         .program_sh_mem_settings = kgd_program_sh_mem_settings,
         .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
         .init_pipeline = kgd_init_pipeline,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0cf86eb357d6..03a69942cce5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -230,8 +230,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
         if (r) {
                 dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                 if (job && job->vm_id)
-                        amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
-                                           job->vm_id);
+                        amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id);
                 amdgpu_ring_undo(ring);
                 return r;
         }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 000000000000..71f8a76d4c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+        int pasid = -EINVAL;
+
+        for (bits = min(bits, 31U); bits > 0; bits--) {
+                pasid = ida_simple_get(&amdgpu_pasid_ida,
+                                       1U << (bits - 1), 1U << bits,
+                                       GFP_KERNEL);
+                if (pasid != -ENOSPC)
+                        break;
+        }
+
+        return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(unsigned int pasid)
+{
+        ida_simple_remove(&amdgpu_pasid_ida, pasid);
+}
+
+/*
+ * VMID manager
+ *
+ * VMIDs are per-VMHUB identifiers for page table handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if a GPU reset occurred since the last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+                               struct amdgpu_vmid *id)
+{
+        return id->current_gpu_reset_count !=
+                atomic_read(&adev->gpu_reset_counter);
+}
+
+/* id_mgr->lock must be held */
+static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+                                            struct amdgpu_ring *ring,
+                                            struct amdgpu_sync *sync,
+                                            struct dma_fence *fence,
+                                            struct amdgpu_job *job)
+{
+        struct amdgpu_device *adev = ring->adev;
+        unsigned vmhub = ring->funcs->vmhub;
+        uint64_t fence_context = adev->fence_context + ring->idx;
+        struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        struct dma_fence *updates = sync->last_vm_update;
+        int r = 0;
+        struct dma_fence *flushed, *tmp;
+        bool needs_flush = vm->use_cpu_for_update;
+
+        flushed = id->flushed_updates;
+        if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
+            (atomic64_read(&id->owner) != vm->client_id) ||
+            (job->vm_pd_addr != id->pd_gpu_addr) ||
+            (updates && (!flushed || updates->context != flushed->context ||
+                         dma_fence_is_later(updates, flushed))) ||
+            (!id->last_flush || (id->last_flush->context != fence_context &&
+                                 !dma_fence_is_signaled(id->last_flush)))) {
+                needs_flush = true;
+                /* to prevent one context from being starved by another */
+                id->pd_gpu_addr = 0;
+                tmp = amdgpu_sync_peek_fence(&id->active, ring);
+                if (tmp) {
+                        r = amdgpu_sync_fence(adev, sync, tmp, false);
+                        return r;
+                }
+        }
+
+        /* Good, we can use this VMID. Remember this submission as
+         * user of the VMID.
+         */
+        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+        if (r)
+                goto out;
+
+        if (updates && (!flushed || updates->context != flushed->context ||
+                        dma_fence_is_later(updates, flushed))) {
+                dma_fence_put(id->flushed_updates);
+                id->flushed_updates = dma_fence_get(updates);
+        }
+        id->pd_gpu_addr = job->vm_pd_addr;
+        atomic64_set(&id->owner, vm->client_id);
+        job->vm_needs_flush = needs_flush;
+        if (needs_flush) {
+                dma_fence_put(id->last_flush);
+                id->last_flush = NULL;
+        }
+        job->vm_id = id - id_mgr->ids;
+        trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+        return r;
+}
+
+/**
+ * amdgpu_vmid_grab - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                     struct amdgpu_sync *sync, struct dma_fence *fence,
+                     struct amdgpu_job *job)
+{
+        struct amdgpu_device *adev = ring->adev;
+        unsigned vmhub = ring->funcs->vmhub;
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        uint64_t fence_context = adev->fence_context + ring->idx;
+        struct dma_fence *updates = sync->last_vm_update;
+        struct amdgpu_vmid *id, *idle;
+        struct dma_fence **fences;
+        unsigned i;
+        int r = 0;
+
+        mutex_lock(&id_mgr->lock);
+        if (vm->reserved_vmid[vmhub]) {
+                r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
+                mutex_unlock(&id_mgr->lock);
+                return r;
+        }
+        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
+        if (!fences) {
+                mutex_unlock(&id_mgr->lock);
+                return -ENOMEM;
+        }
+        /* Check if we have an idle VMID */
+        i = 0;
+        list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+                fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+                if (!fences[i])
+                        break;
+                ++i;
+        }
+
+        /* If we can't find an idle VMID to use, wait till one becomes available */
+        if (&idle->list == &id_mgr->ids_lru) {
+                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+                struct dma_fence_array *array;
+                unsigned j;
+
+                for (j = 0; j < i; ++j)
+                        dma_fence_get(fences[j]);
+
+                array = dma_fence_array_create(i, fences, fence_context,
+                                               seqno, true);
+                if (!array) {
+                        for (j = 0; j < i; ++j)
+                                dma_fence_put(fences[j]);
+                        kfree(fences);
+                        r = -ENOMEM;
+                        goto error;
+                }
+
+                r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+                dma_fence_put(&array->base);
+                if (r)
+                        goto error;
+
+                mutex_unlock(&id_mgr->lock);
+                return 0;
+
+        }
+        kfree(fences);
+
+        job->vm_needs_flush = vm->use_cpu_for_update;
+        /* Check if we can use a VMID already assigned to this VM */
+        list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
+                struct dma_fence *flushed;
+                bool needs_flush = vm->use_cpu_for_update;
+
+                /* Check all the prerequisites to using this VMID */
+                if (amdgpu_vmid_had_gpu_reset(adev, id))
+                        continue;
+
+                if (atomic64_read(&id->owner) != vm->client_id)
+                        continue;
+
+                if (job->vm_pd_addr != id->pd_gpu_addr)
+                        continue;
+
+                if (!id->last_flush ||
+                    (id->last_flush->context != fence_context &&
+                     !dma_fence_is_signaled(id->last_flush)))
+                        needs_flush = true;
+
+                flushed = id->flushed_updates;
+                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+                        needs_flush = true;
+
+                /* Concurrent flushes are only possible starting with Vega10 */
+                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+                        continue;
+
+                /* Good, we can use this VMID. Remember this submission as
+                 * user of the VMID.
+                 */
+                r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+                if (r)
+                        goto error;
+
+                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+                        dma_fence_put(id->flushed_updates);
+                        id->flushed_updates = dma_fence_get(updates);
+                }
+
+                if (needs_flush)
+                        goto needs_flush;
+                else
+                        goto no_flush_needed;
+
+        }
+
+        /* Still no ID to use? Then use the idle one found earlier */
+        id = idle;
+
+        /* Remember this submission as user of the VMID */
+        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+        if (r)
+                goto error;
+
+        id->pd_gpu_addr = job->vm_pd_addr;
+        dma_fence_put(id->flushed_updates);
+        id->flushed_updates = dma_fence_get(updates);
+        atomic64_set(&id->owner, vm->client_id);
+
+needs_flush:
+        job->vm_needs_flush = true;
+        dma_fence_put(id->last_flush);
+        id->last_flush = NULL;
+
+no_flush_needed:
+        list_move_tail(&id->list, &id_mgr->ids_lru);
+
+        job->vm_id = id - id_mgr->ids;
+        trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+        mutex_unlock(&id_mgr->lock);
+        return r;
+}
+
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               unsigned vmhub)
+{
+        struct amdgpu_vmid_mgr *id_mgr;
+        struct amdgpu_vmid *idle;
+        int r = 0;
+
+        id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        mutex_lock(&id_mgr->lock);
+        if (vm->reserved_vmid[vmhub])
+                goto unlock;
+        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+            AMDGPU_VM_MAX_RESERVED_VMID) {
+                DRM_ERROR("Over limitation of reserved vmid\n");
+                atomic_dec(&id_mgr->reserved_vmid_num);
+                r = -EINVAL;
+                goto unlock;
+        }
+        /* Select the first entry VMID */
+        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+        list_del_init(&idle->list);
+        vm->reserved_vmid[vmhub] = idle;
+        mutex_unlock(&id_mgr->lock);
+
+        return 0;
+unlock:
+        mutex_unlock(&id_mgr->lock);
+        return r;
+}
+
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               unsigned vmhub)
+{
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+        mutex_lock(&id_mgr->lock);
+        if (vm->reserved_vmid[vmhub]) {
+                list_add(&vm->reserved_vmid[vmhub]->list,
+                         &id_mgr->ids_lru);
+                vm->reserved_vmid[vmhub] = NULL;
+                atomic_dec(&id_mgr->reserved_vmid_num);
+        }
+        mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force switch on next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+                       unsigned vmid)
+{
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+        atomic64_set(&id->owner, 0);
+        id->gds_base = 0;
+        id->gds_size = 0;
+        id->gws_base = 0;
+        id->gws_size = 0;
+        id->oa_base = 0;
+        id->oa_size = 0;
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset all VMIDs to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset all VMIDs to force a flush on next use.
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+        unsigned i, j;
+
+        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+                struct amdgpu_vmid_mgr *id_mgr =
+                        &adev->vm_manager.id_mgr[i];
+
+                for (j = 1; j < id_mgr->num_ids; ++j)
+                        amdgpu_vmid_reset(adev, i, j);
+        }
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VMID manager structures.
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+        unsigned i, j;
+
+        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+                struct amdgpu_vmid_mgr *id_mgr =
+                        &adev->vm_manager.id_mgr[i];
+
+                mutex_init(&id_mgr->lock);
+                INIT_LIST_HEAD(&id_mgr->ids_lru);
+                atomic_set(&id_mgr->reserved_vmid_num, 0);
+
+                /* skip over VMID 0, since it is the system VM */
+                for (j = 1; j < id_mgr->num_ids; ++j) {
+                        amdgpu_vmid_reset(adev, i, j);
+                        amdgpu_sync_create(&id_mgr->ids[j].active);
+                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+                }
+        }
+
+        adev->vm_manager.fence_context =
+                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+                adev->vm_manager.seqno[i] = 0;
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - cleanup the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Clean up the VMID manager and free resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+        unsigned i, j;
+
+        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+                struct amdgpu_vmid_mgr *id_mgr =
+                        &adev->vm_manager.id_mgr[i];
+
+                mutex_destroy(&id_mgr->lock);
+                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+                        struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+                        amdgpu_sync_free(&id->active);
+                        dma_fence_put(id->flushed_updates);
+                        dma_fence_put(id->last_flush);
+                }
+        }
+}
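
A note on the allocation loop in amdgpu_pasid_alloc() above: it walks power-of-two windows from the widest allowed down to [1,2), and only falls through to a narrower window when ida_simple_get() reports -ENOSPC, which is what keeps small PASIDs available for IOMMUs with narrow PASID support. A minimal userspace sketch of that walk (illustration only; the real allocator is the kernel IDA, which is not used here):

    #include <stdio.h>

    /* Print which [start, end) window each iteration of the loop in
     * amdgpu_pasid_alloc() would hand to ida_simple_get(). The fall
     * through to the next, narrower window happens only on -ENOSPC. */
    static void show_pasid_ranges(unsigned int bits)
    {
            printf("amdgpu_pasid_alloc(%u) would try:\n", bits);
            for (bits = bits < 31 ? bits : 31; bits > 0; bits--)
                    printf("  ida_simple_get(&ida, %u, %u, GFP_KERNEL)\n",
                           1u << (bits - 1), 1u << bits);
    }

    int main(void)
    {
            /* A 4-bit request probes [8,16) first and [1,2) last. */
            show_pasid_ranges(4);
            return 0;
    }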
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 000000000000..ad931fa570b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID 16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+        struct list_head        list;
+        struct amdgpu_sync      active;
+        struct dma_fence        *last_flush;
+        atomic64_t              owner;
+
+        uint64_t                pd_gpu_addr;
+        /* last flushed PD/PT update */
+        struct dma_fence        *flushed_updates;
+
+        uint32_t                current_gpu_reset_count;
+
+        uint32_t                gds_base;
+        uint32_t                gds_size;
+        uint32_t                gws_base;
+        uint32_t                gws_size;
+        uint32_t                oa_base;
+        uint32_t                oa_size;
+};
+
+struct amdgpu_vmid_mgr {
+        struct mutex            lock;
+        unsigned                num_ids;
+        struct list_head        ids_lru;
+        struct amdgpu_vmid      ids[AMDGPU_NUM_VMID];
+        atomic_t                reserved_vmid_num;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(unsigned int pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+                               struct amdgpu_vmid *id);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                     struct amdgpu_sync *sync, struct dma_fence *fence,
+                     struct amdgpu_job *job);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+                       unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
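
One idiom in this patch worth spelling out: both grab paths publish the chosen ID as job->vm_id = id - id_mgr->ids;, i.e. the VMID number is recovered from the element's position in the ids[] array of struct amdgpu_vmid_mgr above, while ids_lru keeps the entries in least-recently-used order so the longest-idle ID is reused first. A self-contained sketch of that pointer subtraction (field and type names only mirror the header; everything else is illustrative):

    #include <stdio.h>

    #define NUM_VMID 16

    struct vmid { int dummy; };                 /* stand-in for struct amdgpu_vmid */

    struct vmid_mgr {
            struct vmid ids[NUM_VMID];          /* stand-in for id_mgr->ids */
    };

    int main(void)
    {
            struct vmid_mgr mgr;
            struct vmid *id = &mgr.ids[5];      /* e.g. the entry picked from the LRU */

            /* The difference between an element pointer and the array base
             * is the index, so this prints 5 - how job->vm_id is derived. */
            printf("vm_id = %td\n", id - mgr.ids);
            return 0;
    }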
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 56d9ee5013a9..cdc9e0f5336a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -161,9 +161,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
         while (fence == NULL && vm && !job->vm_id) {
                 struct amdgpu_ring *ring = job->ring;
 
-                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-                                      &job->base.s_fence->finished,
-                                      job);
+                r = amdgpu_vmid_grab(vm, ring, &job->sync,
+                                     &job->base.s_fence->finished,
+                                     job);
                 if (r)
                         DRM_ERROR("Error getting VM ID (%d)\n", r);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 398abbcbf029..01ee8e2258c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,52 +34,6 @@
 #include "amdgpu_trace.h"
 
 /*
- * PASID manager
- *
- * PASIDs are global address space identifiers that can be shared
- * between the GPU, an IOMMU and the driver. VMs on different devices
- * may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using a global IDA. VMs are
- * looked up from the PASID per amdgpu_device.
- */
-static DEFINE_IDA(amdgpu_vm_pasid_ida);
-
-/**
- * amdgpu_vm_alloc_pasid - Allocate a PASID
- * @bits: Maximum width of the PASID in bits, must be at least 1
- *
- * Allocates a PASID of the given width while keeping smaller PASIDs
- * available if possible.
- *
- * Returns a positive integer on success. Returns %-EINVAL if bits==0.
- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
- * memory allocation failure.
- */
-int amdgpu_vm_alloc_pasid(unsigned int bits)
-{
-        int pasid = -EINVAL;
-
-        for (bits = min(bits, 31U); bits > 0; bits--) {
-                pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
-                                       1U << (bits - 1), 1U << bits,
-                                       GFP_KERNEL);
-                if (pasid != -ENOSPC)
-                        break;
-        }
-
-        return pasid;
-}
-
-/**
- * amdgpu_vm_free_pasid - Free a PASID
- * @pasid: PASID to free
- */
-void amdgpu_vm_free_pasid(unsigned int pasid)
-{
-        ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
-}
-
-/*
  * GPUVM
  * GPUVM is similar to the legacy gart on older asics, however
  * rather than there being a single global gart table
@@ -448,286 +402,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_had_gpu_reset - check if reset occured since last use
- *
- * @adev: amdgpu_device pointer
- * @id: VMID structure
- *
- * Check if GPU reset occured since last use of the VMID.
- */
-static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
-                                    struct amdgpu_vm_id *id)
-{
-        return id->current_gpu_reset_count !=
-                atomic_read(&adev->gpu_reset_counter);
-}
-
-static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
-{
-        return !!vm->reserved_vmid[vmhub];
-}
-
-/* idr_mgr->lock must be held */
-static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
-                                               struct amdgpu_ring *ring,
-                                               struct amdgpu_sync *sync,
-                                               struct dma_fence *fence,
-                                               struct amdgpu_job *job)
-{
-        struct amdgpu_device *adev = ring->adev;
-        unsigned vmhub = ring->funcs->vmhub;
-        uint64_t fence_context = adev->fence_context + ring->idx;
-        struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        struct dma_fence *updates = sync->last_vm_update;
-        int r = 0;
-        struct dma_fence *flushed, *tmp;
-        bool needs_flush = vm->use_cpu_for_update;
-
-        flushed = id->flushed_updates;
-        if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
-            (atomic64_read(&id->owner) != vm->client_id) ||
-            (job->vm_pd_addr != id->pd_gpu_addr) ||
-            (updates && (!flushed || updates->context != flushed->context ||
-                         dma_fence_is_later(updates, flushed))) ||
-            (!id->last_flush || (id->last_flush->context != fence_context &&
-                                 !dma_fence_is_signaled(id->last_flush)))) {
-                needs_flush = true;
-                /* to prevent one context starved by another context */
-                id->pd_gpu_addr = 0;
-                tmp = amdgpu_sync_peek_fence(&id->active, ring);
-                if (tmp) {
-                        r = amdgpu_sync_fence(adev, sync, tmp, false);
-                        return r;
-                }
-        }
-
-        /* Good we can use this VMID. Remember this submission as
-         * user of the VMID.
-         */
-        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-        if (r)
-                goto out;
-
-        if (updates && (!flushed || updates->context != flushed->context ||
-                        dma_fence_is_later(updates, flushed))) {
-                dma_fence_put(id->flushed_updates);
-                id->flushed_updates = dma_fence_get(updates);
-        }
-        id->pd_gpu_addr = job->vm_pd_addr;
-        atomic64_set(&id->owner, vm->client_id);
-        job->vm_needs_flush = needs_flush;
-        if (needs_flush) {
-                dma_fence_put(id->last_flush);
-                id->last_flush = NULL;
-        }
-        job->vm_id = id - id_mgr->ids;
-        trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
-        return r;
-}
-
-/**
- * amdgpu_vm_grab_id - allocate the next free VMID
- *
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
- *
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
- */
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync, struct dma_fence *fence,
-                      struct amdgpu_job *job)
-{
-        struct amdgpu_device *adev = ring->adev;
-        unsigned vmhub = ring->funcs->vmhub;
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        uint64_t fence_context = adev->fence_context + ring->idx;
-        struct dma_fence *updates = sync->last_vm_update;
-        struct amdgpu_vm_id *id, *idle;
-        struct dma_fence **fences;
-        unsigned i;
-        int r = 0;
-
-        mutex_lock(&id_mgr->lock);
-        if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
-                r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
-                mutex_unlock(&id_mgr->lock);
-                return r;
-        }
-        fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-        if (!fences) {
-                mutex_unlock(&id_mgr->lock);
-                return -ENOMEM;
-        }
-        /* Check if we have an idle VMID */
-        i = 0;
-        list_for_each_entry(idle, &id_mgr->ids_lru, list) {
-                fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
-                if (!fences[i])
-                        break;
-                ++i;
-        }
-
-        /* If we can't find a idle VMID to use, wait till one becomes available */
-        if (&idle->list == &id_mgr->ids_lru) {
-                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-                struct dma_fence_array *array;
-                unsigned j;
-
-                for (j = 0; j < i; ++j)
-                        dma_fence_get(fences[j]);
-
-                array = dma_fence_array_create(i, fences, fence_context,
-                                               seqno, true);
-                if (!array) {
-                        for (j = 0; j < i; ++j)
-                                dma_fence_put(fences[j]);
-                        kfree(fences);
-                        r = -ENOMEM;
-                        goto error;
-                }
-
-
-                r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
-                dma_fence_put(&array->base);
-                if (r)
-                        goto error;
-
-                mutex_unlock(&id_mgr->lock);
-                return 0;
-
-        }
-        kfree(fences);
-
-        job->vm_needs_flush = vm->use_cpu_for_update;
-        /* Check if we can use a VMID already assigned to this VM */
-        list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
-                struct dma_fence *flushed;
-                bool needs_flush = vm->use_cpu_for_update;
-
-                /* Check all the prerequisites to using this VMID */
-                if (amdgpu_vm_had_gpu_reset(adev, id))
-                        continue;
-
-                if (atomic64_read(&id->owner) != vm->client_id)
-                        continue;
-
-                if (job->vm_pd_addr != id->pd_gpu_addr)
-                        continue;
-
-                if (!id->last_flush ||
-                    (id->last_flush->context != fence_context &&
-                     !dma_fence_is_signaled(id->last_flush)))
-                        needs_flush = true;
-
-                flushed = id->flushed_updates;
-                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
-                        needs_flush = true;
-
-                /* Concurrent flushes are only possible starting with Vega10 */
-                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
-                        continue;
-
-                /* Good we can use this VMID. Remember this submission as
-                 * user of the VMID.
-                 */
-                r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-                if (r)
-                        goto error;
-
-                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
-                        dma_fence_put(id->flushed_updates);
-                        id->flushed_updates = dma_fence_get(updates);
-                }
-
-                if (needs_flush)
-                        goto needs_flush;
-                else
-                        goto no_flush_needed;
-
-        };
-
-        /* Still no ID to use? Then use the idle one found earlier */
-        id = idle;
-
-        /* Remember this submission as user of the VMID */
-        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-        if (r)
-                goto error;
-
-        id->pd_gpu_addr = job->vm_pd_addr;
-        dma_fence_put(id->flushed_updates);
-        id->flushed_updates = dma_fence_get(updates);
-        atomic64_set(&id->owner, vm->client_id);
-
-needs_flush:
-        job->vm_needs_flush = true;
-        dma_fence_put(id->last_flush);
-        id->last_flush = NULL;
-
-no_flush_needed:
-        list_move_tail(&id->list, &id_mgr->ids_lru);
-
-        job->vm_id = id - id_mgr->ids;
-        trace_amdgpu_vm_grab_id(vm, ring, job);
-
-error:
-        mutex_unlock(&id_mgr->lock);
-        return r;
-}
-
-static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
-                                         struct amdgpu_vm *vm,
-                                         unsigned vmhub)
-{
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-
-        mutex_lock(&id_mgr->lock);
-        if (vm->reserved_vmid[vmhub]) {
-                list_add(&vm->reserved_vmid[vmhub]->list,
-                         &id_mgr->ids_lru);
-                vm->reserved_vmid[vmhub] = NULL;
-                atomic_dec(&id_mgr->reserved_vmid_num);
-        }
-        mutex_unlock(&id_mgr->lock);
-}
-
-static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
-                                         struct amdgpu_vm *vm,
-                                         unsigned vmhub)
-{
-        struct amdgpu_vm_id_manager *id_mgr;
-        struct amdgpu_vm_id *idle;
-        int r = 0;
-
-        id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        mutex_lock(&id_mgr->lock);
-        if (vm->reserved_vmid[vmhub])
-                goto unlock;
-        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-            AMDGPU_VM_MAX_RESERVED_VMID) {
-                DRM_ERROR("Over limitation of reserved vmid\n");
-                atomic_dec(&id_mgr->reserved_vmid_num);
-                r = -EINVAL;
-                goto unlock;
-        }
-        /* Select the first entry VMID */
-        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
-        list_del_init(&idle->list);
-        vm->reserved_vmid[vmhub] = idle;
-        mutex_unlock(&id_mgr->lock);
-
-        return 0;
-unlock:
-        mutex_unlock(&id_mgr->lock);
-        return r;
-}
-
-/**
  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
  *
  * @adev: amdgpu_device pointer
@@ -767,8 +441,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        struct amdgpu_vm_id *id;
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        struct amdgpu_vmid *id;
         bool gds_switch_needed;
         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
@@ -783,7 +457,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                 id->oa_base != job->oa_base ||
                 id->oa_size != job->oa_size);
 
-        if (amdgpu_vm_had_gpu_reset(adev, id))
+        if (amdgpu_vmid_had_gpu_reset(adev, id))
                 return true;
 
         return vm_flush_needed || gds_switch_needed;
@@ -807,8 +481,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
         struct amdgpu_device *adev = ring->adev;
         unsigned vmhub = ring->funcs->vmhub;
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
+        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+        struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id];
         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                 id->gds_base != job->gds_base ||
                 id->gds_size != job->gds_size ||
@@ -820,7 +494,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
         unsigned patch_offset = 0;
         int r;
 
-        if (amdgpu_vm_had_gpu_reset(adev, id)) {
+        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                 gds_switch_needed = true;
                 vm_flush_needed = true;
         }
@@ -876,49 +550,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 }
 
 /**
- * amdgpu_vm_reset_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- * @vm_id: vmid number to use
- *
- * Reset saved GDW, GWS and OA to force switch on next flush.
- */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
-                        unsigned vmid)
-{
-        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-        struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
-
-        atomic64_set(&id->owner, 0);
-        id->gds_base = 0;
-        id->gds_size = 0;
-        id->gws_base = 0;
-        id->gws_size = 0;
-        id->oa_base = 0;
-        id->oa_size = 0;
-}
-
-/**
- * amdgpu_vm_reset_all_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- *
- * Reset VMID to force flush on next use
- */
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
-{
-        unsigned i, j;
-
-        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-                struct amdgpu_vm_id_manager *id_mgr =
-                        &adev->vm_manager.id_mgr[i];
-
-                for (j = 1; j < id_mgr->num_ids; ++j)
-                        amdgpu_vm_reset_id(adev, i, j);
-        }
-}
-
-/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
@@ -2819,7 +2450,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         amdgpu_bo_unref(&root);
         dma_fence_put(vm->last_update);
         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
-                amdgpu_vm_free_reserved_vmid(adev, vm, i);
+                amdgpu_vmid_free_reserved(adev, vm, i);
 }
 
 /**
@@ -2861,23 +2492,9 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
  */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-        unsigned i, j;
-
-        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-                struct amdgpu_vm_id_manager *id_mgr =
-                        &adev->vm_manager.id_mgr[i];
+        unsigned i;
 
-                mutex_init(&id_mgr->lock);
-                INIT_LIST_HEAD(&id_mgr->ids_lru);
-                atomic_set(&id_mgr->reserved_vmid_num, 0);
-
-                /* skip over VMID 0, since it is the system VM */
-                for (j = 1; j < id_mgr->num_ids; ++j) {
-                        amdgpu_vm_reset_id(adev, i, j);
-                        amdgpu_sync_create(&id_mgr->ids[i].active);
-                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
-                }
-        }
+        amdgpu_vmid_mgr_init(adev);
 
         adev->vm_manager.fence_context =
                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
@@ -2918,24 +2535,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-        unsigned i, j;
-
         WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
         idr_destroy(&adev->vm_manager.pasid_idr);
 
-        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-                struct amdgpu_vm_id_manager *id_mgr =
-                        &adev->vm_manager.id_mgr[i];
-
-                mutex_destroy(&id_mgr->lock);
-                for (j = 0; j < AMDGPU_NUM_VM; ++j) {
-                        struct amdgpu_vm_id *id = &id_mgr->ids[j];
-
-                        amdgpu_sync_free(&id->active);
-                        dma_fence_put(id->flushed_updates);
-                        dma_fence_put(id->last_flush);
-                }
-        }
+        amdgpu_vmid_mgr_fini(adev);
 }
 
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -2948,13 +2551,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         switch (args->in.op) {
         case AMDGPU_VM_OP_RESERVE_VMID:
                 /* current, we only have requirement to reserve vmid from gfxhub */
-                r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
-                                                  AMDGPU_GFXHUB);
+                r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
                 if (r)
                         return r;
                 break;
         case AMDGPU_VM_OP_UNRESERVE_VMID:
-                amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+                amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
                 break;
         default:
                 return -EINVAL;
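
The amdgpu_vm_ioctl() hunk above is the only user-visible entry point to the reserved-VMID path. A hedged userspace sketch of driving it through the DRM uAPI: AMDGPU_VM_OP_RESERVE_VMID, AMDGPU_VM_OP_UNRESERVE_VMID, union drm_amdgpu_vm and DRM_IOCTL_AMDGPU_VM come from the kernel's amdgpu_drm.h, while the device node path and error handling here are illustrative assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <libdrm/amdgpu_drm.h>  /* union drm_amdgpu_vm, DRM_IOCTL_AMDGPU_VM */

    int main(void)
    {
            union drm_amdgpu_vm vm_op;
            int fd = open("/dev/dri/renderD128", O_RDWR);  /* illustrative node */

            if (fd < 0)
                    return 1;

            memset(&vm_op, 0, sizeof(vm_op));
            vm_op.in.op = AMDGPU_VM_OP_RESERVE_VMID;  /* gfxhub only, per the hunk */
            if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &vm_op))
                    perror("reserve vmid");

            memset(&vm_op, 0, sizeof(vm_op));
            vm_op.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
            if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &vm_op))
                    perror("unreserve vmid");

            close(fd);
            return 0;
    }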
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index edd2ea52dc00..78296d1a5b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -31,6 +31,7 @@
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
 
 struct amdgpu_bo_va;
 struct amdgpu_job;
@@ -40,9 +41,6 @@ struct amdgpu_bo_list_entry;
  * GPUVM handling
  */
 
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
 /* Maximum number of PTEs the hardware can write with one command */
 #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
 
@@ -197,7 +195,7 @@ struct amdgpu_vm {
         u64 client_id;
         unsigned int pasid;
         /* dedicated to vm */
-        struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
+        struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
 
         /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
         bool use_cpu_for_update;
@@ -212,37 +210,9 @@ struct amdgpu_vm {
         unsigned int fault_credit;
 };
 
-struct amdgpu_vm_id {
-        struct list_head list;
-        struct amdgpu_sync active;
-        struct dma_fence *last_flush;
-        atomic64_t owner;
-
-        uint64_t pd_gpu_addr;
-        /* last flushed PD/PT update */
-        struct dma_fence *flushed_updates;
-
-        uint32_t current_gpu_reset_count;
-
-        uint32_t gds_base;
-        uint32_t gds_size;
-        uint32_t gws_base;
-        uint32_t gws_size;
-        uint32_t oa_base;
-        uint32_t oa_size;
-};
-
-struct amdgpu_vm_id_manager {
-        struct mutex lock;
-        unsigned num_ids;
-        struct list_head ids_lru;
-        struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-        atomic_t reserved_vmid_num;
-};
-
 struct amdgpu_vm_manager {
         /* Handling of VMIDs */
-        struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
+        struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
 
         /* Handling of VM fences */
         u64 fence_context;
@@ -280,8 +250,6 @@ struct amdgpu_vm_manager {
         spinlock_t pasid_lock;
 };
 
-int amdgpu_vm_alloc_pasid(unsigned int bits);
-void amdgpu_vm_free_pasid(unsigned int pasid);
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -299,13 +267,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                         struct amdgpu_vm *vm,
                         uint64_t saddr, uint64_t size);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync, struct dma_fence *fence,
-                      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
-                        unsigned vmid);
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1a73c43f32d..8e28270d1ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -956,7 +956,7 @@ static int gmc_v6_0_resume(void *handle)
         if (r)
                 return r;
 
-        amdgpu_vm_reset_all_ids(adev);
+        amdgpu_vmid_reset_all(adev);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 356a9a71b8cf..86e9d682c59e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1107,7 +1107,7 @@ static int gmc_v7_0_resume(void *handle)
         if (r)
                 return r;
 
-        amdgpu_vm_reset_all_ids(adev);
+        amdgpu_vmid_reset_all(adev);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index fce45578f5fd..9a813d834f1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1212,7 +1212,7 @@ static int gmc_v8_0_resume(void *handle)
         if (r)
                 return r;
 
-        amdgpu_vm_reset_all_ids(adev);
+        amdgpu_vmid_reset_all(adev);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b776df4c999f..909274e3ebe7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1056,7 +1056,7 @@ static int gmc_v9_0_resume(void *handle)
         if (r)
                 return r;
 
-        amdgpu_vm_reset_all_ids(adev);
+        amdgpu_vmid_reset_all(adev);
 
         return 0;
 }