diff options
author | Christian König <christian.koenig@amd.com> | 2018-10-25 04:49:07 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-11-05 15:49:40 -0500 |
commit | af5fe1e96aa156886f89282371fce1629fcc9f6a (patch) | |
tree | 317a9aaf3281c1ec56c9d0ed3a8039343d107014 /drivers | |
parent | 5c76c6a8975e1d074dc5763d3f46c928bc7d6484 (diff) |
drm/amdgpu: cleanup GMC v9 TLB invalidation
Move the kiq handling into amdgpu_virt.c and drop the fallback.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 40 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 53 |
3 files changed, 49 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index ff887639bfa3..cfee74732edb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | |||
@@ -132,6 +132,46 @@ failed_kiq_write: | |||
132 | pr_err("failed to write reg:%x\n", reg); | 132 | pr_err("failed to write reg:%x\n", reg); |
133 | } | 133 | } |
134 | 134 | ||
135 | void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
136 | uint32_t reg0, uint32_t reg1, | ||
137 | uint32_t ref, uint32_t mask) | ||
138 | { | ||
139 | struct amdgpu_kiq *kiq = &adev->gfx.kiq; | ||
140 | struct amdgpu_ring *ring = &kiq->ring; | ||
141 | signed long r, cnt = 0; | ||
142 | unsigned long flags; | ||
143 | uint32_t seq; | ||
144 | |||
145 | spin_lock_irqsave(&kiq->ring_lock, flags); | ||
146 | amdgpu_ring_alloc(ring, 32); | ||
147 | amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, | ||
148 | ref, mask); | ||
149 | amdgpu_fence_emit_polling(ring, &seq); | ||
150 | amdgpu_ring_commit(ring); | ||
151 | spin_unlock_irqrestore(&kiq->ring_lock, flags); | ||
152 | |||
153 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
154 | |||
155 | /* don't wait anymore for IRQ context */ | ||
156 | if (r < 1 && in_interrupt()) | ||
157 | goto failed_kiq; | ||
158 | |||
159 | might_sleep(); | ||
160 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | ||
161 | |||
162 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | ||
163 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
164 | } | ||
165 | |||
166 | if (cnt > MAX_KIQ_REG_TRY) | ||
167 | goto failed_kiq; | ||
168 | |||
169 | return; | ||
170 | |||
171 | failed_kiq: | ||
172 | pr_err("failed to write reg %x wait reg %x\n", reg0, reg1); | ||
173 | } | ||
174 | |||
135 | /** | 175 | /** |
136 | * amdgpu_virt_request_full_gpu() - request full gpu access | 176 | * amdgpu_virt_request_full_gpu() - request full gpu access |
137 | * @amdgpu: amdgpu device. | 177 | * @amdgpu: amdgpu device. |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index cf46dfb59320..0728fbc9a692 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | |||
@@ -278,6 +278,9 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); | |||
278 | void amdgpu_virt_init_setting(struct amdgpu_device *adev); | 278 | void amdgpu_virt_init_setting(struct amdgpu_device *adev); |
279 | uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); | 279 | uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); |
280 | void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); | 280 | void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); |
281 | void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
282 | uint32_t reg0, uint32_t reg1, | ||
283 | uint32_t ref, uint32_t mask); | ||
281 | int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); | 284 | int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); |
282 | int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); | 285 | int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); |
283 | int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); | 286 | int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6cedf7ebf036..4845b6af5808 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, | |||
312 | return req; | 312 | return req; |
313 | } | 313 | } |
314 | 314 | ||
315 | static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
316 | uint32_t reg0, uint32_t reg1, | ||
317 | uint32_t ref, uint32_t mask) | ||
318 | { | ||
319 | signed long r, cnt = 0; | ||
320 | unsigned long flags; | ||
321 | uint32_t seq; | ||
322 | struct amdgpu_kiq *kiq = &adev->gfx.kiq; | ||
323 | struct amdgpu_ring *ring = &kiq->ring; | ||
324 | |||
325 | spin_lock_irqsave(&kiq->ring_lock, flags); | ||
326 | |||
327 | amdgpu_ring_alloc(ring, 32); | ||
328 | amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, | ||
329 | ref, mask); | ||
330 | amdgpu_fence_emit_polling(ring, &seq); | ||
331 | amdgpu_ring_commit(ring); | ||
332 | spin_unlock_irqrestore(&kiq->ring_lock, flags); | ||
333 | |||
334 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
335 | |||
336 | /* don't wait anymore for IRQ context */ | ||
337 | if (r < 1 && in_interrupt()) | ||
338 | goto failed_kiq; | ||
339 | |||
340 | might_sleep(); | ||
341 | |||
342 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | ||
343 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | ||
344 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
345 | } | ||
346 | |||
347 | if (cnt > MAX_KIQ_REG_TRY) | ||
348 | goto failed_kiq; | ||
349 | |||
350 | return 0; | ||
351 | |||
352 | failed_kiq: | ||
353 | pr_err("failed to invalidate tlb with kiq\n"); | ||
354 | return r; | ||
355 | } | ||
356 | |||
357 | /* | 315 | /* |
358 | * GART | 316 | * GART |
359 | * VMID 0 is the physical GPU addresses as used by the kernel. | 317 | * VMID 0 is the physical GPU addresses as used by the kernel. |
@@ -375,7 +333,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, | |||
375 | { | 333 | { |
376 | const unsigned eng = 17; | 334 | const unsigned eng = 17; |
377 | unsigned i, j; | 335 | unsigned i, j; |
378 | int r; | ||
379 | 336 | ||
380 | for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { | 337 | for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { |
381 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; | 338 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; |
@@ -384,10 +341,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, | |||
384 | if (adev->gfx.kiq.ring.sched.ready && | 341 | if (adev->gfx.kiq.ring.sched.ready && |
385 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && | 342 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && |
386 | !adev->in_gpu_reset) { | 343 | !adev->in_gpu_reset) { |
387 | r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, | 344 | uint32_t req = hub->vm_inv_eng0_req + eng; |
388 | hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); | 345 | uint32_t ack = hub->vm_inv_eng0_ack + eng; |
389 | if (!r) | 346 | |
390 | continue; | 347 | amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, |
348 | 1 << vmid); | ||
349 | continue; | ||
391 | } | 350 | } |
392 | 351 | ||
393 | spin_lock(&adev->gmc.invalidate_lock); | 352 | spin_lock(&adev->gmc.invalidate_lock); |