diff options
author | Christian König <christian.koenig@amd.com> | 2016-01-21 05:28:53 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-10 14:16:58 -0500 |
commit | a27de35caab59bacf5c47713856739f86ec06e43 (patch) | |
tree | 7452339b06230a40ed9f2986d1c525e1e793173c | |
parent | a9a78b329a3e31a977f8d8ef64b2f3a574899992 (diff) |
drm/amdgpu: remove the ring lock v2
It's not needed any more because all access goes through the scheduler now.
v2: Update commit message.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 19 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 80 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8 |
15 files changed, 45 insertions(+), 132 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3d4c2abfcfe8..081953e1ec6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -814,7 +814,6 @@ struct amdgpu_ring { | |||
814 | struct amd_gpu_scheduler sched; | 814 | struct amd_gpu_scheduler sched; |
815 | 815 | ||
816 | spinlock_t fence_lock; | 816 | spinlock_t fence_lock; |
817 | struct mutex *ring_lock; | ||
818 | struct amdgpu_bo *ring_obj; | 817 | struct amdgpu_bo *ring_obj; |
819 | volatile uint32_t *ring; | 818 | volatile uint32_t *ring; |
820 | unsigned rptr_offs; | 819 | unsigned rptr_offs; |
@@ -1190,12 +1189,9 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev); | |||
1190 | /* Ring access between begin & end cannot sleep */ | 1189 | /* Ring access between begin & end cannot sleep */ |
1191 | void amdgpu_ring_free_size(struct amdgpu_ring *ring); | 1190 | void amdgpu_ring_free_size(struct amdgpu_ring *ring); |
1192 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); | 1191 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); |
1193 | int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); | ||
1194 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); | 1192 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); |
1195 | void amdgpu_ring_commit(struct amdgpu_ring *ring); | 1193 | void amdgpu_ring_commit(struct amdgpu_ring *ring); |
1196 | void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); | ||
1197 | void amdgpu_ring_undo(struct amdgpu_ring *ring); | 1194 | void amdgpu_ring_undo(struct amdgpu_ring *ring); |
1198 | void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); | ||
1199 | unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, | 1195 | unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, |
1200 | uint32_t **data); | 1196 | uint32_t **data); |
1201 | int amdgpu_ring_restore(struct amdgpu_ring *ring, | 1197 | int amdgpu_ring_restore(struct amdgpu_ring *ring, |
@@ -2009,7 +2005,6 @@ struct amdgpu_device { | |||
2009 | 2005 | ||
2010 | /* rings */ | 2006 | /* rings */ |
2011 | unsigned fence_context; | 2007 | unsigned fence_context; |
2012 | struct mutex ring_lock; | ||
2013 | unsigned num_rings; | 2008 | unsigned num_rings; |
2014 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; | 2009 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
2015 | bool ib_pool_ready; | 2010 | bool ib_pool_ready; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bbe8023bf58f..cf8a3b37a111 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -1455,7 +1455,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
1455 | 1455 | ||
1456 | /* mutex initialization are all done here so we | 1456 | /* mutex initialization are all done here so we |
1457 | * can recall function without having locking issues */ | 1457 | * can recall function without having locking issues */ |
1458 | mutex_init(&adev->ring_lock); | ||
1459 | mutex_init(&adev->vm_manager.lock); | 1458 | mutex_init(&adev->vm_manager.lock); |
1460 | atomic_set(&adev->irq.ih.lock, 0); | 1459 | atomic_set(&adev->irq.ih.lock, 0); |
1461 | mutex_init(&adev->gem.mutex); | 1460 | mutex_init(&adev->gem.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 08963fc83168..72105020086c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -487,7 +487,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
487 | 487 | ||
488 | if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) | 488 | if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) |
489 | kmem_cache_destroy(amdgpu_fence_slab); | 489 | kmem_cache_destroy(amdgpu_fence_slab); |
490 | mutex_lock(&adev->ring_lock); | ||
491 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 490 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
492 | struct amdgpu_ring *ring = adev->rings[i]; | 491 | struct amdgpu_ring *ring = adev->rings[i]; |
493 | 492 | ||
@@ -505,7 +504,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
505 | del_timer_sync(&ring->fence_drv.fallback_timer); | 504 | del_timer_sync(&ring->fence_drv.fallback_timer); |
506 | ring->fence_drv.initialized = false; | 505 | ring->fence_drv.initialized = false; |
507 | } | 506 | } |
508 | mutex_unlock(&adev->ring_lock); | ||
509 | } | 507 | } |
510 | 508 | ||
511 | /** | 509 | /** |
@@ -520,7 +518,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) | |||
520 | { | 518 | { |
521 | int i, r; | 519 | int i, r; |
522 | 520 | ||
523 | mutex_lock(&adev->ring_lock); | ||
524 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 521 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
525 | struct amdgpu_ring *ring = adev->rings[i]; | 522 | struct amdgpu_ring *ring = adev->rings[i]; |
526 | if (!ring || !ring->fence_drv.initialized) | 523 | if (!ring || !ring->fence_drv.initialized) |
@@ -537,7 +534,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) | |||
537 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | 534 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, |
538 | ring->fence_drv.irq_type); | 535 | ring->fence_drv.irq_type); |
539 | } | 536 | } |
540 | mutex_unlock(&adev->ring_lock); | ||
541 | } | 537 | } |
542 | 538 | ||
543 | /** | 539 | /** |
@@ -556,7 +552,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) | |||
556 | { | 552 | { |
557 | int i; | 553 | int i; |
558 | 554 | ||
559 | mutex_lock(&adev->ring_lock); | ||
560 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 555 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
561 | struct amdgpu_ring *ring = adev->rings[i]; | 556 | struct amdgpu_ring *ring = adev->rings[i]; |
562 | if (!ring || !ring->fence_drv.initialized) | 557 | if (!ring || !ring->fence_drv.initialized) |
@@ -566,7 +561,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) | |||
566 | amdgpu_irq_get(adev, ring->fence_drv.irq_src, | 561 | amdgpu_irq_get(adev, ring->fence_drv.irq_src, |
567 | ring->fence_drv.irq_type); | 562 | ring->fence_drv.irq_type); |
568 | } | 563 | } |
569 | mutex_unlock(&adev->ring_lock); | ||
570 | } | 564 | } |
571 | 565 | ||
572 | /** | 566 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 56ae9a58dbc5..40c9779993c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -147,7 +147,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
147 | return -EINVAL; | 147 | return -EINVAL; |
148 | } | 148 | } |
149 | 149 | ||
150 | r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); | 150 | r = amdgpu_ring_alloc(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); |
151 | if (r) { | 151 | if (r) { |
152 | dev_err(adev->dev, "scheduling IB failed (%d).\n", r); | 152 | dev_err(adev->dev, "scheduling IB failed (%d).\n", r); |
153 | return r; | 153 | return r; |
@@ -155,7 +155,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
155 | 155 | ||
156 | r = amdgpu_sync_wait(&ibs->sync); | 156 | r = amdgpu_sync_wait(&ibs->sync); |
157 | if (r) { | 157 | if (r) { |
158 | amdgpu_ring_unlock_undo(ring); | 158 | amdgpu_ring_undo(ring); |
159 | dev_err(adev->dev, "failed to sync wait (%d)\n", r); | 159 | dev_err(adev->dev, "failed to sync wait (%d)\n", r); |
160 | return r; | 160 | return r; |
161 | } | 161 | } |
@@ -180,7 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
180 | 180 | ||
181 | if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { | 181 | if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) { |
182 | ring->current_ctx = old_ctx; | 182 | ring->current_ctx = old_ctx; |
183 | amdgpu_ring_unlock_undo(ring); | 183 | amdgpu_ring_undo(ring); |
184 | return -EINVAL; | 184 | return -EINVAL; |
185 | } | 185 | } |
186 | amdgpu_ring_emit_ib(ring, ib); | 186 | amdgpu_ring_emit_ib(ring, ib); |
@@ -191,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
191 | if (r) { | 191 | if (r) { |
192 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); | 192 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); |
193 | ring->current_ctx = old_ctx; | 193 | ring->current_ctx = old_ctx; |
194 | amdgpu_ring_unlock_undo(ring); | 194 | amdgpu_ring_undo(ring); |
195 | return r; | 195 | return r; |
196 | } | 196 | } |
197 | 197 | ||
@@ -203,7 +203,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
203 | AMDGPU_FENCE_FLAG_64BIT); | 203 | AMDGPU_FENCE_FLAG_64BIT); |
204 | } | 204 | } |
205 | 205 | ||
206 | amdgpu_ring_unlock_commit(ring); | 206 | amdgpu_ring_commit(ring); |
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
209 | 209 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7d8d84eaea4a..a0da563c8c82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -623,14 +623,12 @@ force: | |||
623 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); | 623 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); |
624 | } | 624 | } |
625 | 625 | ||
626 | mutex_lock(&adev->ring_lock); | ||
627 | |||
628 | /* update whether vce is active */ | 626 | /* update whether vce is active */ |
629 | ps->vce_active = adev->pm.dpm.vce_active; | 627 | ps->vce_active = adev->pm.dpm.vce_active; |
630 | 628 | ||
631 | ret = amdgpu_dpm_pre_set_power_state(adev); | 629 | ret = amdgpu_dpm_pre_set_power_state(adev); |
632 | if (ret) | 630 | if (ret) |
633 | goto done; | 631 | return; |
634 | 632 | ||
635 | /* update display watermarks based on new power state */ | 633 | /* update display watermarks based on new power state */ |
636 | amdgpu_display_bandwidth_update(adev); | 634 | amdgpu_display_bandwidth_update(adev); |
@@ -667,9 +665,6 @@ force: | |||
667 | amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); | 665 | amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); |
668 | } | 666 | } |
669 | } | 667 | } |
670 | |||
671 | done: | ||
672 | mutex_unlock(&adev->ring_lock); | ||
673 | } | 668 | } |
674 | 669 | ||
675 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) | 670 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) |
@@ -802,13 +797,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
802 | int i = 0; | 797 | int i = 0; |
803 | 798 | ||
804 | amdgpu_display_bandwidth_update(adev); | 799 | amdgpu_display_bandwidth_update(adev); |
805 | mutex_lock(&adev->ring_lock); | 800 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
806 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 801 | struct amdgpu_ring *ring = adev->rings[i]; |
807 | struct amdgpu_ring *ring = adev->rings[i]; | 802 | if (ring && ring->ready) |
808 | if (ring && ring->ready) | 803 | amdgpu_fence_wait_empty(ring); |
809 | amdgpu_fence_wait_empty(ring); | 804 | } |
810 | } | ||
811 | mutex_unlock(&adev->ring_lock); | ||
812 | 805 | ||
813 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); | 806 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); |
814 | } else { | 807 | } else { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 66c6bbd27309..81d06d772dde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -105,30 +105,6 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) | |||
105 | return 0; | 105 | return 0; |
106 | } | 106 | } |
107 | 107 | ||
108 | /** | ||
109 | * amdgpu_ring_lock - lock the ring and allocate space on it | ||
110 | * | ||
111 | * @adev: amdgpu_device pointer | ||
112 | * @ring: amdgpu_ring structure holding ring information | ||
113 | * @ndw: number of dwords to allocate in the ring buffer | ||
114 | * | ||
115 | * Lock the ring and allocate @ndw dwords in the ring buffer | ||
116 | * (all asics). | ||
117 | * Returns 0 on success, error on failure. | ||
118 | */ | ||
119 | int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) | ||
120 | { | ||
121 | int r; | ||
122 | |||
123 | mutex_lock(ring->ring_lock); | ||
124 | r = amdgpu_ring_alloc(ring, ndw); | ||
125 | if (r) { | ||
126 | mutex_unlock(ring->ring_lock); | ||
127 | return r; | ||
128 | } | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | /** amdgpu_ring_insert_nop - insert NOP packets | 108 | /** amdgpu_ring_insert_nop - insert NOP packets |
133 | * | 109 | * |
134 | * @ring: amdgpu_ring structure holding ring information | 110 | * @ring: amdgpu_ring structure holding ring information |
@@ -168,20 +144,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) | |||
168 | } | 144 | } |
169 | 145 | ||
170 | /** | 146 | /** |
171 | * amdgpu_ring_unlock_commit - tell the GPU to execute the new | ||
172 | * commands on the ring buffer and unlock it | ||
173 | * | ||
174 | * @ring: amdgpu_ring structure holding ring information | ||
175 | * | ||
176 | * Call amdgpu_ring_commit() then unlock the ring (all asics). | ||
177 | */ | ||
178 | void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring) | ||
179 | { | ||
180 | amdgpu_ring_commit(ring); | ||
181 | mutex_unlock(ring->ring_lock); | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * amdgpu_ring_undo - reset the wptr | 147 | * amdgpu_ring_undo - reset the wptr |
186 | * | 148 | * |
187 | * @ring: amdgpu_ring structure holding ring information | 149 | * @ring: amdgpu_ring structure holding ring information |
@@ -194,19 +156,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) | |||
194 | } | 156 | } |
195 | 157 | ||
196 | /** | 158 | /** |
197 | * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring | ||
198 | * | ||
199 | * @ring: amdgpu_ring structure holding ring information | ||
200 | * | ||
201 | * Call amdgpu_ring_undo() then unlock the ring (all asics). | ||
202 | */ | ||
203 | void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring) | ||
204 | { | ||
205 | amdgpu_ring_undo(ring); | ||
206 | mutex_unlock(ring->ring_lock); | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * amdgpu_ring_backup - Back up the content of a ring | 159 | * amdgpu_ring_backup - Back up the content of a ring |
211 | * | 160 | * |
212 | * @ring: the ring we want to back up | 161 | * @ring: the ring we want to back up |
@@ -218,43 +167,32 @@ unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, | |||
218 | { | 167 | { |
219 | unsigned size, ptr, i; | 168 | unsigned size, ptr, i; |
220 | 169 | ||
221 | /* just in case lock the ring */ | ||
222 | mutex_lock(ring->ring_lock); | ||
223 | *data = NULL; | 170 | *data = NULL; |
224 | 171 | ||
225 | if (ring->ring_obj == NULL) { | 172 | if (ring->ring_obj == NULL) |
226 | mutex_unlock(ring->ring_lock); | ||
227 | return 0; | 173 | return 0; |
228 | } | ||
229 | 174 | ||
230 | /* it doesn't make sense to save anything if all fences are signaled */ | 175 | /* it doesn't make sense to save anything if all fences are signaled */ |
231 | if (!amdgpu_fence_count_emitted(ring)) { | 176 | if (!amdgpu_fence_count_emitted(ring)) |
232 | mutex_unlock(ring->ring_lock); | ||
233 | return 0; | 177 | return 0; |
234 | } | ||
235 | 178 | ||
236 | ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); | 179 | ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); |
237 | 180 | ||
238 | size = ring->wptr + (ring->ring_size / 4); | 181 | size = ring->wptr + (ring->ring_size / 4); |
239 | size -= ptr; | 182 | size -= ptr; |
240 | size &= ring->ptr_mask; | 183 | size &= ring->ptr_mask; |
241 | if (size == 0) { | 184 | if (size == 0) |
242 | mutex_unlock(ring->ring_lock); | ||
243 | return 0; | 185 | return 0; |
244 | } | ||
245 | 186 | ||
246 | /* and then save the content of the ring */ | 187 | /* and then save the content of the ring */ |
247 | *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); | 188 | *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); |
248 | if (!*data) { | 189 | if (!*data) |
249 | mutex_unlock(ring->ring_lock); | ||
250 | return 0; | 190 | return 0; |
251 | } | ||
252 | for (i = 0; i < size; ++i) { | 191 | for (i = 0; i < size; ++i) { |
253 | (*data)[i] = ring->ring[ptr++]; | 192 | (*data)[i] = ring->ring[ptr++]; |
254 | ptr &= ring->ptr_mask; | 193 | ptr &= ring->ptr_mask; |
255 | } | 194 | } |
256 | 195 | ||
257 | mutex_unlock(ring->ring_lock); | ||
258 | return size; | 196 | return size; |
259 | } | 197 | } |
260 | 198 | ||
@@ -276,7 +214,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, | |||
276 | return 0; | 214 | return 0; |
277 | 215 | ||
278 | /* restore the saved ring content */ | 216 | /* restore the saved ring content */ |
279 | r = amdgpu_ring_lock(ring, size); | 217 | r = amdgpu_ring_alloc(ring, size); |
280 | if (r) | 218 | if (r) |
281 | return r; | 219 | return r; |
282 | 220 | ||
@@ -284,7 +222,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring, | |||
284 | amdgpu_ring_write(ring, data[i]); | 222 | amdgpu_ring_write(ring, data[i]); |
285 | } | 223 | } |
286 | 224 | ||
287 | amdgpu_ring_unlock_commit(ring); | 225 | amdgpu_ring_commit(ring); |
288 | kfree(data); | 226 | kfree(data); |
289 | return 0; | 227 | return 0; |
290 | } | 228 | } |
@@ -352,7 +290,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
352 | return r; | 290 | return r; |
353 | } | 291 | } |
354 | 292 | ||
355 | ring->ring_lock = &adev->ring_lock; | ||
356 | /* Align ring size */ | 293 | /* Align ring size */ |
357 | rb_bufsz = order_base_2(ring_size / 8); | 294 | rb_bufsz = order_base_2(ring_size / 8); |
358 | ring_size = (1 << (rb_bufsz + 1)) * 4; | 295 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
@@ -410,15 +347,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
410 | int r; | 347 | int r; |
411 | struct amdgpu_bo *ring_obj; | 348 | struct amdgpu_bo *ring_obj; |
412 | 349 | ||
413 | if (ring->ring_lock == NULL) | ||
414 | return; | ||
415 | |||
416 | mutex_lock(ring->ring_lock); | ||
417 | ring_obj = ring->ring_obj; | 350 | ring_obj = ring->ring_obj; |
418 | ring->ready = false; | 351 | ring->ready = false; |
419 | ring->ring = NULL; | 352 | ring->ring = NULL; |
420 | ring->ring_obj = NULL; | 353 | ring->ring_obj = NULL; |
421 | mutex_unlock(ring->ring_lock); | ||
422 | 354 | ||
423 | amdgpu_wb_free(ring->adev, ring->fence_offs); | 355 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
424 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 356 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 16fbde9c5f56..c90517f61210 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -788,14 +788,14 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
788 | unsigned i; | 788 | unsigned i; |
789 | int r; | 789 | int r; |
790 | 790 | ||
791 | r = amdgpu_ring_lock(ring, 16); | 791 | r = amdgpu_ring_alloc(ring, 16); |
792 | if (r) { | 792 | if (r) { |
793 | DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", | 793 | DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", |
794 | ring->idx, r); | 794 | ring->idx, r); |
795 | return r; | 795 | return r; |
796 | } | 796 | } |
797 | amdgpu_ring_write(ring, VCE_CMD_END); | 797 | amdgpu_ring_write(ring, VCE_CMD_END); |
798 | amdgpu_ring_unlock_commit(ring); | 798 | amdgpu_ring_commit(ring); |
799 | 799 | ||
800 | for (i = 0; i < adev->usec_timeout; i++) { | 800 | for (i = 0; i < adev->usec_timeout; i++) { |
801 | if (amdgpu_ring_get_rptr(ring) != rptr) | 801 | if (amdgpu_ring_get_rptr(ring) != rptr) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 538d6a26e529..e8a48ae8d360 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -560,7 +560,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) | |||
560 | tmp = 0xCAFEDEAD; | 560 | tmp = 0xCAFEDEAD; |
561 | adev->wb.wb[index] = cpu_to_le32(tmp); | 561 | adev->wb.wb[index] = cpu_to_le32(tmp); |
562 | 562 | ||
563 | r = amdgpu_ring_lock(ring, 5); | 563 | r = amdgpu_ring_alloc(ring, 5); |
564 | if (r) { | 564 | if (r) { |
565 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 565 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); |
566 | amdgpu_wb_free(adev, index); | 566 | amdgpu_wb_free(adev, index); |
@@ -571,7 +571,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) | |||
571 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | 571 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); |
572 | amdgpu_ring_write(ring, 1); /* number of DWs to follow */ | 572 | amdgpu_ring_write(ring, 1); /* number of DWs to follow */ |
573 | amdgpu_ring_write(ring, 0xDEADBEEF); | 573 | amdgpu_ring_write(ring, 0xDEADBEEF); |
574 | amdgpu_ring_unlock_commit(ring); | 574 | amdgpu_ring_commit(ring); |
575 | 575 | ||
576 | for (i = 0; i < adev->usec_timeout; i++) { | 576 | for (i = 0; i < adev->usec_timeout; i++) { |
577 | tmp = le32_to_cpu(adev->wb.wb[index]); | 577 | tmp = le32_to_cpu(adev->wb.wb[index]); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index b20eb7969e05..1d796eac421b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2379,7 +2379,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2379 | return r; | 2379 | return r; |
2380 | } | 2380 | } |
2381 | WREG32(scratch, 0xCAFEDEAD); | 2381 | WREG32(scratch, 0xCAFEDEAD); |
2382 | r = amdgpu_ring_lock(ring, 3); | 2382 | r = amdgpu_ring_alloc(ring, 3); |
2383 | if (r) { | 2383 | if (r) { |
2384 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); | 2384 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); |
2385 | amdgpu_gfx_scratch_free(adev, scratch); | 2385 | amdgpu_gfx_scratch_free(adev, scratch); |
@@ -2388,7 +2388,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2388 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 2388 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
2389 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | 2389 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); |
2390 | amdgpu_ring_write(ring, 0xDEADBEEF); | 2390 | amdgpu_ring_write(ring, 0xDEADBEEF); |
2391 | amdgpu_ring_unlock_commit(ring); | 2391 | amdgpu_ring_commit(ring); |
2392 | 2392 | ||
2393 | for (i = 0; i < adev->usec_timeout; i++) { | 2393 | for (i = 0; i < adev->usec_timeout; i++) { |
2394 | tmp = RREG32(scratch); | 2394 | tmp = RREG32(scratch); |
@@ -2812,7 +2812,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) | |||
2812 | 2812 | ||
2813 | gfx_v7_0_cp_gfx_enable(adev, true); | 2813 | gfx_v7_0_cp_gfx_enable(adev, true); |
2814 | 2814 | ||
2815 | r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); | 2815 | r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); |
2816 | if (r) { | 2816 | if (r) { |
2817 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); | 2817 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); |
2818 | return r; | 2818 | return r; |
@@ -2881,7 +2881,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) | |||
2881 | amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | 2881 | amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
2882 | amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ | 2882 | amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ |
2883 | 2883 | ||
2884 | amdgpu_ring_unlock_commit(ring); | 2884 | amdgpu_ring_commit(ring); |
2885 | 2885 | ||
2886 | return 0; | 2886 | return 0; |
2887 | } | 2887 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 248271050d0a..0d9ef524cf71 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -652,7 +652,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) | |||
652 | return r; | 652 | return r; |
653 | } | 653 | } |
654 | WREG32(scratch, 0xCAFEDEAD); | 654 | WREG32(scratch, 0xCAFEDEAD); |
655 | r = amdgpu_ring_lock(ring, 3); | 655 | r = amdgpu_ring_alloc(ring, 3); |
656 | if (r) { | 656 | if (r) { |
657 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 657 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", |
658 | ring->idx, r); | 658 | ring->idx, r); |
@@ -662,7 +662,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) | |||
662 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 662 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
663 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | 663 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); |
664 | amdgpu_ring_write(ring, 0xDEADBEEF); | 664 | amdgpu_ring_write(ring, 0xDEADBEEF); |
665 | amdgpu_ring_unlock_commit(ring); | 665 | amdgpu_ring_commit(ring); |
666 | 666 | ||
667 | for (i = 0; i < adev->usec_timeout; i++) { | 667 | for (i = 0; i < adev->usec_timeout; i++) { |
668 | tmp = RREG32(scratch); | 668 | tmp = RREG32(scratch); |
@@ -3062,7 +3062,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
3062 | 3062 | ||
3063 | gfx_v8_0_cp_gfx_enable(adev, true); | 3063 | gfx_v8_0_cp_gfx_enable(adev, true); |
3064 | 3064 | ||
3065 | r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); | 3065 | r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4); |
3066 | if (r) { | 3066 | if (r) { |
3067 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); | 3067 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); |
3068 | return r; | 3068 | return r; |
@@ -3126,7 +3126,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
3126 | amdgpu_ring_write(ring, 0x8000); | 3126 | amdgpu_ring_write(ring, 0x8000); |
3127 | amdgpu_ring_write(ring, 0x8000); | 3127 | amdgpu_ring_write(ring, 0x8000); |
3128 | 3128 | ||
3129 | amdgpu_ring_unlock_commit(ring); | 3129 | amdgpu_ring_commit(ring); |
3130 | 3130 | ||
3131 | return 0; | 3131 | return 0; |
3132 | } | 3132 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 622f5dad6f2a..9fae4bf1a6c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -611,7 +611,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | |||
611 | tmp = 0xCAFEDEAD; | 611 | tmp = 0xCAFEDEAD; |
612 | adev->wb.wb[index] = cpu_to_le32(tmp); | 612 | adev->wb.wb[index] = cpu_to_le32(tmp); |
613 | 613 | ||
614 | r = amdgpu_ring_lock(ring, 5); | 614 | r = amdgpu_ring_alloc(ring, 5); |
615 | if (r) { | 615 | if (r) { |
616 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 616 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); |
617 | amdgpu_wb_free(adev, index); | 617 | amdgpu_wb_free(adev, index); |
@@ -624,7 +624,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | |||
624 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | 624 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); |
625 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | 625 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); |
626 | amdgpu_ring_write(ring, 0xDEADBEEF); | 626 | amdgpu_ring_write(ring, 0xDEADBEEF); |
627 | amdgpu_ring_unlock_commit(ring); | 627 | amdgpu_ring_commit(ring); |
628 | 628 | ||
629 | for (i = 0; i < adev->usec_timeout; i++) { | 629 | for (i = 0; i < adev->usec_timeout; i++) { |
630 | tmp = le32_to_cpu(adev->wb.wb[index]); | 630 | tmp = le32_to_cpu(adev->wb.wb[index]); |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index d6170e69eb34..b2fbf96dad7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -762,7 +762,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) | |||
762 | tmp = 0xCAFEDEAD; | 762 | tmp = 0xCAFEDEAD; |
763 | adev->wb.wb[index] = cpu_to_le32(tmp); | 763 | adev->wb.wb[index] = cpu_to_le32(tmp); |
764 | 764 | ||
765 | r = amdgpu_ring_lock(ring, 5); | 765 | r = amdgpu_ring_alloc(ring, 5); |
766 | if (r) { | 766 | if (r) { |
767 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 767 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); |
768 | amdgpu_wb_free(adev, index); | 768 | amdgpu_wb_free(adev, index); |
@@ -775,7 +775,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) | |||
775 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | 775 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); |
776 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | 776 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); |
777 | amdgpu_ring_write(ring, 0xDEADBEEF); | 777 | amdgpu_ring_write(ring, 0xDEADBEEF); |
778 | amdgpu_ring_unlock_commit(ring); | 778 | amdgpu_ring_commit(ring); |
779 | 779 | ||
780 | for (i = 0; i < adev->usec_timeout; i++) { | 780 | for (i = 0; i < adev->usec_timeout; i++) { |
781 | tmp = le32_to_cpu(adev->wb.wb[index]); | 781 | tmp = le32_to_cpu(adev->wb.wb[index]); |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index c4753726a79f..e7a141c75467 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -164,7 +164,7 @@ static int uvd_v4_2_hw_init(void *handle) | |||
164 | goto done; | 164 | goto done; |
165 | } | 165 | } |
166 | 166 | ||
167 | r = amdgpu_ring_lock(ring, 10); | 167 | r = amdgpu_ring_alloc(ring, 10); |
168 | if (r) { | 168 | if (r) { |
169 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | 169 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); |
170 | goto done; | 170 | goto done; |
@@ -189,7 +189,7 @@ static int uvd_v4_2_hw_init(void *handle) | |||
189 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | 189 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); |
190 | amdgpu_ring_write(ring, 3); | 190 | amdgpu_ring_write(ring, 3); |
191 | 191 | ||
192 | amdgpu_ring_unlock_commit(ring); | 192 | amdgpu_ring_commit(ring); |
193 | 193 | ||
194 | done: | 194 | done: |
195 | /* lower clocks again */ | 195 | /* lower clocks again */ |
@@ -453,7 +453,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) | |||
453 | int r; | 453 | int r; |
454 | 454 | ||
455 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 455 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
456 | r = amdgpu_ring_lock(ring, 3); | 456 | r = amdgpu_ring_alloc(ring, 3); |
457 | if (r) { | 457 | if (r) { |
458 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 458 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", |
459 | ring->idx, r); | 459 | ring->idx, r); |
@@ -461,7 +461,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) | |||
461 | } | 461 | } |
462 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 462 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
463 | amdgpu_ring_write(ring, 0xDEADBEEF); | 463 | amdgpu_ring_write(ring, 0xDEADBEEF); |
464 | amdgpu_ring_unlock_commit(ring); | 464 | amdgpu_ring_commit(ring); |
465 | for (i = 0; i < adev->usec_timeout; i++) { | 465 | for (i = 0; i < adev->usec_timeout; i++) { |
466 | tmp = RREG32(mmUVD_CONTEXT_ID); | 466 | tmp = RREG32(mmUVD_CONTEXT_ID); |
467 | if (tmp == 0xDEADBEEF) | 467 | if (tmp == 0xDEADBEEF) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 774033ab2b86..3775f7756cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -160,7 +160,7 @@ static int uvd_v5_0_hw_init(void *handle) | |||
160 | goto done; | 160 | goto done; |
161 | } | 161 | } |
162 | 162 | ||
163 | r = amdgpu_ring_lock(ring, 10); | 163 | r = amdgpu_ring_alloc(ring, 10); |
164 | if (r) { | 164 | if (r) { |
165 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | 165 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); |
166 | goto done; | 166 | goto done; |
@@ -185,7 +185,7 @@ static int uvd_v5_0_hw_init(void *handle) | |||
185 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | 185 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); |
186 | amdgpu_ring_write(ring, 3); | 186 | amdgpu_ring_write(ring, 3); |
187 | 187 | ||
188 | amdgpu_ring_unlock_commit(ring); | 188 | amdgpu_ring_commit(ring); |
189 | 189 | ||
190 | done: | 190 | done: |
191 | /* lower clocks again */ | 191 | /* lower clocks again */ |
@@ -497,7 +497,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | |||
497 | int r; | 497 | int r; |
498 | 498 | ||
499 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 499 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
500 | r = amdgpu_ring_lock(ring, 3); | 500 | r = amdgpu_ring_alloc(ring, 3); |
501 | if (r) { | 501 | if (r) { |
502 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 502 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", |
503 | ring->idx, r); | 503 | ring->idx, r); |
@@ -505,7 +505,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | |||
505 | } | 505 | } |
506 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 506 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
507 | amdgpu_ring_write(ring, 0xDEADBEEF); | 507 | amdgpu_ring_write(ring, 0xDEADBEEF); |
508 | amdgpu_ring_unlock_commit(ring); | 508 | amdgpu_ring_commit(ring); |
509 | for (i = 0; i < adev->usec_timeout; i++) { | 509 | for (i = 0; i < adev->usec_timeout; i++) { |
510 | tmp = RREG32(mmUVD_CONTEXT_ID); | 510 | tmp = RREG32(mmUVD_CONTEXT_ID); |
511 | if (tmp == 0xDEADBEEF) | 511 | if (tmp == 0xDEADBEEF) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index eb0bbbab5b35..0b2fccad8e9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -157,7 +157,7 @@ static int uvd_v6_0_hw_init(void *handle) | |||
157 | goto done; | 157 | goto done; |
158 | } | 158 | } |
159 | 159 | ||
160 | r = amdgpu_ring_lock(ring, 10); | 160 | r = amdgpu_ring_alloc(ring, 10); |
161 | if (r) { | 161 | if (r) { |
162 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | 162 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); |
163 | goto done; | 163 | goto done; |
@@ -182,7 +182,7 @@ static int uvd_v6_0_hw_init(void *handle) | |||
182 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | 182 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); |
183 | amdgpu_ring_write(ring, 3); | 183 | amdgpu_ring_write(ring, 3); |
184 | 184 | ||
185 | amdgpu_ring_unlock_commit(ring); | 185 | amdgpu_ring_commit(ring); |
186 | 186 | ||
187 | done: | 187 | done: |
188 | if (!r) | 188 | if (!r) |
@@ -736,7 +736,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
736 | int r; | 736 | int r; |
737 | 737 | ||
738 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 738 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
739 | r = amdgpu_ring_lock(ring, 3); | 739 | r = amdgpu_ring_alloc(ring, 3); |
740 | if (r) { | 740 | if (r) { |
741 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 741 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", |
742 | ring->idx, r); | 742 | ring->idx, r); |
@@ -744,7 +744,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
744 | } | 744 | } |
745 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 745 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
746 | amdgpu_ring_write(ring, 0xDEADBEEF); | 746 | amdgpu_ring_write(ring, 0xDEADBEEF); |
747 | amdgpu_ring_unlock_commit(ring); | 747 | amdgpu_ring_commit(ring); |
748 | for (i = 0; i < adev->usec_timeout; i++) { | 748 | for (i = 0; i < adev->usec_timeout; i++) { |
749 | tmp = RREG32(mmUVD_CONTEXT_ID); | 749 | tmp = RREG32(mmUVD_CONTEXT_ID); |
750 | if (tmp == 0xDEADBEEF) | 750 | if (tmp == 0xDEADBEEF) |