aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 389509aeddf8..f1423a412867 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -214,8 +214,6 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
-static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *);
-
 /**
  * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -230,9 +228,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
-	if (ib->flush_hdp_writefifo)
-		next_rptr += 6;
-
 	while ((next_rptr & 7) != 2)
 		next_rptr++;
 
@@ -245,11 +240,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 	amdgpu_ring_write(ring, next_rptr);
 
-	if (ib->flush_hdp_writefifo) {
-		/* flush HDP */
-		sdma_v2_4_hdp_flush_ring_emit(ring);
-	}
-
 	/* IB packet must end on a 8 DW boundary */
 	while ((ring->wptr & 7) != 2)
 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
@@ -271,7 +261,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
  *
  * Emit an hdp flush packet on the requested DMA ring.
  */
-static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;
 
@@ -1340,6 +1330,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.emit_fence = sdma_v2_4_ring_emit_fence,
 	.emit_semaphore = sdma_v2_4_ring_emit_semaphore,
 	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
+	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
 	.test_ring = sdma_v2_4_ring_test_ring,
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.is_lockup = sdma_v2_4_ring_is_lockup,