diff options
author | Christian König <christian.koenig@amd.com> | 2015-05-11 08:10:34 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-06-03 21:03:32 -0400 |
commit | d2edb07b10fce5127a60671b55ca53921c212bc3 (patch) | |
tree | 5c6c7fe319aa67c5ca0a279008381f4a31d3cd22 /drivers/gpu/drm/amd/amdgpu | |
parent | 66782cec7ad8f48c09d96ee59b713f694265cfa1 (diff) |
drm/amdgpu: cleanup HDP flush handling
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 15 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 11 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 13 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 15 |
7 files changed, 18 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index aaa62f33870f..72d9d9ec5c9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -382,6 +382,7 @@ struct amdgpu_ring_funcs { | |||
382 | bool emit_wait); | 382 | bool emit_wait); |
383 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, | 383 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, |
384 | uint64_t pd_addr); | 384 | uint64_t pd_addr); |
385 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); | ||
385 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, | 386 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, |
386 | uint32_t gds_base, uint32_t gds_size, | 387 | uint32_t gds_base, uint32_t gds_size, |
387 | uint32_t gws_base, uint32_t gws_size, | 388 | uint32_t gws_base, uint32_t gws_size, |
@@ -892,7 +893,6 @@ struct amdgpu_ib { | |||
892 | struct amdgpu_fence *fence; | 893 | struct amdgpu_fence *fence; |
893 | struct amdgpu_user_fence *user; | 894 | struct amdgpu_user_fence *user; |
894 | struct amdgpu_vm *vm; | 895 | struct amdgpu_vm *vm; |
895 | bool flush_hdp_writefifo; | ||
896 | struct amdgpu_sync sync; | 896 | struct amdgpu_sync sync; |
897 | uint32_t gds_base, gds_size; | 897 | uint32_t gds_base, gds_size; |
898 | uint32_t gws_base, gws_size; | 898 | uint32_t gws_base, gws_size; |
@@ -2203,6 +2203,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) | |||
2203 | #define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit)) | 2203 | #define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit)) |
2204 | #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) | 2204 | #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) |
2205 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) | 2205 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
2206 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) | ||
2206 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) | 2207 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
2207 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) | 2208 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
2208 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) | 2209 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f2442b2cec00..66c66f83b407 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -143,7 +143,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
143 | struct amdgpu_ib *ib = &ibs[0]; | 143 | struct amdgpu_ib *ib = &ibs[0]; |
144 | unsigned i; | 144 | unsigned i; |
145 | int r = 0; | 145 | int r = 0; |
146 | bool flush_hdp = true; | ||
147 | 146 | ||
148 | if (num_ibs == 0) | 147 | if (num_ibs == 0) |
149 | return -EINVAL; | 148 | return -EINVAL; |
@@ -185,6 +184,9 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
185 | ib->gws_base, ib->gws_size, | 184 | ib->gws_base, ib->gws_size, |
186 | ib->oa_base, ib->oa_size); | 185 | ib->oa_base, ib->oa_size); |
187 | 186 | ||
187 | if (ring->funcs->emit_hdp_flush) | ||
188 | amdgpu_ring_emit_hdp_flush(ring); | ||
189 | |||
188 | for (i = 0; i < num_ibs; ++i) { | 190 | for (i = 0; i < num_ibs; ++i) { |
189 | ib = &ibs[i]; | 191 | ib = &ibs[i]; |
190 | 192 | ||
@@ -192,8 +194,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
192 | amdgpu_ring_unlock_undo(ring); | 194 | amdgpu_ring_unlock_undo(ring); |
193 | return -EINVAL; | 195 | return -EINVAL; |
194 | } | 196 | } |
195 | ib->flush_hdp_writefifo = flush_hdp; | ||
196 | flush_hdp = false; | ||
197 | amdgpu_ring_emit_ib(ring, ib); | 197 | amdgpu_ring_emit_ib(ring, ib); |
198 | } | 198 | } |
199 | 199 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 037e3db69547..d5055ea4d112 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -188,8 +188,6 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) | |||
188 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); | 188 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); |
189 | } | 189 | } |
190 | 190 | ||
191 | static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *); | ||
192 | |||
193 | /** | 191 | /** |
194 | * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine | 192 | * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine |
195 | * | 193 | * |
@@ -204,9 +202,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, | |||
204 | u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; | 202 | u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; |
205 | u32 next_rptr = ring->wptr + 5; | 203 | u32 next_rptr = ring->wptr + 5; |
206 | 204 | ||
207 | if (ib->flush_hdp_writefifo) | ||
208 | next_rptr += 6; | ||
209 | |||
210 | while ((next_rptr & 7) != 4) | 205 | while ((next_rptr & 7) != 4) |
211 | next_rptr++; | 206 | next_rptr++; |
212 | 207 | ||
@@ -217,11 +212,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, | |||
217 | amdgpu_ring_write(ring, 1); /* number of DWs to follow */ | 212 | amdgpu_ring_write(ring, 1); /* number of DWs to follow */ |
218 | amdgpu_ring_write(ring, next_rptr); | 213 | amdgpu_ring_write(ring, next_rptr); |
219 | 214 | ||
220 | if (ib->flush_hdp_writefifo) { | ||
221 | /* flush HDP */ | ||
222 | cik_sdma_hdp_flush_ring_emit(ring); | ||
223 | } | ||
224 | |||
225 | /* IB packet must end on a 8 DW boundary */ | 215 | /* IB packet must end on a 8 DW boundary */ |
226 | while ((ring->wptr & 7) != 4) | 216 | while ((ring->wptr & 7) != 4) |
227 | amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); | 217 | amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); |
@@ -233,13 +223,13 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, | |||
233 | } | 223 | } |
234 | 224 | ||
235 | /** | 225 | /** |
236 | * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring | 226 | * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring |
237 | * | 227 | * |
238 | * @ring: amdgpu ring pointer | 228 | * @ring: amdgpu ring pointer |
239 | * | 229 | * |
240 | * Emit an hdp flush packet on the requested DMA ring. | 230 | * Emit an hdp flush packet on the requested DMA ring. |
241 | */ | 231 | */ |
242 | static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *ring) | 232 | static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
243 | { | 233 | { |
244 | u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | | 234 | u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | |
245 | SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ | 235 | SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ |
@@ -1317,6 +1307,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { | |||
1317 | .emit_fence = cik_sdma_ring_emit_fence, | 1307 | .emit_fence = cik_sdma_ring_emit_fence, |
1318 | .emit_semaphore = cik_sdma_ring_emit_semaphore, | 1308 | .emit_semaphore = cik_sdma_ring_emit_semaphore, |
1319 | .emit_vm_flush = cik_sdma_ring_emit_vm_flush, | 1309 | .emit_vm_flush = cik_sdma_ring_emit_vm_flush, |
1310 | .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, | ||
1320 | .test_ring = cik_sdma_ring_test_ring, | 1311 | .test_ring = cik_sdma_ring_test_ring, |
1321 | .test_ib = cik_sdma_ring_test_ib, | 1312 | .test_ib = cik_sdma_ring_test_ib, |
1322 | .is_lockup = cik_sdma_ring_is_lockup, | 1313 | .is_lockup = cik_sdma_ring_is_lockup, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index cec46ebae5f7..855b5272f4b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2366,14 +2366,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2366 | } | 2366 | } |
2367 | 2367 | ||
2368 | /** | 2368 | /** |
2369 | * gfx_v7_0_hdp_flush_cp_ring_emit - emit an hdp flush on the cp | 2369 | * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp |
2370 | * | 2370 | * |
2371 | * @adev: amdgpu_device pointer | 2371 | * @adev: amdgpu_device pointer |
2372 | * @ridx: amdgpu ring index | 2372 | * @ridx: amdgpu ring index |
2373 | * | 2373 | * |
2374 | * Emits an hdp flush on the cp. | 2374 | * Emits an hdp flush on the cp. |
2375 | */ | 2375 | */ |
2376 | static void gfx_v7_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring) | 2376 | static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
2377 | { | 2377 | { |
2378 | u32 ref_and_mask; | 2378 | u32 ref_and_mask; |
2379 | 2379 | ||
@@ -2528,9 +2528,6 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
2528 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) | 2528 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) |
2529 | control |= INDIRECT_BUFFER_VALID; | 2529 | control |= INDIRECT_BUFFER_VALID; |
2530 | 2530 | ||
2531 | if (ib->flush_hdp_writefifo) | ||
2532 | next_rptr += 7; | ||
2533 | |||
2534 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) | 2531 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) |
2535 | next_rptr += 2; | 2532 | next_rptr += 2; |
2536 | 2533 | ||
@@ -2541,9 +2538,6 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
2541 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | 2538 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); |
2542 | amdgpu_ring_write(ring, next_rptr); | 2539 | amdgpu_ring_write(ring, next_rptr); |
2543 | 2540 | ||
2544 | if (ib->flush_hdp_writefifo) | ||
2545 | gfx_v7_0_hdp_flush_cp_ring_emit(ring); | ||
2546 | |||
2547 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ | 2541 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ |
2548 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { | 2542 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { |
2549 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | 2543 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
@@ -5522,6 +5516,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { | |||
5522 | .emit_semaphore = gfx_v7_0_ring_emit_semaphore, | 5516 | .emit_semaphore = gfx_v7_0_ring_emit_semaphore, |
5523 | .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, | 5517 | .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, |
5524 | .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, | 5518 | .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, |
5519 | .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, | ||
5525 | .test_ring = gfx_v7_0_ring_test_ring, | 5520 | .test_ring = gfx_v7_0_ring_test_ring, |
5526 | .test_ib = gfx_v7_0_ring_test_ib, | 5521 | .test_ib = gfx_v7_0_ring_test_ib, |
5527 | .is_lockup = gfx_v7_0_ring_is_lockup, | 5522 | .is_lockup = gfx_v7_0_ring_is_lockup, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fc8c46209db4..63ed3b01cea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -3610,7 +3610,7 @@ static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) | |||
3610 | } | 3610 | } |
3611 | } | 3611 | } |
3612 | 3612 | ||
3613 | static void gfx_v8_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring) | 3613 | static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
3614 | { | 3614 | { |
3615 | u32 ref_and_mask, reg_mem_engine; | 3615 | u32 ref_and_mask, reg_mem_engine; |
3616 | 3616 | ||
@@ -3657,9 +3657,6 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
3657 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) | 3657 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) |
3658 | control |= INDIRECT_BUFFER_VALID; | 3658 | control |= INDIRECT_BUFFER_VALID; |
3659 | 3659 | ||
3660 | if (ib->flush_hdp_writefifo) | ||
3661 | next_rptr += 7; | ||
3662 | |||
3663 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) | 3660 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) |
3664 | next_rptr += 2; | 3661 | next_rptr += 2; |
3665 | 3662 | ||
@@ -3670,9 +3667,6 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
3670 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | 3667 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); |
3671 | amdgpu_ring_write(ring, next_rptr); | 3668 | amdgpu_ring_write(ring, next_rptr); |
3672 | 3669 | ||
3673 | if (ib->flush_hdp_writefifo) | ||
3674 | gfx_v8_0_hdp_flush_cp_ring_emit(ring); | ||
3675 | |||
3676 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ | 3670 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ |
3677 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { | 3671 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { |
3678 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | 3672 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); |
@@ -4149,6 +4143,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | |||
4149 | .emit_semaphore = gfx_v8_0_ring_emit_semaphore, | 4143 | .emit_semaphore = gfx_v8_0_ring_emit_semaphore, |
4150 | .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, | 4144 | .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, |
4151 | .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, | 4145 | .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, |
4146 | .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, | ||
4152 | .test_ring = gfx_v8_0_ring_test_ring, | 4147 | .test_ring = gfx_v8_0_ring_test_ring, |
4153 | .test_ib = gfx_v8_0_ring_test_ib, | 4148 | .test_ib = gfx_v8_0_ring_test_ib, |
4154 | .is_lockup = gfx_v8_0_ring_is_lockup, | 4149 | .is_lockup = gfx_v8_0_ring_is_lockup, |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 389509aeddf8..f1423a412867 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -214,8 +214,6 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) | |||
214 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); | 214 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *); | ||
218 | |||
219 | /** | 217 | /** |
220 | * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine | 218 | * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine |
221 | * | 219 | * |
@@ -230,9 +228,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | |||
230 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; | 228 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; |
231 | u32 next_rptr = ring->wptr + 5; | 229 | u32 next_rptr = ring->wptr + 5; |
232 | 230 | ||
233 | if (ib->flush_hdp_writefifo) | ||
234 | next_rptr += 6; | ||
235 | |||
236 | while ((next_rptr & 7) != 2) | 231 | while ((next_rptr & 7) != 2) |
237 | next_rptr++; | 232 | next_rptr++; |
238 | 233 | ||
@@ -245,11 +240,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | |||
245 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | 240 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); |
246 | amdgpu_ring_write(ring, next_rptr); | 241 | amdgpu_ring_write(ring, next_rptr); |
247 | 242 | ||
248 | if (ib->flush_hdp_writefifo) { | ||
249 | /* flush HDP */ | ||
250 | sdma_v2_4_hdp_flush_ring_emit(ring); | ||
251 | } | ||
252 | |||
253 | /* IB packet must end on a 8 DW boundary */ | 243 | /* IB packet must end on a 8 DW boundary */ |
254 | while ((ring->wptr & 7) != 2) | 244 | while ((ring->wptr & 7) != 2) |
255 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); | 245 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); |
@@ -271,7 +261,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | |||
271 | * | 261 | * |
272 | * Emit an hdp flush packet on the requested DMA ring. | 262 | * Emit an hdp flush packet on the requested DMA ring. |
273 | */ | 263 | */ |
274 | static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring) | 264 | static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
275 | { | 265 | { |
276 | u32 ref_and_mask = 0; | 266 | u32 ref_and_mask = 0; |
277 | 267 | ||
@@ -1340,6 +1330,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | |||
1340 | .emit_fence = sdma_v2_4_ring_emit_fence, | 1330 | .emit_fence = sdma_v2_4_ring_emit_fence, |
1341 | .emit_semaphore = sdma_v2_4_ring_emit_semaphore, | 1331 | .emit_semaphore = sdma_v2_4_ring_emit_semaphore, |
1342 | .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, | 1332 | .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, |
1333 | .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, | ||
1343 | .test_ring = sdma_v2_4_ring_test_ring, | 1334 | .test_ring = sdma_v2_4_ring_test_ring, |
1344 | .test_ib = sdma_v2_4_ring_test_ib, | 1335 | .test_ib = sdma_v2_4_ring_test_ib, |
1345 | .is_lockup = sdma_v2_4_ring_is_lockup, | 1336 | .is_lockup = sdma_v2_4_ring_is_lockup, |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index d3eda315e719..7c4abff5005d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -269,8 +269,6 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
269 | } | 269 | } |
270 | } | 270 | } |
271 | 271 | ||
272 | static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *); | ||
273 | |||
274 | /** | 272 | /** |
275 | * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine | 273 | * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine |
276 | * | 274 | * |
@@ -285,9 +283,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
285 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; | 283 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; |
286 | u32 next_rptr = ring->wptr + 5; | 284 | u32 next_rptr = ring->wptr + 5; |
287 | 285 | ||
288 | if (ib->flush_hdp_writefifo) | ||
289 | next_rptr += 6; | ||
290 | |||
291 | while ((next_rptr & 7) != 2) | 286 | while ((next_rptr & 7) != 2) |
292 | next_rptr++; | 287 | next_rptr++; |
293 | next_rptr += 6; | 288 | next_rptr += 6; |
@@ -299,11 +294,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
299 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | 294 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); |
300 | amdgpu_ring_write(ring, next_rptr); | 295 | amdgpu_ring_write(ring, next_rptr); |
301 | 296 | ||
302 | /* flush HDP */ | ||
303 | if (ib->flush_hdp_writefifo) { | ||
304 | sdma_v3_0_hdp_flush_ring_emit(ring); | ||
305 | } | ||
306 | |||
307 | /* IB packet must end on a 8 DW boundary */ | 297 | /* IB packet must end on a 8 DW boundary */ |
308 | while ((ring->wptr & 7) != 2) | 298 | while ((ring->wptr & 7) != 2) |
309 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); | 299 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); |
@@ -320,13 +310,13 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
320 | } | 310 | } |
321 | 311 | ||
322 | /** | 312 | /** |
323 | * sdma_v3_0_hdp_flush_ring_emit - emit an hdp flush on the DMA ring | 313 | * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring |
324 | * | 314 | * |
325 | * @ring: amdgpu ring pointer | 315 | * @ring: amdgpu ring pointer |
326 | * | 316 | * |
327 | * Emit an hdp flush packet on the requested DMA ring. | 317 | * Emit an hdp flush packet on the requested DMA ring. |
328 | */ | 318 | */ |
329 | static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring) | 319 | static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
330 | { | 320 | { |
331 | u32 ref_and_mask = 0; | 321 | u32 ref_and_mask = 0; |
332 | 322 | ||
@@ -1407,6 +1397,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | |||
1407 | .emit_fence = sdma_v3_0_ring_emit_fence, | 1397 | .emit_fence = sdma_v3_0_ring_emit_fence, |
1408 | .emit_semaphore = sdma_v3_0_ring_emit_semaphore, | 1398 | .emit_semaphore = sdma_v3_0_ring_emit_semaphore, |
1409 | .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, | 1399 | .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, |
1400 | .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, | ||
1410 | .test_ring = sdma_v3_0_ring_test_ring, | 1401 | .test_ring = sdma_v3_0_ring_test_ring, |
1411 | .test_ib = sdma_v3_0_ring_test_ib, | 1402 | .test_ib = sdma_v3_0_ring_test_ib, |
1412 | .is_lockup = sdma_v3_0_ring_is_lockup, | 1403 | .is_lockup = sdma_v3_0_ring_is_lockup, |