author:    monk.liu <monk.liu@amd.com>                2015-07-15 05:21:45 -0400
committer: Alex Deucher <alexander.deucher@amd.com>   2015-07-29 16:05:57 -0400
commit:    93323131d66db68802e646204c0562cddc81a651
tree:      5574f35548172b118998fb14f6cde67fd740efba
parent:    c193fa91b9182465a4a01665ad4096a6cdb4db2d
drm/amdgpu: different emit_ib for gfx and compute
The compute rings don't use the constant engine (CE) so far, so drop
the CE handling from the compute routine.
Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 47
2 files changed, 71 insertions, 22 deletions
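
The core of the change shows up in the ring function tables at the end of each file's hunks: gfx and compute rings now install separate emit_ib callbacks instead of sharing one function that branches on ring->type at every packet. Below is a minimal, self-contained sketch of that dispatch pattern; the example_* names are hypothetical stand-ins for illustration only, not the kernel's amdgpu structures.

/*
 * Minimal sketch (not the amdgpu code itself) of why splitting emit_ib per
 * ring type removes the runtime ring->type checks: each ring carries its own
 * function table, so the right emitter is chosen once at table setup.
 * All example_* names are hypothetical.
 */
#include <stdio.h>

struct example_ring;

struct example_ring_funcs {
	/* loosely mirrors the .emit_ib slot in amdgpu_ring_funcs */
	void (*emit_ib)(struct example_ring *ring);
};

struct example_ring {
	const struct example_ring_funcs *funcs;
};

/* gfx-only concerns: CE preamble drop, SWITCH_BUFFER on context switch */
static void example_emit_ib_gfx(struct example_ring *ring)
{
	(void)ring;
	printf("gfx emit_ib path\n");
}

/* compute path: no CE handling, INDIRECT_BUFFER_VALID set unconditionally */
static void example_emit_ib_compute(struct example_ring *ring)
{
	(void)ring;
	printf("compute emit_ib path\n");
}

static const struct example_ring_funcs example_funcs_gfx = {
	.emit_ib = example_emit_ib_gfx,
};

static const struct example_ring_funcs example_funcs_compute = {
	.emit_ib = example_emit_ib_compute,
};

int main(void)
{
	struct example_ring gfx = { .funcs = &example_funcs_gfx };
	struct example_ring compute = { .funcs = &example_funcs_compute };

	/* callers never branch on the ring type; the table dispatches */
	gfx.funcs->emit_ib(&gfx);
	compute.funcs->emit_ib(&compute);
	return 0;
}
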
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..2db6ab0a543d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  * sheduling on the ring. This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
 {
         bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
         u32 next_rptr = ring->wptr + 5;
 
         /* drop the CE preamble IB for the same context */
-        if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-            (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-            !need_ctx_switch)
+        if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                 return;
 
-        if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-                control |= INDIRECT_BUFFER_VALID;
-
-        if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+        if (need_ctx_switch)
                 next_rptr += 2;
 
         next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, next_rptr);
 
         /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-        if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+        if (need_ctx_switch) {
                 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                 amdgpu_ring_write(ring, 0);
         }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_ib *ib)
+{
+        u32 header, control = 0;
+        u32 next_rptr = ring->wptr + 5;
+
+        control |= INDIRECT_BUFFER_VALID;
+        next_rptr += 4;
+        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+        amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+        amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+        amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+        amdgpu_ring_write(ring, next_rptr);
+
+        header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+        control |= ib->length_dw |
+                   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+        amdgpu_ring_write(ring, header);
+        amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                          (2 << 0) |
+#endif
+                          (ib->gpu_addr & 0xFFFFFFFC));
+        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+        amdgpu_ring_write(ring, control);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
         .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
         .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
         .parse_cs = NULL,
-        .emit_ib = gfx_v7_0_ring_emit_ib,
+        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
         .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
         .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
         .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
         .get_wptr = gfx_v7_0_ring_get_wptr_compute,
         .set_wptr = gfx_v7_0_ring_set_wptr_compute,
         .parse_cs = NULL,
-        .emit_ib = gfx_v7_0_ring_emit_ib,
+        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
         .emit_fence = gfx_v7_0_ring_emit_fence_compute,
         .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
         .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1c7c992dea37..9e1d4ddbf475 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
         amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
 {
         bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
         u32 next_rptr = ring->wptr + 5;
 
         /* drop the CE preamble IB for the same context */
-        if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-            (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-            !need_ctx_switch)
+        if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                 return;
 
-        if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-                control |= INDIRECT_BUFFER_VALID;
-
-        if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+        if (need_ctx_switch)
                 next_rptr += 2;
 
         next_rptr += 4;
@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, next_rptr);
 
         /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-        if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+        if (need_ctx_switch) {
                 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                 amdgpu_ring_write(ring, 0);
         }
@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_ib *ib)
+{
+        u32 header, control = 0;
+        u32 next_rptr = ring->wptr + 5;
+
+        control |= INDIRECT_BUFFER_VALID;
+
+        next_rptr += 4;
+        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+        amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+        amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+        amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+        amdgpu_ring_write(ring, next_rptr);
+
+        header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+        control |= ib->length_dw |
+                   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+        amdgpu_ring_write(ring, header);
+        amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                          (2 << 0) |
+#endif
+                          (ib->gpu_addr & 0xFFFFFFFC));
+        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+        amdgpu_ring_write(ring, control);
+}
+
 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
                                          u64 seq, unsigned flags)
 {
@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
         .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
         .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
         .parse_cs = NULL,
-        .emit_ib = gfx_v8_0_ring_emit_ib,
+        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
         .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
         .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
         .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
         .get_wptr = gfx_v8_0_ring_get_wptr_compute,
         .set_wptr = gfx_v8_0_ring_set_wptr_compute,
         .parse_cs = NULL,
-        .emit_ib = gfx_v8_0_ring_emit_ib,
+        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
         .emit_fence = gfx_v8_0_ring_emit_fence_compute,
         .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
         .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,