about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd
diff options
context:
space:
mode:
author: Chunming Zhou <David1.Zhou@amd.com> 2016-03-01 22:30:31 -0500
committer: Alex Deucher <alexander.deucher@amd.com> 2016-03-08 11:01:40 -0500
commit 5c55db83b759b079ead93aa0767fc97048b1f899 (patch)
tree 5c4db2c3e0291851e36f1b2ee50f6c27629d868a /drivers/gpu/drm/amd
parent b6bae58d8b6c21357198255cdb42a3929e7f7357 (diff)
drm/amdgpu: wait engine idle before vm flush for sdma
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c   | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  | 15
3 files changed, 44 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index e4e4b2ac77b7..6cf45b0ff91b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -829,6 +829,20 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
829{ 829{
830 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | 830 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
831 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ 831 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
832 uint32_t seq = ring->fence_drv.sync_seq;
833 uint64_t addr = ring->fence_drv.gpu_addr;
834
835 /* wait for idle */
836 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
837 SDMA_POLL_REG_MEM_EXTRA_OP(0) |
838 SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
839 SDMA_POLL_REG_MEM_EXTRA_M));
840 amdgpu_ring_write(ring, addr & 0xfffffffc);
841 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
842 amdgpu_ring_write(ring, seq); /* reference */
843 amdgpu_ring_write(ring, 0xfffffff); /* mask */
844 amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
845
832 846
833 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 847 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
834 if (vm_id < 8) { 848 if (vm_id < 8) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index dddb8d6a81f3..0e246ff6734c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -885,6 +885,21 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
885static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, 885static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
886 unsigned vm_id, uint64_t pd_addr) 886 unsigned vm_id, uint64_t pd_addr)
887{ 887{
888 uint32_t seq = ring->fence_drv.sync_seq;
889 uint64_t addr = ring->fence_drv.gpu_addr;
890
891 /* wait for idle */
892 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
893 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
894 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
895 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
896 amdgpu_ring_write(ring, addr & 0xfffffffc);
897 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
898 amdgpu_ring_write(ring, seq); /* reference */
899 amdgpu_ring_write(ring, 0xfffffff); /* mask */
900 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
901 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
902
888 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 903 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
889 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 904 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
890 if (vm_id < 8) { 905 if (vm_id < 8) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 19e02f7a06f3..67441f8844bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1035,6 +1035,21 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
1035static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1035static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1036 unsigned vm_id, uint64_t pd_addr) 1036 unsigned vm_id, uint64_t pd_addr)
1037{ 1037{
1038 uint32_t seq = ring->fence_drv.sync_seq;
1039 uint64_t addr = ring->fence_drv.gpu_addr;
1040
1041 /* wait for idle */
1042 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1043 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1044 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1045 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1046 amdgpu_ring_write(ring, addr & 0xfffffffc);
1047 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1048 amdgpu_ring_write(ring, seq); /* reference */
1049 amdgpu_ring_write(ring, 0xfffffff); /* mask */
1050 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1051 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1052
1038 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1039 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1054 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1040 if (vm_id < 8) { 1055 if (vm_id < 8) {