about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2016-03-08 08:11:00 -0500
committerAlex Deucher <alexander.deucher@amd.com>2016-03-10 10:36:13 -0500
commit00b7c4ff7d482d287a591f047e0963d494569b46 (patch)
treec5de557ee6252f7eab2cc577ebd41277b46653cf
parent32b41ac21fde8f7cea465d74c570fc7bd0089163 (diff)
drm/amdgpu: split pipeline sync out of SDMA vm_flush() as well
Code it similar to how we did it for the gfx and compute engines.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c    27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c   23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   23
3 files changed, 53 insertions, 20 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b5b4220a6141..8663a27018a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -822,19 +822,14 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
822} 822}
823 823
824/** 824/**
825 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA 825 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
826 * 826 *
827 * @ring: amdgpu_ring pointer 827 * @ring: amdgpu_ring pointer
828 * @vm: amdgpu_vm pointer
829 * 828 *
830 * Update the page table base and flush the VM TLB 829 * Make sure all previous operations are completed (CIK).
831 * using sDMA (CIK).
832 */ 830 */
833static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, 831static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
834 unsigned vm_id, uint64_t pd_addr)
835{ 832{
836 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
837 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
838 uint32_t seq = ring->fence_drv.sync_seq; 833 uint32_t seq = ring->fence_drv.sync_seq;
839 uint64_t addr = ring->fence_drv.gpu_addr; 834 uint64_t addr = ring->fence_drv.gpu_addr;
840 835
@@ -848,7 +843,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
848 amdgpu_ring_write(ring, seq); /* reference */ 843 amdgpu_ring_write(ring, seq); /* reference */
849 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 844 amdgpu_ring_write(ring, 0xfffffff); /* mask */
850 amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */ 845 amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
846}
851 847
848/**
849 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
850 *
851 * @ring: amdgpu_ring pointer
852 * @vm: amdgpu_vm pointer
853 *
854 * Update the page table base and flush the VM TLB
855 * using sDMA (CIK).
856 */
857static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
858 unsigned vm_id, uint64_t pd_addr)
859{
860 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
861 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
852 862
853 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 863 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
854 if (vm_id < 8) { 864 if (vm_id < 8) {
@@ -1290,6 +1300,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1290 .parse_cs = NULL, 1300 .parse_cs = NULL,
1291 .emit_ib = cik_sdma_ring_emit_ib, 1301 .emit_ib = cik_sdma_ring_emit_ib,
1292 .emit_fence = cik_sdma_ring_emit_fence, 1302 .emit_fence = cik_sdma_ring_emit_fence,
1303 .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
1293 .emit_vm_flush = cik_sdma_ring_emit_vm_flush, 1304 .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
1294 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush, 1305 .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1295 .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate, 1306 .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 747ef558033a..ab9ff89a3096 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -880,16 +880,13 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
880} 880}
881 881
882/** 882/**
883 * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA 883 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
884 * 884 *
885 * @ring: amdgpu_ring pointer 885 * @ring: amdgpu_ring pointer
886 * @vm: amdgpu_vm pointer
887 * 886 *
888 * Update the page table base and flush the VM TLB 887 * Make sure all previous operations are completed (CIK).
889 * using sDMA (VI).
890 */ 888 */
891static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, 889static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
892 unsigned vm_id, uint64_t pd_addr)
893{ 890{
894 uint32_t seq = ring->fence_drv.sync_seq; 891 uint32_t seq = ring->fence_drv.sync_seq;
895 uint64_t addr = ring->fence_drv.gpu_addr; 892 uint64_t addr = ring->fence_drv.gpu_addr;
@@ -905,7 +902,20 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
905 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 902 amdgpu_ring_write(ring, 0xfffffff); /* mask */
906 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 903 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
907 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 904 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
905}
908 906
907/**
908 * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
909 *
910 * @ring: amdgpu_ring pointer
911 * @vm: amdgpu_vm pointer
912 *
913 * Update the page table base and flush the VM TLB
914 * using sDMA (VI).
915 */
916static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
917 unsigned vm_id, uint64_t pd_addr)
918{
909 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 919 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
910 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 920 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
911 if (vm_id < 8) { 921 if (vm_id < 8) {
@@ -1295,6 +1305,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
1295 .parse_cs = NULL, 1305 .parse_cs = NULL,
1296 .emit_ib = sdma_v2_4_ring_emit_ib, 1306 .emit_ib = sdma_v2_4_ring_emit_ib,
1297 .emit_fence = sdma_v2_4_ring_emit_fence, 1307 .emit_fence = sdma_v2_4_ring_emit_fence,
1308 .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
1298 .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, 1309 .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
1299 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, 1310 .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
1300 .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate, 1311 .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 11a544fbf197..4c24c371fec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1031,16 +1031,13 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
1031} 1031}
1032 1032
1033/** 1033/**
1034 * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA 1034 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
1035 * 1035 *
1036 * @ring: amdgpu_ring pointer 1036 * @ring: amdgpu_ring pointer
1037 * @vm: amdgpu_vm pointer
1038 * 1037 *
1039 * Update the page table base and flush the VM TLB 1038 * Make sure all previous operations are completed (CIK).
1040 * using sDMA (VI).
1041 */ 1039 */
1042static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1040static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1043 unsigned vm_id, uint64_t pd_addr)
1044{ 1041{
1045 uint32_t seq = ring->fence_drv.sync_seq; 1042 uint32_t seq = ring->fence_drv.sync_seq;
1046 uint64_t addr = ring->fence_drv.gpu_addr; 1043 uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1056,7 +1053,20 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1056 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 1053 amdgpu_ring_write(ring, 0xfffffff); /* mask */
1057 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1054 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1058 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 1055 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1056}
1059 1057
1058/**
1059 * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
1060 *
1061 * @ring: amdgpu_ring pointer
1062 * @vm: amdgpu_vm pointer
1063 *
1064 * Update the page table base and flush the VM TLB
1065 * using sDMA (VI).
1066 */
1067static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1068 unsigned vm_id, uint64_t pd_addr)
1069{
1060 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1070 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1061 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1071 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1062 if (vm_id < 8) { 1072 if (vm_id < 8) {
@@ -1563,6 +1573,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
1563 .parse_cs = NULL, 1573 .parse_cs = NULL,
1564 .emit_ib = sdma_v3_0_ring_emit_ib, 1574 .emit_ib = sdma_v3_0_ring_emit_ib,
1565 .emit_fence = sdma_v3_0_ring_emit_fence, 1575 .emit_fence = sdma_v3_0_ring_emit_fence,
1576 .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
1566 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, 1577 .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
1567 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, 1578 .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
1568 .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate, 1579 .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,