author	Christian König <christian.koenig@amd.com>	2016-10-05 08:29:38 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-10-25 14:38:36 -0400
commit	e12f3d7a23c99617f72305a805ed827567a43a9c (patch)
tree	826d4091007045c5dc405f4ca0138a965761a34d
parent	7bc6be825a2efb00cf8a194e1d0560c92d5a2f6c (diff)
drm/amdgpu: move IB and frame size directly into the engine description
I should have suggested that on the initial patchset. This saves us a few CPU cycles during CS and a bunch of loc.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
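For context, a minimal standalone sketch of the pattern this patch replaces and the one it introduces. The struct names ring_funcs_before/ring_funcs_after and the helper cs_alloc_size are illustrative abbreviations, not the real amdgpu definitions; the field names and the allocation formula follow the hunks below.

/* Minimal sketch; types abbreviated, not the real kernel headers. */
struct amdgpu_ring;	/* opaque here */

/* Before: sizes came from per-ring callbacks, one indirect call per query. */
struct ring_funcs_before {
	unsigned (*get_emit_ib_size)(struct amdgpu_ring *ring);
	unsigned (*get_dma_frame_size)(struct amdgpu_ring *ring);
};

/* After: constant dword counts live directly in the engine description. */
struct ring_funcs_after {
	unsigned emit_frame_size;	/* DWs for one command frame */
	unsigned emit_ib_size;		/* DWs per indirect buffer */
};

/* CS allocation (see the amdgpu_ib.c hunk) becomes two plain field reads. */
static unsigned cs_alloc_size(const struct ring_funcs_after *f, unsigned num_ibs)
{
	return f->emit_frame_size + num_ibs * f->emit_ib_size;
}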
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    |  4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  |  5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   | 12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik_sdma.c     | 25
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c     | 46
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c     | 56
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c     | 58
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c    | 25
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c    | 25
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si_dma.c       | 25
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c     | 21
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c     | 21
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     | 42
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vce_v2_0.c     |  4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vce_v3_0.c     | 34
16 files changed, 117 insertions(+), 288 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index efbd9ef41785..fa99c0d6158c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1962,8 +1962,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 3c9a6da0966e..16308eb22e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+	alloc_size = ring->funcs->emit_frame_size + num_ibs *
+		ring->funcs->emit_ib_size;
 
 	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 2f935888c64b..767843c2b1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -98,6 +98,9 @@ struct amdgpu_ring_funcs {
 	void (*set_wptr)(struct amdgpu_ring *ring);
 	/* validating and patching of IBs */
 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* constants to calculate how many DW are needed for an emit */
+	unsigned emit_frame_size;
+	unsigned emit_ib_size;
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
 			struct amdgpu_ib *ib,
@@ -127,8 +130,6 @@ struct amdgpu_ring_funcs {
 	void (*end_use)(struct amdgpu_ring *ring);
 	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
 	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 struct amdgpu_ring {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 9a534e2757cd..2fb469aa850a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -824,18 +824,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 34e41590c5c8..49b34decce58 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* cik_sdma_ring_emit_hdp_flush */
-		3 + /* cik_sdma_ring_emit_hdp_invalidate */
-		6 + /* cik_sdma_ring_emit_pipeline_sync */
-		12 + /* cik_sdma_ring_emit_vm_flush */
-		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -1228,6 +1212,13 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.get_rptr = cik_sdma_ring_get_rptr,
 	.get_wptr = cik_sdma_ring_get_wptr,
 	.set_wptr = cik_sdma_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
 	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1238,8 +1229,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
-	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
-	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 4e35e16d1311..a86b17944bcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-		3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
-		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -3258,6 +3231,14 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3, /* gfx_v6_ring_emit_cntxcntl */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3268,14 +3249,19 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3285,8 +3271,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 9a54ea982b87..f2415f58c160 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4357,41 +4357,6 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -5147,6 +5112,15 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3, /* gfx_v7_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5159,14 +5133,20 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5178,8 +5158,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 404c49c45b03..e3330d06af9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6363,42 +6363,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
-		2 + /* gfx_v8_ring_emit_sb */
-		3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v8_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -6568,6 +6532,16 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3, /* gfx_v8_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6581,14 +6555,20 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_switch_buffer = gfx_v8_ring_emit_sb,
 	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6600,8 +6580,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 16cc1f5d0d4e..7cd24e42aa9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v2_4_ring_emit_hdp_flush */
-		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
-		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-		12 + /* sdma_v2_4_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1225,6 +1209,13 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.get_rptr = sdma_v2_4_ring_get_rptr,
 	.get_wptr = sdma_v2_4_ring_get_wptr,
 	.set_wptr = sdma_v2_4_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1235,8 +1226,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
-	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2c2f24fff64b..6518993e23a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v3_0_ring_emit_hdp_flush */
-		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
-		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
-		12 + /* sdma_v3_0_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1568,6 +1552,13 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.get_rptr = sdma_v3_0_ring_get_rptr,
 	.get_wptr = sdma_v3_0_ring_get_wptr,
 	.set_wptr = sdma_v3_0_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1578,8 +1569,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
-	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9f11e3792077..c1c1b5179de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		3 + /* si_dma_ring_emit_hdp_flush */
-		3 + /* si_dma_ring_emit_hdp_invalidate */
-		6 + /* si_dma_ring_emit_pipeline_sync */
-		12 + /* si_dma_ring_emit_vm_flush */
-		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int si_dma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -783,6 +767,13 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.get_rptr = si_dma_ring_get_rptr,
 	.get_wptr = si_dma_ring_get_wptr,
 	.set_wptr = si_dma_ring_set_wptr,
+	.emit_frame_size =
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
 	.emit_ib = si_dma_ring_emit_ib,
 	.emit_fence = si_dma_ring_emit_fence,
 	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -793,8 +784,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.test_ib = si_dma_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = si_dma_ring_pad_ib,
-	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
-	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
 };
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..708de997e3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -526,20 +526,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v4_2_ring_emit_hdp_flush */
-		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
-		14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -760,6 +746,11 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.get_wptr = uvd_v4_2_ring_get_wptr,
 	.set_wptr = uvd_v4_2_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
 	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +761,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..9e695e01f8b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -577,20 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v5_0_ring_emit_hdp_flush */
-		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
-		14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -811,6 +797,11 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.get_wptr = uvd_v5_0_ring_get_wptr,
 	.set_wptr = uvd_v5_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +812,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f74229496cc7..aeb1b6e2c518 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -725,31 +725,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		20 + /* uvd_v6_0_ring_emit_vm_flush */
-		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1052,6 +1027,12 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,14 +1043,19 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1082,8 +1068,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..7ada30ddfa0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -614,6 +614,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.get_wptr = vce_v2_0_ring_get_wptr,
 	.set_wptr = vce_v2_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +624,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
-	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index e76bc092becc..0db59d885f04 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -808,27 +808,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, seq);
 }
 
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* vce_v3_0_emit_vm_flush */
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
@@ -854,6 +833,10 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size =
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,14 +845,17 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = vce_v3_0_ring_emit_ib,
 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -880,8 +866,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)