aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 219
1 file changed, 126 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 261bb051b14d..e61f6a3ca241 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -133,7 +133,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) 136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
137}; 140};
138 141
139static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 142static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -173,7 +176,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
173 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120), 176 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
174 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 177 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
175 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), 178 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
176 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) 179 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
180 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
181 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
182 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
177}; 183};
178 184
179static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = 185static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -247,7 +253,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
247 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107), 253 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
248 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), 254 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
249 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), 255 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
250 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000) 256 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
257 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
258 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
259 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
251}; 260};
252 261
253static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = 262static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -908,6 +917,50 @@ static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
908 buffer[count++] = cpu_to_le32(0); 917 buffer[count++] = cpu_to_le32(0);
909} 918}
910 919
920static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
921{
922 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
923 uint32_t pg_always_on_cu_num = 2;
924 uint32_t always_on_cu_num;
925 uint32_t i, j, k;
926 uint32_t mask, cu_bitmap, counter;
927
928 if (adev->flags & AMD_IS_APU)
929 always_on_cu_num = 4;
930 else if (adev->asic_type == CHIP_VEGA12)
931 always_on_cu_num = 8;
932 else
933 always_on_cu_num = 12;
934
935 mutex_lock(&adev->grbm_idx_mutex);
936 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
937 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
938 mask = 1;
939 cu_bitmap = 0;
940 counter = 0;
941 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
942
943 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
944 if (cu_info->bitmap[i][j] & mask) {
945 if (counter == pg_always_on_cu_num)
946 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
947 if (counter < always_on_cu_num)
948 cu_bitmap |= mask;
949 else
950 break;
951 counter++;
952 }
953 mask <<= 1;
954 }
955
956 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
957 cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
958 }
959 }
960 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
961 mutex_unlock(&adev->grbm_idx_mutex);
962}
963
911static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) 964static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
912{ 965{
913 uint32_t data; 966 uint32_t data;
@@ -941,8 +994,10 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
941 data |= 0x00C00000; 994 data |= 0x00C00000;
942 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); 995 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
943 996
944 /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */ 997 /*
945 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF); 998 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
999 * programmed in gfx_v9_0_init_always_on_cu_mask()
1000 */
946 1001
947 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, 1002 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
948 * but used for RLC_LB_CNTL configuration */ 1003 * but used for RLC_LB_CNTL configuration */
@@ -951,6 +1006,57 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
951 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); 1006 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
952 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); 1007 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
953 mutex_unlock(&adev->grbm_idx_mutex); 1008 mutex_unlock(&adev->grbm_idx_mutex);
1009
1010 gfx_v9_0_init_always_on_cu_mask(adev);
1011}
1012
1013static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1014{
1015 uint32_t data;
1016
1017 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1018 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1019 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1020 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1021 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1022
1023 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1024 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1025
1026 /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1027 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1028
1029 mutex_lock(&adev->grbm_idx_mutex);
1030 /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1031 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1032 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1033
1034 /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1035 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1036 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1037 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1038 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1039
1040 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1041 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1042 data &= 0x0000FFFF;
1043 data |= 0x00C00000;
1044 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1045
1046 /*
1047 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1048 * programmed in gfx_v9_0_init_always_on_cu_mask()
1049 */
1050
1051 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1052 * but used for RLC_LB_CNTL configuration */
1053 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1054 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1055 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1056 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1057 mutex_unlock(&adev->grbm_idx_mutex);
1058
1059 gfx_v9_0_init_always_on_cu_mask(adev);
954} 1060}
955 1061
956static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) 1062static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
@@ -1084,8 +1190,17 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1084 rv_init_cp_jump_table(adev); 1190 rv_init_cp_jump_table(adev);
1085 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); 1191 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
1086 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); 1192 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
1193 }
1087 1194
1195 switch (adev->asic_type) {
1196 case CHIP_RAVEN:
1088 gfx_v9_0_init_lbpw(adev); 1197 gfx_v9_0_init_lbpw(adev);
1198 break;
1199 case CHIP_VEGA20:
1200 gfx_v9_4_init_lbpw(adev);
1201 break;
1202 default:
1203 break;
1089 } 1204 }
1090 1205
1091 return 0; 1206 return 0;
@@ -1605,11 +1720,6 @@ static int gfx_v9_0_sw_init(void *handle)
1605 adev->gfx.mec.num_pipe_per_mec = 4; 1720 adev->gfx.mec.num_pipe_per_mec = 4;
1606 adev->gfx.mec.num_queue_per_pipe = 8; 1721 adev->gfx.mec.num_queue_per_pipe = 8;
1607 1722
1608 /* KIQ event */
1609 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
1610 if (r)
1611 return r;
1612
1613 /* EOP Event */ 1723 /* EOP Event */
1614 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); 1724 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1615 if (r) 1725 if (r)
@@ -2403,7 +2513,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2403 return r; 2513 return r;
2404 } 2514 }
2405 2515
2406 if (adev->asic_type == CHIP_RAVEN) { 2516 if (adev->asic_type == CHIP_RAVEN ||
2517 adev->asic_type == CHIP_VEGA20) {
2407 if (amdgpu_lbpw != 0) 2518 if (amdgpu_lbpw != 0)
2408 gfx_v9_0_enable_lbpw(adev, true); 2519 gfx_v9_0_enable_lbpw(adev, true);
2409 else 2520 else
@@ -3091,7 +3202,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3091 struct v9_mqd *mqd = ring->mqd_ptr; 3202 struct v9_mqd *mqd = ring->mqd_ptr;
3092 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3203 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3093 3204
3094 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { 3205 if (!adev->in_gpu_reset && !adev->in_suspend) {
3095 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 3206 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3096 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 3207 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3097 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 3208 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3310,7 +3421,7 @@ static int gfx_v9_0_hw_fini(void *handle)
3310 /* Use deinitialize sequence from CAIL when unbinding device from driver, 3421 /* Use deinitialize sequence from CAIL when unbinding device from driver,
3311 * otherwise KIQ is hanging when binding back 3422 * otherwise KIQ is hanging when binding back
3312 */ 3423 */
3313 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { 3424 if (!adev->in_gpu_reset && !adev->in_suspend) {
3314 mutex_lock(&adev->srbm_mutex); 3425 mutex_lock(&adev->srbm_mutex);
3315 soc15_grbm_select(adev, adev->gfx.kiq.ring.me, 3426 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3316 adev->gfx.kiq.ring.pipe, 3427 adev->gfx.kiq.ring.pipe,
@@ -3330,20 +3441,12 @@ static int gfx_v9_0_hw_fini(void *handle)
3330 3441
3331static int gfx_v9_0_suspend(void *handle) 3442static int gfx_v9_0_suspend(void *handle)
3332{ 3443{
3333 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3444 return gfx_v9_0_hw_fini(handle);
3334
3335 adev->gfx.in_suspend = true;
3336 return gfx_v9_0_hw_fini(adev);
3337} 3445}
3338 3446
3339static int gfx_v9_0_resume(void *handle) 3447static int gfx_v9_0_resume(void *handle)
3340{ 3448{
3341 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3449 return gfx_v9_0_hw_init(handle);
3342 int r;
3343
3344 r = gfx_v9_0_hw_init(adev);
3345 adev->gfx.in_suspend = false;
3346 return r;
3347} 3450}
3348 3451
3349static bool gfx_v9_0_is_idle(void *handle) 3452static bool gfx_v9_0_is_idle(void *handle)
@@ -4609,68 +4712,6 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4609 return 0; 4712 return 0;
4610} 4713}
4611 4714
4612static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
4613 struct amdgpu_irq_src *src,
4614 unsigned int type,
4615 enum amdgpu_interrupt_state state)
4616{
4617 uint32_t tmp, target;
4618 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4619
4620 if (ring->me == 1)
4621 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4622 else
4623 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
4624 target += ring->pipe;
4625
4626 switch (type) {
4627 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
4628 if (state == AMDGPU_IRQ_STATE_DISABLE) {
4629 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4630 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4631 GENERIC2_INT_ENABLE, 0);
4632 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4633
4634 tmp = RREG32(target);
4635 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4636 GENERIC2_INT_ENABLE, 0);
4637 WREG32(target, tmp);
4638 } else {
4639 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4640 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4641 GENERIC2_INT_ENABLE, 1);
4642 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4643
4644 tmp = RREG32(target);
4645 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4646 GENERIC2_INT_ENABLE, 1);
4647 WREG32(target, tmp);
4648 }
4649 break;
4650 default:
4651 BUG(); /* kiq only support GENERIC2_INT now */
4652 break;
4653 }
4654 return 0;
4655}
4656
4657static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
4658 struct amdgpu_irq_src *source,
4659 struct amdgpu_iv_entry *entry)
4660{
4661 u8 me_id, pipe_id, queue_id;
4662 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4663
4664 me_id = (entry->ring_id & 0x0c) >> 2;
4665 pipe_id = (entry->ring_id & 0x03) >> 0;
4666 queue_id = (entry->ring_id & 0x70) >> 4;
4667 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
4668 me_id, pipe_id, queue_id);
4669
4670 amdgpu_fence_process(ring);
4671 return 0;
4672}
4673
4674static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { 4715static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4675 .name = "gfx_v9_0", 4716 .name = "gfx_v9_0",
4676 .early_init = gfx_v9_0_early_init, 4717 .early_init = gfx_v9_0_early_init,
@@ -4819,11 +4860,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
4819 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; 4860 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
4820} 4861}
4821 4862
4822static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
4823 .set = gfx_v9_0_kiq_set_interrupt_state,
4824 .process = gfx_v9_0_kiq_irq,
4825};
4826
4827static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { 4863static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
4828 .set = gfx_v9_0_set_eop_interrupt_state, 4864 .set = gfx_v9_0_set_eop_interrupt_state,
4829 .process = gfx_v9_0_eop_irq, 4865 .process = gfx_v9_0_eop_irq,
@@ -4849,9 +4885,6 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
4849 4885
4850 adev->gfx.priv_inst_irq.num_types = 1; 4886 adev->gfx.priv_inst_irq.num_types = 1;
4851 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; 4887 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
4852
4853 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
4854 adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
4855} 4888}
4856 4889
4857static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) 4890static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)