Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 146
1 file changed, 97 insertions, 49 deletions

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..903aa240e946 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask;
-	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
 
-	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
  */
 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	if (adev->gfx.rlc.save_restore_obj == NULL) {
 		r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL,
 				     &adev->gfx.rlc.save_restore_obj);
 		if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	if (adev->gfx.rlc.clear_state_obj == NULL) {
 		r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL,
 				     &adev->gfx.rlc.clear_state_obj);
 		if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	if (adev->gfx.rlc.cp_table_obj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL,
 				     &adev->gfx.rlc.cp_table_obj);
 		if (r) {
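Note on the three hunks above: every RLC buffer (save/restore, clear state, CP table) is handed to the hardware as a single base address, so its backing VRAM plausibly needs to be one physically contiguous range; that reading of the new flag is an inference, only the flag itself appears in the diff. A minimal sketch of the resulting placement flags:

    /* CPU_ACCESS_REQUIRED keeps the BO CPU-mappable so the driver can
     * fill it; VRAM_CONTIGUOUS requests one linear VRAM range. */
    u64 rlc_bo_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;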
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	return
-		4; /* gfx_v7_0_ring_emit_ib_gfx */
+	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	return RREG32(mmSQ_IND_DATA);
 }
 
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
 {
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	/* type 0 wave data */
+	dst[(*no_fields)++] = 0;
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
+	.read_wave_data = &gfx_v7_0_read_wave_data,
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
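Note: wave_read_ind() drives the SQ indirect-register interface: the wave slot, SIMD and register offset are packed into mmSQ_IND_INDEX (with bit 13 set), and the selected value is read back through mmSQ_IND_DATA; gfx_v7_0_read_wave_data() then walks the interesting ixSQ_WAVE_* registers for one wave, writing 19 fields. A hypothetical caller, sketched after the debugfs-style wave reader this hook feeds (function name, locking and GRBM SE/SH banking are simplifications, not part of this diff):

    /* Hypothetical usage sketch: dump one wave's fields through the new
     * amdgpu_gfx_funcs hook. Assumes GRBM is already banked to the
     * target SE/SH and any required locks are held by the caller. */
    static void dump_wave(struct amdgpu_device *adev, u32 simd, u32 wave)
    {
        uint32_t fields[32];	/* 19 entries are written above */
        int i, n = 0;

        adev->gfx.funcs->read_wave_data(adev, simd, wave, fields, &n);
        for (i = 0; i < n; i++)
            DRM_INFO("wave %u.%u field %d = 0x%08x\n",
                     simd, wave, i, fields[i]);
    }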
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
 	}
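Note: both amdgpu_ring_init() call sites shrink because the NOP packet, alignment mask and ring type now live in the static amdgpu_ring_funcs tables (see the ring-funcs hunks below). The resulting prototype, assumed here from the companion amdgpu_ring.h change that is not part of this single-file diff:

    /* Assumed post-series prototype (hedged): nop, align_mask and type
     * are taken from ring->funcs instead of being passed as arguments. */
    int amdgpu_ring_init(struct amdgpu_device *adev,
                         struct amdgpu_ring *ring, unsigned max_dw,
                         struct amdgpu_irq_src *irq_src,
                         unsigned irq_type);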
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
 	.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3, /* gfx_v7_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
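Note: with .emit_frame_size and .emit_ib_size as compile-time constants in the funcs tables, the per-ring get_emit_ib_size/get_dma_frame_size callbacks (and an indirect call on every submission) can be deleted. An illustrative sketch of how core code can size a submission from the new fields (the real consumer lives in the shared ring code, not this file; variable names are hypothetical):

    /* Reserve worst-case ring space for one frame plus its IBs. */
    unsigned ndw = ring->funcs->emit_frame_size +
                   num_ibs * ring->funcs->emit_ib_size;
    int r = amdgpu_ring_alloc(ring, ndw);
    if (r)
        return r;	/* not enough ring space */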
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
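Note: the four exported amdgpu_ip_block_version constants pair with making gfx_v7_0_ip_funcs static above: SoC code now registers the GFX block through a version struct instead of reaching into this file's funcs table. A hedged sketch of the expected call site (amdgpu_ip_block_add() and its use in cik.c come from the companion IP-block registration rework, not from this diff):

    /* e.g. from the CIK SoC setup, picking the GFX 7.x variant for
     * the detected ASIC (which minor maps to which chip is assumed) */
    amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);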