Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c	166
1 file changed, 82 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 9bfe92df15f7..670555a45da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct sdma_firmware_header_v1_0 *hdr;
@@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 		if (err)
 			goto out;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma[i].fw;
+			info->fw = adev->sdma.instance[i].fw;
 			header = (const struct common_firmware_header *)info->fw->data;
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +235,9 @@ out:
 		printk(KERN_ERR
 		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
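
For context: the loop above keeps the historical firmware naming, where instance 0 loads %s_sdma.bin and instance 1 loads %s_sdma1.bin, so the if/else around snprintf survives the refactor. A minimal sketch of how the name selection could collapse into one format string for an arbitrary instance count (this helper is hypothetical, not part of the patch):

/* Hypothetical helper mirroring the naming branch in
 * sdma_v3_0_init_microcode(): instance 0 stays unsuffixed,
 * every other instance gets its index appended. */
static void sdma_fw_name(char *buf, size_t len, const char *chip_name, int i)
{
	if (i == 0)
		snprintf(buf, len, "amdgpu/%s_sdma.bin", chip_name);
	else
		snprintf(buf, len, "amdgpu/%s_sdma%d.bin", chip_name, i);
}

For i = 1 this produces the same "amdgpu/%s_sdma1.bin" string the driver requests today.
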
@@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 		/* XXX check if swapping is necessary on BE */
 		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 	}
@@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
 		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 	}
@@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
@@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	u32 f32_cntl;
 	int i;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v3_0_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 	u32 doorbell;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;
 
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	sdma_v3_0_enable(adev, false);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		fw_data = (const __le32 *)
-			(adev->sdma[i].fw->data +
+			(adev->sdma.instance[i].fw->data +
 			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 		for (j = 0; j < fw_size; j++)
 			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
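
Two things are easy to miss in this hunk. First, the up-front check that both firmware images were loaded becomes a per-instance check inside the loop, so a missing image still yields -EINVAL, just after the MEs have been halted rather than before. Second, the upload writes fw_size words to the single mmSDMA0_UCODE_DATA offset, which presumably works because the engine advances the ucode address internally after each data write. The sequence isolated as a sketch (sdma_upload_ucode() is a hypothetical name, not a helper the patch adds):

/* Sketch of the upload sequence used above: reset the ucode address,
 * stream the image through the data window, then park the address
 * register on the firmware version. */
static void sdma_upload_ucode(struct amdgpu_device *adev, int i,
			      const __le32 *fw_data, u32 fw_size, u32 fw_version)
{
	u32 j;

	WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
	for (j = 0; j < fw_size; j++)
		WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
	WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], fw_version);
}
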
@@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
-	int r;
+	int r, i;
 
 	if (!adev->firmware.smu_load) {
 		r = sdma_v3_0_load_microcode(adev);
 		if (r)
 			return r;
 	} else {
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA0);
-		if (r)
-			return -EINVAL;
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA1);
-		if (r)
-			return -EINVAL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+					(i == 0) ?
+					AMDGPU_UCODE_ID_SDMA0 :
+					AMDGPU_UCODE_ID_SDMA1);
+			if (r)
+				return -EINVAL;
+		}
 	}
 
 	/* unhalt the MEs */
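
The (i == 0) ternary picks between the two SMU ucode IDs. Since sdma_v3_0_init_microcode() above already indexes the same enum arithmetically with AMDGPU_UCODE_ID_SDMA0 + i, the loop body could be written the same way, assuming the two IDs stay adjacent (a sketch, not what the patch does):

	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* assumes AMDGPU_UCODE_ID_SDMA1 == AMDGPU_UCODE_ID_SDMA0 + 1 */
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
				AMDGPU_UCODE_ID_SDMA0 + i);
		if (r)
			return -EINVAL;
	}
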
@@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 	u32 pad_count;
 	int i;
 
@@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	switch (adev->asic_type) {
+	default:
+		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+		break;
+	}
+
 	sdma_v3_0_set_ring_funcs(adev);
 	sdma_v3_0_set_buffer_funcs(adev);
 	sdma_v3_0_set_vm_pte_funcs(adev);
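
The new switch has only a default arm, so today every ASIC handled by this file gets SDMA_MAX_INSTANCE engines; the switch exists as a hook for parts with a different count. A hypothetical extension (CHIP_EXAMPLE is a placeholder, not a real enum value):

	switch (adev->asic_type) {
	case CHIP_EXAMPLE:	/* hypothetical single-SDMA part */
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}
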
@@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
 static int sdma_v3_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
@@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
 		return r;
 	}
 
-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = true;
+		ring->doorbell_index = (i == 0) ?
+			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
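
The merged loop reproduces the old behavior exactly: sprintf(ring->name, "sdma%d", i) regenerates the "sdma0" and "sdma1" names, and the two ternaries keep the per-engine doorbell and trap-IRQ assignments. An alternative shape, small lookup tables indexed by instance, would avoid repeating the (i == 0) test; this is illustrative only and assumes SDMA_MAX_INSTANCE stays at 2:

	/* Illustrative alternative to the ternaries above. */
	static const u32 doorbell_idx[] = {
		AMDGPU_DOORBELL_sDMA_ENGINE0,
		AMDGPU_DOORBELL_sDMA_ENGINE1,
	};
	static const unsigned trap_irq_type[] = {
		AMDGPU_SDMA_IRQ_TRAP0,
		AMDGPU_SDMA_IRQ_TRAP1,
	};

	ring->doorbell_index = doorbell_idx[i];
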
@@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
 static int sdma_v3_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
 	return 0;
 }
@@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
 	dev_info(adev->dev, "VI SDMA registers\n");
 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
 		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
 			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
 	case 0:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[0].ring);
+			amdgpu_fence_process(&adev->sdma.instance[0].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
 	case 1:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[1].ring);
+			amdgpu_fence_process(&adev->sdma.instance[1].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
-	adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
 
 static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-	adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
-	adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 	}
 }
 
@@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
 		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }