Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cik_sdma.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik_sdma.c	130
1 file changed, 62 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9ea9de457da3..814598e76c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
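This commit replaces the loose per-engine state (an adev->sdma[] array plus standalone sdma_trap_irq/sdma_illegal_inst_irq fields) with a single consolidated adev->sdma struct, and makes every loop run to a runtime adev->sdma.num_instances instead of the compile-time SDMA_MAX_INSTANCE. Judging only from the accesses in the hunks below, the consolidated layout is roughly the following sketch; the authoritative definitions live in amdgpu.h, and the field order here is a guess:

	struct amdgpu_sdma_instance {
		const struct firmware	*fw;		/* SDMA microcode image */
		uint32_t		fw_version;
		uint32_t		feature_version;
		bool			burst_nop;	/* fw >= 20 supports burst NOPs */
		struct amdgpu_ring	ring;
	};

	struct amdgpu_sdma {
		struct amdgpu_sdma_instance	instance[SDMA_MAX_INSTANCE];
		struct amdgpu_irq_src		trap_irq;
		struct amdgpu_irq_src		illegal_inst_irq;
		int				num_instances;	/* set per ASIC in early_init */
	};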
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 
 	DRM_DEBUG("\n");
 
@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 	}
 out:
 	if (err) {
 		printk(KERN_ERR
 		       "cik_sdma: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
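The new err = 0 initializer above is not cosmetic. With the loop bound now a runtime value, the compiler can no longer prove the body executes, so the if (err) test at out: could read an uninitialized err on a hypothetical zero-instance part. A standalone sketch of the hazard (try_load() is a made-up stand-in for the request_firmware()/amdgpu_ucode_validate() pair):

	static int load_all(int num_instances)
	{
		int err = 0, i;	/* without '= 0', zero iterations leaves err undefined */

		for (i = 0; i < num_instances; i++) {
			err = try_load(i);	/* hypothetical per-instance load */
			if (err)
				goto out;
		}
	out:
		if (err)
			pr_err("load failed: %d\n", err);
		return err;
	}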
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
 	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 }
@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
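The insert_nop path also picks up the type rename: amdgpu_get_sdma_instance() now hands back a struct amdgpu_sdma_instance * (previously struct amdgpu_sdma *), resolving a ring pointer to its owning engine. The helper itself is not shown in this diff; it is presumably a small reverse lookup along these lines:

	/* Sketch of the lookup the rename implies; the real helper lives in amdgpu.h. */
	static inline struct amdgpu_sdma_instance *
	amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		int i;

		for (i = 0; i < adev->sdma.num_instances; i++)
			if (&adev->sdma.instance[i].ring == ring)
				return &adev->sdma.instance[i];

		return NULL;
	}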
@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 	u32 ref_and_mask;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 	else
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl;
 	int i;
 
@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 		cik_sdma_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 	u32 wb_offset;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;
 
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	cik_sdma_enable(adev, false);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 		fw_data = (const __le32 *)
-			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 		for (j = 0; j < fw_size; j++)
 			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
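Besides the instance[] rename, cik_sdma_load_microcode() drops the hardcoded check that both adev->sdma[0].fw and adev->sdma[1].fw are present and instead NULL-checks each firmware image inside the loop, so exactly the instances in use are validated. For reference, the header fields it parses suggest a layout roughly like this (trimmed to what the function reads; the full definitions are in amdgpu_ucode.h):

	struct common_firmware_header {
		/* ... */
		uint32_t ucode_version;
		uint32_t ucode_size_bytes;		/* payload size, bytes */
		uint32_t ucode_array_offset_bytes;	/* payload offset from start of file */
		/* ... */
	};

	struct sdma_firmware_header_v1_0 {
		struct common_firmware_header header;
		uint32_t ucode_feature_version;
		/* ... */
	};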
@@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 	u32 pad_count;
 	int i;
 
@@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
 	cik_sdma_set_ring_funcs(adev);
 	cik_sdma_set_irq_funcs(adev);
 	cik_sdma_set_buffer_funcs(adev);
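cik_sdma_early_init() is where the runtime count gets its value, before the ring/irq/buffer/VM function tables are installed. Every CIK part carries two SDMA engines, so the compile-time maximum is the right value here; the point of the indirection is that another ASIC's early_init could report fewer, e.g. (hypothetical):

	/* Hypothetical single-engine ASIC: */
	adev->sdma.num_instances = 1;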
@@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
+	int r, i;
 
 	r = cik_sdma_init_microcode(adev);
 	if (r) {
@@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
 	}
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
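The two hand-unrolled ring-init blocks collapse into one loop; the only per-instance differences, the ring name and the trap IRQ type, are derived from i. The ternary is fine while CIK caps out at two engines; if the trap types were known to be consecutive enum values, a sketch like the following would generalize it (hypothetical, the enum layout is not visible in this diff):

	/* Hypothetical, assumes the TRAPn enumerators are contiguous: */
	unsigned int irq_type = AMDGPU_SDMA_IRQ_TRAP0 + i;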
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
 static int cik_sdma_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
 	return 0;
 }
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
 	dev_info(adev->dev, "CIK SDMA registers\n");
 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
 		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
 			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
 	case 0:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[0].ring);
+			amdgpu_fence_process(&adev->sdma.instance[0].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
 	case 1:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[1].ring);
+			amdgpu_fence_process(&adev->sdma.instance[1].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
-	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
 
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
-	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 	}
 }
 
@@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
 		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }