author	James Zhu <James.Zhu@amd.com>	2019-07-10 11:53:34 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2019-07-18 15:18:05 -0400
commit	c01b6a1d38675652199d12b898c1c23b96b5055f (patch)
tree	54dbb93453c26780063265d374a11084475aab1b
parent	989b6a0549977faf0b5b8d7e1c2634e880c579a2 (diff)
drm/amdgpu: modify amdgpu_vcn to support multiple instances
Arcturus has dual VCN. Restructure amdgpu_device::vcn to support multiple VCN instances. There are no logical changes here.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c	68
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h	24
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c	110
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c	106
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c	87
7 files changed, 210 insertions, 197 deletions
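
The change is mechanical: every piece of per-instance VCN state (VCPU BO, rings, IRQ source, external register offsets) moves from struct amdgpu_vcn into the new struct amdgpu_vcn_inst, and every access gains an inst[0] (or the equivalent inst->) step. As a minimal sketch of the per-instance iteration this layout enables — hypothetical, since this patch itself still hardcodes instance 0 in the common code:

	int i, j;

	/* sketch: loop over VCN instances once num_vcn_inst > 1 is set */
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_jpeg);
	}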
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..82b871fdfb45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -154,15 +154,15 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		num_rings = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
-		rings[0] = &adev->vcn.ring_dec;
+		rings[0] = &adev->vcn.inst[0].ring_dec;
 		num_rings = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
-		rings[0] = &adev->vcn.ring_enc[0];
+		rings[0] = &adev->vcn.inst[0].ring_enc[0];
 		num_rings = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
-		rings[0] = &adev->vcn.ring_jpeg;
+		rings[0] = &adev->vcn.inst[0].ring_jpeg;
 		num_rings = 1;
 		break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0cf7e8606fd3..4824a2b5f29b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -408,7 +408,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_dec.sched.ready)
+		if (adev->vcn.inst[0].ring_dec.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -416,14 +416,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
 		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.ring_enc[i].sched.ready)
+			if (adev->vcn.inst[0].ring_enc[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_jpeg.sched.ready)
+		if (adev->vcn.inst[0].ring_jpeg.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a119a7df0305..c102267da85d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -147,8 +147,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
-				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo,
+				    &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 		return r;
@@ -171,7 +171,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
 	int i;
 
-	kvfree(adev->vcn.saved_bo);
+	kvfree(adev->vcn.inst[0].saved_bo);
 
 	if (adev->vcn.indirect_sram) {
 		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
@@ -179,16 +179,16 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 				      (void **)&adev->vcn.dpg_sram_cpu_addr);
 	}
 
-	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
-			      &adev->vcn.gpu_addr,
-			      (void **)&adev->vcn.cpu_addr);
+	amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo,
+			      &adev->vcn.inst[0].gpu_addr,
+			      (void **)&adev->vcn.inst[0].cpu_addr);
 
-	amdgpu_ring_fini(&adev->vcn.ring_dec);
+	amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec);
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+		amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]);
 
-	amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+	amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg);
 
 	release_firmware(adev->vcn.fw);
 
@@ -202,17 +202,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-	if (adev->vcn.vcpu_bo == NULL)
+	if (adev->vcn.inst[0].vcpu_bo == NULL)
 		return 0;
 
-	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-	ptr = adev->vcn.cpu_addr;
+	size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+	ptr = adev->vcn.inst[0].cpu_addr;
 
-	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
-	if (!adev->vcn.saved_bo)
+	adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL);
+	if (!adev->vcn.inst[0].saved_bo)
 		return -ENOMEM;
 
-	memcpy_fromio(adev->vcn.saved_bo, ptr, size);
+	memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size);
 
 	return 0;
 }
@@ -222,16 +222,16 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 	unsigned size;
 	void *ptr;
 
-	if (adev->vcn.vcpu_bo == NULL)
+	if (adev->vcn.inst[0].vcpu_bo == NULL)
 		return -EINVAL;
 
-	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-	ptr = adev->vcn.cpu_addr;
+	size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+	ptr = adev->vcn.inst[0].cpu_addr;
 
-	if (adev->vcn.saved_bo != NULL) {
-		memcpy_toio(ptr, adev->vcn.saved_bo, size);
-		kvfree(adev->vcn.saved_bo);
-		adev->vcn.saved_bo = NULL;
+	if (adev->vcn.inst[0].saved_bo != NULL) {
+		memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size);
+		kvfree(adev->vcn.inst[0].saved_bo);
+		adev->vcn.inst[0].saved_bo = NULL;
 	} else {
 		const struct common_firmware_header *hdr;
 		unsigned offset;
@@ -239,7 +239,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+			memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset,
 				    le32_to_cpu(hdr->ucode_size_bytes));
 			size -= le32_to_cpu(hdr->ucode_size_bytes);
 			ptr += le32_to_cpu(hdr->ucode_size_bytes);
@@ -258,7 +258,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	unsigned int i;
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+		fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
 	}
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -269,7 +269,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		else
 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+		if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
 			new_state.jpeg = VCN_DPG_STATE__PAUSE;
 		else
 			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -277,8 +277,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		adev->vcn.pause_dpg_mode(adev, &new_state);
 	}
 
-	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
-	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+	fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg);
+	fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec);
 
 	if (fences == 0) {
 		amdgpu_gfx_off_ctrl(adev, true);
@@ -312,14 +312,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	unsigned int i;
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+		fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
 	}
 	if (fences)
 		new_state.fw_based = VCN_DPG_STATE__PAUSE;
 	else
 		new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-	if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+	if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
 		new_state.jpeg = VCN_DPG_STATE__PAUSE;
 	else
 		new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -345,7 +345,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
-	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+	WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r)
 		return r;
@@ -353,7 +353,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.external.scratch9);
+		tmp = RREG32(adev->vcn.inst[0].external.scratch9);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
@@ -664,7 +664,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
-	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
+	WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r)
 		return r;
@@ -674,7 +674,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
 	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.external.jpeg_pitch);
+		tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
@@ -748,7 +748,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(adev->vcn.external.jpeg_pitch);
+		tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
 		if (tmp == 0xDEADBEEF)
 			break;
 		udelay(1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index bfd8c3cea13a..d2fc47a954ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -30,6 +30,8 @@
 #define AMDGPU_VCN_FIRMWARE_OFFSET	256
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
+#define AMDGPU_MAX_VCN_INSTANCES	2
+
 #define VCN_DEC_CMD_FENCE		0x00000000
 #define VCN_DEC_CMD_TRAP		0x00000001
 #define VCN_DEC_CMD_WRITE_REG		0x00000004
@@ -155,30 +157,38 @@ struct amdgpu_vcn_reg{
 	unsigned	jpeg_pitch;
 };
 
-struct amdgpu_vcn {
+struct amdgpu_vcn_inst {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	unsigned		fw_version;
 	void			*saved_bo;
-	struct delayed_work	idle_work;
-	const struct firmware	*fw;	/* VCN firmware */
 	struct amdgpu_ring	ring_dec;
 	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
 	struct amdgpu_ring	ring_jpeg;
 	struct amdgpu_irq_src	irq;
+	struct amdgpu_vcn_reg	external;
+};
+
+struct amdgpu_vcn {
+	unsigned		fw_version;
+	struct delayed_work	idle_work;
+	const struct firmware	*fw;	/* VCN firmware */
 	unsigned		num_enc_rings;
 	enum amd_powergating_state cur_state;
 	struct dpg_pause_state pause_state;
-	struct amdgpu_vcn_reg	internal, external;
-	int (*pause_dpg_mode)(struct amdgpu_device *adev,
-		struct dpg_pause_state *new_state);
 
 	bool			indirect_sram;
 	struct amdgpu_bo	*dpg_sram_bo;
 	void			*dpg_sram_cpu_addr;
 	uint64_t		dpg_sram_gpu_addr;
 	uint32_t		*dpg_sram_curr_addr;
+
+	uint8_t			num_vcn_inst;
+	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
+	struct amdgpu_vcn_reg	internal;
+
+	int (*pause_dpg_mode)(struct amdgpu_device *adev,
+		struct dpg_pause_state *new_state);
 };
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
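
This header split is the heart of the patch: shared state (firmware, fw_version, idle work, DPG bookkeeping, internal register offsets) stays in struct amdgpu_vcn, while everything the hardware duplicates per engine moves into struct amdgpu_vcn_inst, bounded by AMDGPU_MAX_VCN_INSTANCES. A minimal sketch of how a dual-VCN part would size the array — a hypothetical reconstruction, not the vcn_v2_5.c code this patch actually carries:

	/* hypothetical early_init for a dual-VCN ASIC such as Arcturus */
	static int vcn_v2_5_early_init_sketch(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		adev->vcn.num_vcn_inst = 2;	/* <= AMDGPU_MAX_VCN_INSTANCES */
		adev->vcn.num_enc_rings = 2;

		return 0;
	}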
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 916e32533c1b..93b3500e522b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -63,6 +63,7 @@ static int vcn_v1_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->vcn.num_vcn_inst = 1;
 	adev->vcn.num_enc_rings = 2;
 
 	vcn_v1_0_set_dec_ring_funcs(adev);
@@ -87,20 +88,21 @@ static int vcn_v1_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* VCN DEC TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+			VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
 	if (r)
 		return r;
 
 	/* VCN ENC TRAP */
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-					&adev->vcn.irq);
+					&adev->vcn.inst->irq);
 		if (r)
 			return r;
 	}
 
 	/* VCN JPEG TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
 	if (r)
 		return r;
 
@@ -122,39 +124,39 @@ static int vcn_v1_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	ring = &adev->vcn.ring_dec;
+	ring = &adev->vcn.inst->ring_dec;
 	sprintf(ring->name, "vcn_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 	if (r)
 		return r;
 
-	adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
+	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
-	adev->vcn.internal.data0 = adev->vcn.external.data0 =
+	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
-	adev->vcn.internal.data1 = adev->vcn.external.data1 =
+	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
-	adev->vcn.internal.cmd = adev->vcn.external.cmd =
+	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
-	adev->vcn.internal.nop = adev->vcn.external.nop =
+	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.ring_enc[i];
+		ring = &adev->vcn.inst->ring_enc[i];
 		sprintf(ring->name, "vcn_enc%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 		if (r)
 			return r;
 	}
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	sprintf(ring->name, "vcn_jpeg");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 	if (r)
 		return r;
 
 	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
-	adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch =
+	adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
 		SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
 	return 0;
@@ -191,7 +193,7 @@ static int vcn_v1_0_sw_fini(void *handle)
 static int vcn_v1_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	int i, r;
 
 	r = amdgpu_ring_test_helper(ring);
@@ -199,14 +201,14 @@ static int vcn_v1_0_hw_init(void *handle)
 		goto done;
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.ring_enc[i];
+		ring = &adev->vcn.inst->ring_enc[i];
 		ring->sched.ready = true;
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			goto done;
 	}
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	r = amdgpu_ring_test_helper(ring);
 	if (r)
 		goto done;
@@ -229,7 +231,7 @@ done:
 static int vcn_v1_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
@@ -304,9 +306,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 		offset = 0;
 	} else {
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->vcn.gpu_addr));
+			lower_32_bits(adev->vcn.inst->gpu_addr));
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->vcn.gpu_addr));
+			upper_32_bits(adev->vcn.inst->gpu_addr));
 		offset = size;
 		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
@@ -316,17 +318,17 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 
 	/* cache window 1: stack */
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset));
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset));
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
 	/* cache window 2: context */
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
@@ -374,9 +376,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 		offset = 0;
 	} else {
 		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
 		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
 		offset = size;
 		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
@@ -386,9 +388,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 
 	/* cache window 1: stack */
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
 		0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
@@ -396,10 +398,10 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 
 	/* cache window 2: context */
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 		0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 		0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
 	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
@@ -779,7 +781,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
  */
 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	int i, j, r;
@@ -932,21 +934,21 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-	ring = &adev->vcn.ring_enc[0];
+	ring = &adev->vcn.inst->ring_enc[0];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-	ring = &adev->vcn.ring_enc[1];
+	ring = &adev->vcn.inst->ring_enc[1];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
 			UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
@@ -968,7 +970,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 
 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 
@@ -1106,7 +1108,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
 	/* initialize JPEG wptr */
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
 
 	/* copy patch commands to the jpeg ring */
@@ -1255,21 +1257,21 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
 			/* Restore */
-			ring = &adev->vcn.ring_enc[0];
+			ring = &adev->vcn.inst->ring_enc[0];
 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 			WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 			WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 
-			ring = &adev->vcn.ring_enc[1];
+			ring = &adev->vcn.inst->ring_enc[1];
 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 			WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 			WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 			WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 
-			ring = &adev->vcn.ring_dec;
+			ring = &adev->vcn.inst->ring_dec;
 			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 				RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1315,7 +1317,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 
 			/* Restore */
-			ring = &adev->vcn.ring_jpeg;
+			ring = &adev->vcn.inst->ring_jpeg;
 			WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 				UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1329,7 +1331,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 				UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 
-			ring = &adev->vcn.ring_dec;
+			ring = &adev->vcn.inst->ring_dec;
 			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 				RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1596,7 +1598,7 @@ static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vcn.ring_enc[0])
+	if (ring == &adev->vcn.inst->ring_enc[0])
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
 	else
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1613,7 +1615,7 @@ static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vcn.ring_enc[0])
+	if (ring == &adev->vcn.inst->ring_enc[0])
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
 	else
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
@@ -1630,7 +1632,7 @@ static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vcn.ring_enc[0])
+	if (ring == &adev->vcn.inst->ring_enc[0])
 		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
 			lower_32_bits(ring->wptr));
 	else
@@ -2114,16 +2116,16 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
 
 	switch (entry->src_id) {
 	case 124:
-		amdgpu_fence_process(&adev->vcn.ring_dec);
+		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
 		break;
 	case 119:
-		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
 		break;
 	case 120:
-		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
 		break;
 	case 126:
-		amdgpu_fence_process(&adev->vcn.ring_jpeg);
+		amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2295,7 +2297,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
 
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
+	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
 	DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
@@ -2304,14 +2306,14 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
+		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
 
 	DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+	adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
 	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
@@ -2322,8 +2324,8 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
+	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
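
One spelling note for the vcn_v1_0.c and vcn_v2_0.c hunks: adev->vcn.inst without an index decays to a pointer to the first array element, so adev->vcn.inst->ring_dec and the amdgpu_ctx.c/amdgpu_kms.c form adev->vcn.inst[0].ring_dec name the same object. A one-line sketch of the equivalence:

	struct amdgpu_vcn_inst *inst = adev->vcn.inst;	/* == &adev->vcn.inst[0] */

	WARN_ON(&inst->ring_dec != &adev->vcn.inst[0].ring_dec);	/* never fires */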
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b6b77a063c34..31539e6a16b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,6 +92,7 @@ static int vcn_v2_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->vcn.num_vcn_inst = 1;
 	adev->vcn.num_enc_rings = 2;
 
 	vcn_v2_0_set_dec_ring_funcs(adev);
@@ -118,7 +119,7 @@ static int vcn_v2_0_sw_init(void *handle)
 	/* VCN DEC TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
-			      &adev->vcn.irq);
+			      &adev->vcn.inst->irq);
 	if (r)
 		return r;
 
@@ -126,15 +127,14 @@ static int vcn_v2_0_sw_init(void *handle)
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-				      &adev->vcn.irq);
+				      &adev->vcn.inst->irq);
 		if (r)
 			return r;
 	}
 
 	/* VCN JPEG TRAP */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-			      VCN_2_0__SRCID__JPEG_DECODE,
-			      &adev->vcn.irq);
+			      VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
 	if (r)
 		return r;
 
@@ -156,13 +156,13 @@ static int vcn_v2_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	ring = &adev->vcn.ring_dec;
+	ring = &adev->vcn.inst->ring_dec;
 
 	ring->use_doorbell = true;
 	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
 	sprintf(ring->name, "vcn_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 	if (r)
 		return r;
 
@@ -174,38 +174,38 @@ static int vcn_v2_0_sw_init(void *handle)
 	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
 	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-	adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
 	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-	adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
 	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-	adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
 	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-	adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
 	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-	adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.ring_enc[i];
+		ring = &adev->vcn.inst->ring_enc[i];
 		ring->use_doorbell = true;
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
 		sprintf(ring->name, "vcn_enc%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 		if (r)
 			return r;
 	}
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	ring->use_doorbell = true;
 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
 	sprintf(ring->name, "vcn_jpeg");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 	if (r)
 		return r;
 
 	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
 
 	adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-	adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
+	adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
 	return 0;
 }
@@ -241,7 +241,7 @@ static int vcn_v2_0_sw_fini(void *handle)
 static int vcn_v2_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	int i, r;
 
 	adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -255,7 +255,7 @@ static int vcn_v2_0_hw_init(void *handle)
 	}
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.ring_enc[i];
+		ring = &adev->vcn.inst->ring_enc[i];
 		ring->sched.ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
@@ -264,7 +264,7 @@ static int vcn_v2_0_hw_init(void *handle)
 		}
 	}
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	ring->sched.ready = true;
 	r = amdgpu_ring_test_ring(ring);
 	if (r) {
@@ -290,7 +290,7 @@ done:
 static int vcn_v2_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	int i;
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
@@ -301,11 +301,11 @@ static int vcn_v2_0_hw_fini(void *handle)
 	ring->sched.ready = false;
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.ring_enc[i];
+		ring = &adev->vcn.inst->ring_enc[i];
 		ring->sched.ready = false;
 	}
 
-	ring = &adev->vcn.ring_jpeg;
+	ring = &adev->vcn.inst->ring_jpeg;
 	ring->sched.ready = false;
 
 	return 0;
@@ -375,9 +375,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 		offset = 0;
 	} else {
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->vcn.gpu_addr));
+			lower_32_bits(adev->vcn.inst->gpu_addr));
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->vcn.gpu_addr));
+			upper_32_bits(adev->vcn.inst->gpu_addr));
 		offset = size;
 		/* No signed header for now from firmware
 		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
@@ -390,17 +390,17 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 
 	/* cache window 1: stack */
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset));
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset));
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
 	/* cache window 2: context */
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
@@ -436,10 +436,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 	} else {
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-			lower_32_bits(adev->vcn.gpu_addr), 0, indirect);
+			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-			upper_32_bits(adev->vcn.gpu_addr), 0, indirect);
+			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
 		offset = size;
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
@@ -457,10 +457,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 	if (!indirect) {
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
-			lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
-			upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	} else {
@@ -477,10 +477,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
 	/* cache window 2: context */
 	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
-		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
-		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
 		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
@@ -668,7 +668,7 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
  */
 static int jpeg_v2_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
 	uint32_t tmp;
 	int r = 0;
 
@@ -930,7 +930,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	uint32_t rb_bufsz, tmp;
 
 	vcn_v2_0_enable_static_power_gating(adev);
@@ -1056,7 +1056,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 
 static int vcn_v2_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	int i, j, r;
@@ -1207,14 +1207,14 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 			lower_32_bits(ring->wptr));
 
-	ring = &adev->vcn.ring_enc[0];
+	ring = &adev->vcn.inst->ring_enc[0];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-	ring = &adev->vcn.ring_enc[1];
+	ring = &adev->vcn.inst->ring_enc[1];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -1361,14 +1361,14 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
1361 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); 1361 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
1362 1362
1363 /* Restore */ 1363 /* Restore */
1364 ring = &adev->vcn.ring_enc[0]; 1364 ring = &adev->vcn.inst->ring_enc[0];
1365 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); 1365 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1366 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 1366 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1367 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); 1367 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1368 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); 1368 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1369 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 1369 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1370 1370
1371 ring = &adev->vcn.ring_enc[1]; 1371 ring = &adev->vcn.inst->ring_enc[1];
1372 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); 1372 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1373 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 1373 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1374 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); 1374 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
@@ -1660,7 +1660,7 @@ static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1660{ 1660{
1661 struct amdgpu_device *adev = ring->adev; 1661 struct amdgpu_device *adev = ring->adev;
1662 1662
1663 if (ring == &adev->vcn.ring_enc[0]) 1663 if (ring == &adev->vcn.inst->ring_enc[0])
1664 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); 1664 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1665 else 1665 else
1666 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); 1666 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -1677,7 +1677,7 @@ static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1677{ 1677{
1678 struct amdgpu_device *adev = ring->adev; 1678 struct amdgpu_device *adev = ring->adev;
1679 1679
1680 if (ring == &adev->vcn.ring_enc[0]) { 1680 if (ring == &adev->vcn.inst->ring_enc[0]) {
1681 if (ring->use_doorbell) 1681 if (ring->use_doorbell)
1682 return adev->wb.wb[ring->wptr_offs]; 1682 return adev->wb.wb[ring->wptr_offs];
1683 else 1683 else
@@ -1701,7 +1701,7 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1701{ 1701{
1702 struct amdgpu_device *adev = ring->adev; 1702 struct amdgpu_device *adev = ring->adev;
1703 1703
1704 if (ring == &adev->vcn.ring_enc[0]) { 1704 if (ring == &adev->vcn.inst->ring_enc[0]) {
1705 if (ring->use_doorbell) { 1705 if (ring->use_doorbell) {
1706 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); 1706 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1707 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 1707 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -2075,16 +2075,16 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
2075 2075
2076 switch (entry->src_id) { 2076 switch (entry->src_id) {
2077 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: 2077 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
2078 amdgpu_fence_process(&adev->vcn.ring_dec); 2078 amdgpu_fence_process(&adev->vcn.inst->ring_dec);
2079 break; 2079 break;
2080 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: 2080 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
2081 amdgpu_fence_process(&adev->vcn.ring_enc[0]); 2081 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
2082 break; 2082 break;
2083 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: 2083 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
2084 amdgpu_fence_process(&adev->vcn.ring_enc[1]); 2084 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
2085 break; 2085 break;
2086 case VCN_2_0__SRCID__JPEG_DECODE: 2086 case VCN_2_0__SRCID__JPEG_DECODE:
2087 amdgpu_fence_process(&adev->vcn.ring_jpeg); 2087 amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
2088 break; 2088 break;
2089 default: 2089 default:
2090 DRM_ERROR("Unhandled interrupt: %d %d\n", 2090 DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -2233,7 +2233,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
2233 2233
2234static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) 2234static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2235{ 2235{
2236 adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs; 2236 adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
2237 DRM_INFO("VCN decode is enabled in VM mode\n"); 2237 DRM_INFO("VCN decode is enabled in VM mode\n");
2238} 2238}
2239 2239
@@ -2242,14 +2242,14 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2242 int i; 2242 int i;
2243 2243
2244 for (i = 0; i < adev->vcn.num_enc_rings; ++i) 2244 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
2245 adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs; 2245 adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
2246 2246
2247 DRM_INFO("VCN encode is enabled in VM mode\n"); 2247 DRM_INFO("VCN encode is enabled in VM mode\n");
2248} 2248}
2249 2249
2250static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) 2250static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
2251{ 2251{
2252 adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; 2252 adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
2253 DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); 2253 DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
2254} 2254}
2255 2255
@@ -2260,8 +2260,8 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
2260 2260
2261static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev) 2261static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
2262{ 2262{
2263 adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; 2263 adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
2264 adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs; 2264 adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
2265} 2265}
2266 2266
2267const struct amdgpu_ip_block_version vcn_v2_0_ip_block = 2267const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
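
Between the two files, it is worth pinning down the data-structure split that every hunk above depends on. Below is a rough sketch of the per-instance container implied by the adev->vcn.inst[...] accesses; the member names come straight from the accesses visible in this diff, while the BO fields, the array bound, and the omitted firmware/DPG state are assumptions rather than a quote from the patched amdgpu_vcn.h.

struct amdgpu_vcn_inst {
	struct amdgpu_bo	*vcpu_bo;	/* assumed: fw/stack/context BO backing gpu_addr */
	void			*cpu_addr;	/* assumed */
	uint64_t		gpu_addr;	/* programs the VCPU cache windows in mc_resume */
	struct amdgpu_ring	ring_dec;
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
	struct amdgpu_ring	ring_jpeg;
	struct amdgpu_irq_src	irq;		/* one interrupt source per instance */
	struct amdgpu_vcn_reg	external;	/* per-instance MMIO register offsets */
};

struct amdgpu_vcn {
	/* firmware, idle work and DPG pause state omitted from this sketch */
	unsigned		num_vcn_inst;
	unsigned		num_enc_rings;
	struct amdgpu_vcn_reg	internal;	/* internal (DPG) offsets stay shared */
	struct amdgpu_vcn_inst	inst[2];	/* bound assumed; a dual-VCN part needs two */
};

Note how vcn_v2_0.c reaches the single instance through the pointer form adev->vcn.inst->..., while vcn_v2_5.c spells out adev->vcn.inst[0]; both resolve to the same element.
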
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 75fdb6881ac0..e27351267c9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -66,6 +66,7 @@ static int vcn_v2_5_early_init(void *handle)
66{ 66{
67 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 67 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
68 68
69 adev->vcn.num_vcn_inst = 1;
69 adev->vcn.num_enc_rings = 2; 70 adev->vcn.num_enc_rings = 2;
70 71
71 vcn_v2_5_set_dec_ring_funcs(adev); 72 vcn_v2_5_set_dec_ring_funcs(adev);
@@ -91,21 +92,21 @@ static int vcn_v2_5_sw_init(void *handle)
91 92
92 /* VCN DEC TRAP */ 93 /* VCN DEC TRAP */
93 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 94 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
94 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq); 95 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[0].irq);
95 if (r) 96 if (r)
96 return r; 97 return r;
97 98
98 /* VCN ENC TRAP */ 99 /* VCN ENC TRAP */
99 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 100 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
100 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 101 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
101 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.irq); 102 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[0].irq);
102 if (r) 103 if (r)
103 return r; 104 return r;
104 } 105 }
105 106
106 /* VCN JPEG TRAP */ 107 /* VCN JPEG TRAP */
107 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 108 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
108 VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.irq); 109 VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[0].irq);
109 if (r) 110 if (r)
110 return r; 111 return r;
111 112
@@ -127,11 +128,11 @@ static int vcn_v2_5_sw_init(void *handle)
127 if (r) 128 if (r)
128 return r; 129 return r;
129 130
130 ring = &adev->vcn.ring_dec; 131 ring = &adev->vcn.inst[0].ring_dec;
131 ring->use_doorbell = true; 132 ring->use_doorbell = true;
132 ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; 133 ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
133 sprintf(ring->name, "vcn_dec"); 134 sprintf(ring->name, "vcn_dec");
134 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); 135 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
135 if (r) 136 if (r)
136 return r; 137 return r;
137 138
@@ -143,36 +144,36 @@ static int vcn_v2_5_sw_init(void *handle)
143 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; 144 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
144 145
145 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; 146 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
146 adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); 147 adev->vcn.inst[0].external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
147 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; 148 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
148 adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); 149 adev->vcn.inst[0].external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
149 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; 150 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
150 adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); 151 adev->vcn.inst[0].external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
151 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; 152 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
152 adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); 153 adev->vcn.inst[0].external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
153 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; 154 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
154 adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); 155 adev->vcn.inst[0].external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
155 156
156 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 157 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
157 ring = &adev->vcn.ring_enc[i]; 158 ring = &adev->vcn.inst[0].ring_enc[i];
158 ring->use_doorbell = true; 159 ring->use_doorbell = true;
159 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; 160 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
160 sprintf(ring->name, "vcn_enc%d", i); 161 sprintf(ring->name, "vcn_enc%d", i);
161 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); 162 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
162 if (r) 163 if (r)
163 return r; 164 return r;
164 } 165 }
165 166
166 ring = &adev->vcn.ring_jpeg; 167 ring = &adev->vcn.inst[0].ring_jpeg;
167 ring->use_doorbell = true; 168 ring->use_doorbell = true;
168 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; 169 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
169 sprintf(ring->name, "vcn_jpeg"); 170 sprintf(ring->name, "vcn_jpeg");
170 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); 171 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
171 if (r) 172 if (r)
172 return r; 173 return r;
173 174
174 adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; 175 adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
175 adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); 176 adev->vcn.inst[0].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
176 177
177 return 0; 178 return 0;
178} 179}
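
The sw_init hunk above also fixes the doorbell layout for the instance: every ring sits at a small offset from a single base, adev->doorbell_index.vcn.vcn_ring0_1 << 1. A minimal sketch of that arithmetic follows; the enum and its names are illustrative only and do not exist in the driver.

/* Doorbell offsets relative to base = vcn_ring0_1 << 1, as programmed
 * by vcn_v2_5_sw_init() above. Illustrative names, not driver code. */
enum vcn_v2_5_doorbell_offset {
	VCN25_DB_DEC  = 0,	/* ring->doorbell_index = base         */
	VCN25_DB_JPEG = 1,	/* ring->doorbell_index = base + 1     */
	VCN25_DB_ENC0 = 2,	/* ring->doorbell_index = base + 2 + 0 */
	VCN25_DB_ENC1 = 3,	/* ring->doorbell_index = base + 2 + 1 */
};
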
@@ -208,7 +209,7 @@ static int vcn_v2_5_sw_fini(void *handle)
208static int vcn_v2_5_hw_init(void *handle) 209static int vcn_v2_5_hw_init(void *handle)
209{ 210{
210 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 211 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
211 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 212 struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
212 int i, r; 213 int i, r;
213 214
214 adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, 215 adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -221,7 +222,7 @@ static int vcn_v2_5_hw_init(void *handle)
221 } 222 }
222 223
223 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 224 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
224 ring = &adev->vcn.ring_enc[i]; 225 ring = &adev->vcn.inst[0].ring_enc[i];
225 ring->sched.ready = false; 226 ring->sched.ready = false;
226 continue; 227 continue;
227 r = amdgpu_ring_test_ring(ring); 228 r = amdgpu_ring_test_ring(ring);
@@ -231,7 +232,7 @@ static int vcn_v2_5_hw_init(void *handle)
231 } 232 }
232 } 233 }
233 234
234 ring = &adev->vcn.ring_jpeg; 235 ring = &adev->vcn.inst[0].ring_jpeg;
235 r = amdgpu_ring_test_ring(ring); 236 r = amdgpu_ring_test_ring(ring);
236 if (r) { 237 if (r) {
237 ring->sched.ready = false; 238 ring->sched.ready = false;
@@ -255,7 +256,7 @@ done:
255static int vcn_v2_5_hw_fini(void *handle) 256static int vcn_v2_5_hw_fini(void *handle)
256{ 257{
257 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 258 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
258 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 259 struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
259 int i; 260 int i;
260 261
261 if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) 262 if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
@@ -264,11 +265,11 @@ static int vcn_v2_5_hw_fini(void *handle)
264 ring->sched.ready = false; 265 ring->sched.ready = false;
265 266
266 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 267 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
267 ring = &adev->vcn.ring_enc[i]; 268 ring = &adev->vcn.inst[0].ring_enc[i];
268 ring->sched.ready = false; 269 ring->sched.ready = false;
269 } 270 }
270 271
271 ring = &adev->vcn.ring_jpeg; 272 ring = &adev->vcn.inst[0].ring_jpeg;
272 ring->sched.ready = false; 273 ring->sched.ready = false;
273 274
274 return 0; 275 return 0;
@@ -338,9 +339,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
338 offset = 0; 339 offset = 0;
339 } else { 340 } else {
340 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 341 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
341 lower_32_bits(adev->vcn.gpu_addr)); 342 lower_32_bits(adev->vcn.inst[0].gpu_addr));
342 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 343 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
343 upper_32_bits(adev->vcn.gpu_addr)); 344 upper_32_bits(adev->vcn.inst[0].gpu_addr));
344 offset = size; 345 offset = size;
345 /* No signed header for now from firmware 346 /* No signed header for now from firmware
346 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 347 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
@@ -352,17 +353,17 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
352 353
353 /* cache window 1: stack */ 354 /* cache window 1: stack */
354 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, 355 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
355 lower_32_bits(adev->vcn.gpu_addr + offset)); 356 lower_32_bits(adev->vcn.inst[0].gpu_addr + offset));
356 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, 357 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
357 upper_32_bits(adev->vcn.gpu_addr + offset)); 358 upper_32_bits(adev->vcn.inst[0].gpu_addr + offset));
358 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); 359 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
359 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); 360 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
360 361
361 /* cache window 2: context */ 362 /* cache window 2: context */
362 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, 363 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
363 lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 364 lower_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
364 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, 365 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
365 upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 366 upper_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
366 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); 367 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
367 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); 368 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
368} 369}
@@ -548,7 +549,7 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
548 */ 549 */
549static int jpeg_v2_5_start(struct amdgpu_device *adev) 550static int jpeg_v2_5_start(struct amdgpu_device *adev)
550{ 551{
551 struct amdgpu_ring *ring = &adev->vcn.ring_jpeg; 552 struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_jpeg;
552 uint32_t tmp; 553 uint32_t tmp;
553 554
554 /* disable anti hang mechanism */ 555 /* disable anti hang mechanism */
@@ -639,7 +640,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev)
639 640
640static int vcn_v2_5_start(struct amdgpu_device *adev) 641static int vcn_v2_5_start(struct amdgpu_device *adev)
641{ 642{
642 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 643 struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
643 uint32_t rb_bufsz, tmp; 644 uint32_t rb_bufsz, tmp;
644 int i, j, r; 645 int i, j, r;
645 646
@@ -781,14 +782,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
781 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); 782 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
782 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 783 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
783 lower_32_bits(ring->wptr)); 784 lower_32_bits(ring->wptr));
784 ring = &adev->vcn.ring_enc[0]; 785 ring = &adev->vcn.inst[0].ring_enc[0];
785 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); 786 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
786 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 787 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
787 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); 788 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
788 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 789 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
789 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); 790 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
790 791
791 ring = &adev->vcn.ring_enc[1]; 792 ring = &adev->vcn.inst[0].ring_enc[1];
792 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); 793 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
793 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 794 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
794 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); 795 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -951,7 +952,7 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
951{ 952{
952 struct amdgpu_device *adev = ring->adev; 953 struct amdgpu_device *adev = ring->adev;
953 954
954 if (ring == &adev->vcn.ring_enc[0]) 955 if (ring == &adev->vcn.inst[0].ring_enc[0])
955 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); 956 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
956 else 957 else
957 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); 958 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -968,7 +969,7 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
968{ 969{
969 struct amdgpu_device *adev = ring->adev; 970 struct amdgpu_device *adev = ring->adev;
970 971
971 if (ring == &adev->vcn.ring_enc[0]) { 972 if (ring == &adev->vcn.inst[0].ring_enc[0]) {
972 if (ring->use_doorbell) 973 if (ring->use_doorbell)
973 return adev->wb.wb[ring->wptr_offs]; 974 return adev->wb.wb[ring->wptr_offs];
974 else 975 else
@@ -992,7 +993,7 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
992{ 993{
993 struct amdgpu_device *adev = ring->adev; 994 struct amdgpu_device *adev = ring->adev;
994 995
995 if (ring == &adev->vcn.ring_enc[0]) { 996 if (ring == &adev->vcn.inst[0].ring_enc[0]) {
996 if (ring->use_doorbell) { 997 if (ring->use_doorbell) {
997 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); 998 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
998 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 999 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -1121,7 +1122,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
1121 1122
1122static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) 1123static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1123{ 1124{
1124 adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; 1125 adev->vcn.inst[0].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1125 DRM_INFO("VCN decode is enabled in VM mode\n"); 1126 DRM_INFO("VCN decode is enabled in VM mode\n");
1126} 1127}
1127 1128
@@ -1130,14 +1131,14 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1130 int i; 1131 int i;
1131 1132
1132 for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1133 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1133 adev->vcn.ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; 1134 adev->vcn.inst[0].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1134 1135
1135 DRM_INFO("VCN encode is enabled in VM mode\n"); 1136 DRM_INFO("VCN encode is enabled in VM mode\n");
1136} 1137}
1137 1138
1138static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) 1139static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
1139{ 1140{
1140 adev->vcn.ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; 1141 adev->vcn.inst[0].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
1141 DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); 1142 DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
1142} 1143}
1143 1144
@@ -1212,16 +1213,16 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1212 1213
1213 switch (entry->src_id) { 1214 switch (entry->src_id) {
1214 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: 1215 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1215 amdgpu_fence_process(&adev->vcn.ring_dec); 1216 amdgpu_fence_process(&adev->vcn.inst[0].ring_dec);
1216 break; 1217 break;
1217 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: 1218 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1218 amdgpu_fence_process(&adev->vcn.ring_enc[0]); 1219 amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[0]);
1219 break; 1220 break;
1220 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: 1221 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1221 amdgpu_fence_process(&adev->vcn.ring_enc[1]); 1222 amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[1]);
1222 break; 1223 break;
1223 case VCN_2_0__SRCID__JPEG_DECODE: 1224 case VCN_2_0__SRCID__JPEG_DECODE:
1224 amdgpu_fence_process(&adev->vcn.ring_jpeg); 1225 amdgpu_fence_process(&adev->vcn.inst[0].ring_jpeg);
1225 break; 1226 break;
1226 default: 1227 default:
1227 DRM_ERROR("Unhandled interrupt: %d %d\n", 1228 DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1239,8 +1240,8 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1239 1240
1240static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) 1241static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1241{ 1242{
1242 adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; 1243 adev->vcn.inst[0].irq.num_types = adev->vcn.num_enc_rings + 2;
1243 adev->vcn.irq.funcs = &vcn_v2_5_irq_funcs; 1244 adev->vcn.inst[0].irq.funcs = &vcn_v2_5_irq_funcs;
1244} 1245}
1245 1246
1246static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { 1247static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
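
Every vcn_v2_5.c site above pins inst[0] because early_init sets num_vcn_inst = 1, so nothing behaves differently yet; the gain is that each site is now one loop away from driving a second instance. A hedged sketch of that pattern, mirroring the shape of vcn_v2_5_hw_fini() above but not code from this patch:

/* Sketch only: how the hard-coded inst[0] sites generalize once
 * num_vcn_inst can be greater than one. */
static void vcn_v2_5_stop_all_rings(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_dec.sched.ready = false;
		adev->vcn.inst[i].ring_jpeg.sched.ready = false;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.inst[i].ring_enc[j].sched.ready = false;
	}
}
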