aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrank Min <Frank.Min@amd.com>2017-04-16 23:51:44 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-04-28 17:33:00 -0400
commit6fa336a777712fac9da763d3df348fcfc96633f8 (patch)
tree45841d835dc142f131935cf22ec6a8f2a2d623b4
parentbeb2ced51b70a6cbccd8676b450e282fecf65565 (diff)
drm/amdgpu/uvd7: add UVD hw init sequences for sriov
Add UVD hw init.

Signed-off-by: Frank Min <Frank.Min@amd.com>
Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c101
1 file changed, 56 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 552bfcdd3236..eca8f6e01e97 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -368,7 +368,10 @@ static int uvd_v7_0_early_init(void *handle)
368{ 368{
369 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 369 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
370 370
371 adev->uvd.num_enc_rings = 2; 371 if (amdgpu_sriov_vf(adev))
372 adev->uvd.num_enc_rings = 1;
373 else
374 adev->uvd.num_enc_rings = 2;
372 uvd_v7_0_set_ring_funcs(adev); 375 uvd_v7_0_set_ring_funcs(adev);
373 uvd_v7_0_set_enc_ring_funcs(adev); 376 uvd_v7_0_set_enc_ring_funcs(adev);
374 uvd_v7_0_set_irq_funcs(adev); 377 uvd_v7_0_set_irq_funcs(adev);
@@ -421,12 +424,14 @@ static int uvd_v7_0_sw_init(void *handle)
421 r = amdgpu_uvd_resume(adev); 424 r = amdgpu_uvd_resume(adev);
422 if (r) 425 if (r)
423 return r; 426 return r;
427 if (!amdgpu_sriov_vf(adev)) {
428 ring = &adev->uvd.ring;
429 sprintf(ring->name, "uvd");
430 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
431 if (r)
432 return r;
433 }
424 434
425 ring = &adev->uvd.ring;
426 sprintf(ring->name, "uvd");
427 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
428 if (r)
429 return r;
430 435
431 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 436 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
432 ring = &adev->uvd.ring_enc[i]; 437 ring = &adev->uvd.ring_enc[i];
@@ -440,6 +445,10 @@ static int uvd_v7_0_sw_init(void *handle)
440 return r; 445 return r;
441 } 446 }
442 447
448 r = amdgpu_virt_alloc_mm_table(adev);
449 if (r)
450 return r;
451
443 return r; 452 return r;
444} 453}
445 454
@@ -448,6 +457,8 @@ static int uvd_v7_0_sw_fini(void *handle)
448 int i, r; 457 int i, r;
449 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 458 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
450 459
460 amdgpu_virt_free_mm_table(adev);
461
451 r = amdgpu_uvd_suspend(adev); 462 r = amdgpu_uvd_suspend(adev);
452 if (r) 463 if (r)
453 return r; 464 return r;
@@ -474,48 +485,53 @@ static int uvd_v7_0_hw_init(void *handle)
474 uint32_t tmp; 485 uint32_t tmp;
475 int i, r; 486 int i, r;
476 487
477 r = uvd_v7_0_start(adev); 488 if (amdgpu_sriov_vf(adev))
489 r = uvd_v7_0_sriov_start(adev);
490 else
491 r = uvd_v7_0_start(adev);
478 if (r) 492 if (r)
479 goto done; 493 goto done;
480 494
481 ring->ready = true; 495 if (!amdgpu_sriov_vf(adev)) {
482 r = amdgpu_ring_test_ring(ring); 496 ring->ready = true;
483 if (r) { 497 r = amdgpu_ring_test_ring(ring);
484 ring->ready = false; 498 if (r) {
485 goto done; 499 ring->ready = false;
486 } 500 goto done;
501 }
487 502
488 r = amdgpu_ring_alloc(ring, 10); 503 r = amdgpu_ring_alloc(ring, 10);
489 if (r) { 504 if (r) {
490 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 505 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
491 goto done; 506 goto done;
492 } 507 }
493 508
494 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, 509 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
495 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); 510 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
496 amdgpu_ring_write(ring, tmp); 511 amdgpu_ring_write(ring, tmp);
497 amdgpu_ring_write(ring, 0xFFFFF); 512 amdgpu_ring_write(ring, 0xFFFFF);
498 513
499 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, 514 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
500 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); 515 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
501 amdgpu_ring_write(ring, tmp); 516 amdgpu_ring_write(ring, tmp);
502 amdgpu_ring_write(ring, 0xFFFFF); 517 amdgpu_ring_write(ring, 0xFFFFF);
503 518
504 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, 519 tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
505 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); 520 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
506 amdgpu_ring_write(ring, tmp); 521 amdgpu_ring_write(ring, tmp);
507 amdgpu_ring_write(ring, 0xFFFFF); 522 amdgpu_ring_write(ring, 0xFFFFF);
508 523
509 /* Clear timeout status bits */ 524 /* Clear timeout status bits */
510 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, 525 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
511 mmUVD_SEMA_TIMEOUT_STATUS), 0)); 526 mmUVD_SEMA_TIMEOUT_STATUS), 0));
512 amdgpu_ring_write(ring, 0x8); 527 amdgpu_ring_write(ring, 0x8);
513 528
514 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, 529 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
515 mmUVD_SEMA_CNTL), 0)); 530 mmUVD_SEMA_CNTL), 0));
516 amdgpu_ring_write(ring, 3); 531 amdgpu_ring_write(ring, 3);
517 532
518 amdgpu_ring_commit(ring); 533 amdgpu_ring_commit(ring);
534 }
519 535
520 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 536 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
521 ring = &adev->uvd.ring_enc[i]; 537 ring = &adev->uvd.ring_enc[i];
@@ -692,7 +708,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
692 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} }; 708 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
693 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; 709 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
694 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} }; 710 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
695 //struct mmsch_v1_0_cmd_indirect_write indirect_wt = {{0}};
696 struct mmsch_v1_0_cmd_end end = { {0} }; 711 struct mmsch_v1_0_cmd_end end = { {0} };
697 uint32_t *init_table = adev->virt.mm_table.cpu_addr; 712 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
698 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table; 713 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
@@ -863,11 +878,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
863 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); 878 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
864 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); 879 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
865 880
866 ring = &adev->uvd.ring_enc[1];
867 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
868 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
869 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
870
871 /* add end packet */ 881 /* add end packet */
872 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); 882 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
873 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; 883 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
@@ -1489,7 +1499,8 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1489 amdgpu_fence_process(&adev->uvd.ring_enc[0]); 1499 amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1490 break; 1500 break;
1491 case 120: 1501 case 120:
1492 amdgpu_fence_process(&adev->uvd.ring_enc[1]); 1502 if (!amdgpu_sriov_vf(adev))
1503 amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1493 break; 1504 break;
1494 default: 1505 default:
1495 DRM_ERROR("Unhandled interrupt: %d %d\n", 1506 DRM_ERROR("Unhandled interrupt: %d %d\n",