diff options
author | Rex Zhu <Rex.Zhu@amd.com> | 2018-03-06 00:13:21 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-03-07 16:10:10 -0500 |
commit | ecc124b0355cb8de070f97e04a919200925598d5 (patch) | |
tree | 2a52e06a02dafa606984934c148ece26a81ead53 /drivers/gpu | |
parent | 807f93ac6a1d37498fcd8f6af28f1e5f569ad8e4 (diff) |
drm/amd/pp: Delete the wrapper layer of smu_allocate/free_memory
use amdgpu_bo_create/free_kernel instead.
Reviewed-by: Alex Deucher <alexdeucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 84 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 71 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 64 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 51 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 195 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 7 |
10 files changed, 195 insertions, 301 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index e1f6e830c84d..8872c5cb4f67 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h | |||
@@ -106,13 +106,6 @@ extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); | |||
106 | extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | 106 | extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, |
107 | uint16_t msg, uint32_t parameter); | 107 | uint16_t msg, uint32_t parameter); |
108 | 108 | ||
109 | extern int smu_allocate_memory(void *device, uint32_t size, | ||
110 | enum cgs_gpu_mem_type type, | ||
111 | uint32_t byte_align, uint64_t *mc_addr, | ||
112 | void **kptr, void *handle); | ||
113 | |||
114 | extern int smu_free_memory(void *device, void *handle); | ||
115 | |||
116 | extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); | 109 | extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); |
117 | 110 | ||
118 | extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); | 111 | extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 7fe4c1170edc..df585392b2d7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | |||
@@ -347,8 +347,8 @@ static int cz_smu_populate_single_scratch_task( | |||
347 | return -EINVAL; | 347 | return -EINVAL; |
348 | } | 348 | } |
349 | 349 | ||
350 | task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low; | 350 | task->addr.low = smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr); |
351 | task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high; | 351 | task->addr.high = smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr); |
352 | task->size_bytes = cz_smu->scratch_buffer[i].data_size; | 352 | task->size_bytes = cz_smu->scratch_buffer[i].data_size; |
353 | 353 | ||
354 | if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) { | 354 | if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) { |
@@ -384,8 +384,8 @@ static int cz_smu_populate_single_ucode_load_task( | |||
384 | return -EINVAL; | 384 | return -EINVAL; |
385 | } | 385 | } |
386 | 386 | ||
387 | task->addr.low = cz_smu->driver_buffer[i].mc_addr_low; | 387 | task->addr.low = smu_lower_32_bits(cz_smu->driver_buffer[i].mc_addr); |
388 | task->addr.high = cz_smu->driver_buffer[i].mc_addr_high; | 388 | task->addr.high = smu_upper_32_bits(cz_smu->driver_buffer[i].mc_addr); |
389 | task->size_bytes = cz_smu->driver_buffer[i].data_size; | 389 | task->size_bytes = cz_smu->driver_buffer[i].data_size; |
390 | 390 | ||
391 | return 0; | 391 | return 0; |
@@ -566,11 +566,7 @@ static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) | |||
566 | ucode_id, &info); | 566 | ucode_id, &info); |
567 | 567 | ||
568 | if (ret == 0) { | 568 | if (ret == 0) { |
569 | cz_smu->driver_buffer[i].mc_addr_high = | 569 | cz_smu->driver_buffer[i].mc_addr = info.mc_addr; |
570 | smu_upper_32_bits(info.mc_addr); | ||
571 | |||
572 | cz_smu->driver_buffer[i].mc_addr_low = | ||
573 | smu_lower_32_bits(info.mc_addr); | ||
574 | 570 | ||
575 | cz_smu->driver_buffer[i].data_size = info.image_size; | 571 | cz_smu->driver_buffer[i].data_size = info.image_size; |
576 | 572 | ||
@@ -589,19 +585,12 @@ static int cz_smu_populate_single_scratch_entry( | |||
589 | struct cz_buffer_entry *entry) | 585 | struct cz_buffer_entry *entry) |
590 | { | 586 | { |
591 | struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; | 587 | struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; |
592 | long long mc_addr = | ||
593 | ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) | ||
594 | | cz_smu->smu_buffer.mc_addr_low; | ||
595 | |||
596 | uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte); | 588 | uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte); |
597 | 589 | ||
598 | mc_addr += cz_smu->smu_buffer_used_bytes; | ||
599 | |||
600 | entry->data_size = ulsize_byte; | 590 | entry->data_size = ulsize_byte; |
601 | entry->kaddr = (char *) cz_smu->smu_buffer.kaddr + | 591 | entry->kaddr = (char *) cz_smu->smu_buffer.kaddr + |
602 | cz_smu->smu_buffer_used_bytes; | 592 | cz_smu->smu_buffer_used_bytes; |
603 | entry->mc_addr_low = smu_lower_32_bits(mc_addr); | 593 | entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes; |
604 | entry->mc_addr_high = smu_upper_32_bits(mc_addr); | ||
605 | entry->firmware_ID = scratch_type; | 594 | entry->firmware_ID = scratch_type; |
606 | 595 | ||
607 | cz_smu->smu_buffer_used_bytes += ulsize_aligned; | 596 | cz_smu->smu_buffer_used_bytes += ulsize_aligned; |
@@ -624,11 +613,11 @@ static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) | |||
624 | 613 | ||
625 | cz_send_msg_to_smc_with_parameter(hwmgr, | 614 | cz_send_msg_to_smc_with_parameter(hwmgr, |
626 | PPSMC_MSG_SetClkTableAddrHi, | 615 | PPSMC_MSG_SetClkTableAddrHi, |
627 | cz_smu->scratch_buffer[i].mc_addr_high); | 616 | smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr)); |
628 | 617 | ||
629 | cz_send_msg_to_smc_with_parameter(hwmgr, | 618 | cz_send_msg_to_smc_with_parameter(hwmgr, |
630 | PPSMC_MSG_SetClkTableAddrLo, | 619 | PPSMC_MSG_SetClkTableAddrLo, |
631 | cz_smu->scratch_buffer[i].mc_addr_low); | 620 | smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr)); |
632 | 621 | ||
633 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, | 622 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, |
634 | cz_smu->toc_entry_clock_table); | 623 | cz_smu->toc_entry_clock_table); |
@@ -651,11 +640,11 @@ static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) | |||
651 | 640 | ||
652 | cz_send_msg_to_smc_with_parameter(hwmgr, | 641 | cz_send_msg_to_smc_with_parameter(hwmgr, |
653 | PPSMC_MSG_SetClkTableAddrHi, | 642 | PPSMC_MSG_SetClkTableAddrHi, |
654 | cz_smu->scratch_buffer[i].mc_addr_high); | 643 | smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr)); |
655 | 644 | ||
656 | cz_send_msg_to_smc_with_parameter(hwmgr, | 645 | cz_send_msg_to_smc_with_parameter(hwmgr, |
657 | PPSMC_MSG_SetClkTableAddrLo, | 646 | PPSMC_MSG_SetClkTableAddrLo, |
658 | cz_smu->scratch_buffer[i].mc_addr_low); | 647 | smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr)); |
659 | 648 | ||
660 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, | 649 | cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, |
661 | cz_smu->toc_entry_clock_table); | 650 | cz_smu->toc_entry_clock_table); |
@@ -686,11 +675,11 @@ static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
686 | 675 | ||
687 | cz_send_msg_to_smc_with_parameter(hwmgr, | 676 | cz_send_msg_to_smc_with_parameter(hwmgr, |
688 | PPSMC_MSG_DriverDramAddrHi, | 677 | PPSMC_MSG_DriverDramAddrHi, |
689 | cz_smu->toc_buffer.mc_addr_high); | 678 | smu_upper_32_bits(cz_smu->toc_buffer.mc_addr)); |
690 | 679 | ||
691 | cz_send_msg_to_smc_with_parameter(hwmgr, | 680 | cz_send_msg_to_smc_with_parameter(hwmgr, |
692 | PPSMC_MSG_DriverDramAddrLo, | 681 | PPSMC_MSG_DriverDramAddrLo, |
693 | cz_smu->toc_buffer.mc_addr_low); | 682 | smu_lower_32_bits(cz_smu->toc_buffer.mc_addr)); |
694 | 683 | ||
695 | cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); | 684 | cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); |
696 | 685 | ||
@@ -750,7 +739,6 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr) | |||
750 | 739 | ||
751 | static int cz_smu_init(struct pp_hwmgr *hwmgr) | 740 | static int cz_smu_init(struct pp_hwmgr *hwmgr) |
752 | { | 741 | { |
753 | uint64_t mc_addr = 0; | ||
754 | int ret = 0; | 742 | int ret = 0; |
755 | struct cz_smumgr *cz_smu; | 743 | struct cz_smumgr *cz_smu; |
756 | 744 | ||
@@ -768,31 +756,29 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr) | |||
768 | ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + | 756 | ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + |
769 | ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); | 757 | ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); |
770 | 758 | ||
771 | ret = smu_allocate_memory(hwmgr->device, | 759 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
772 | cz_smu->toc_buffer.data_size, | 760 | cz_smu->toc_buffer.data_size, |
773 | CGS_GPU_MEM_TYPE__GART_CACHEABLE, | ||
774 | PAGE_SIZE, | 761 | PAGE_SIZE, |
775 | &mc_addr, | 762 | AMDGPU_GEM_DOMAIN_VRAM, |
776 | &cz_smu->toc_buffer.kaddr, | 763 | &cz_smu->toc_buffer.handle, |
777 | &cz_smu->toc_buffer.handle); | 764 | &cz_smu->toc_buffer.mc_addr, |
778 | if (ret != 0) | 765 | &cz_smu->toc_buffer.kaddr); |
779 | return -1; | 766 | if (ret) |
780 | 767 | return -EINVAL; | |
781 | cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); | ||
782 | cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); | ||
783 | 768 | ||
784 | ret = smu_allocate_memory(hwmgr->device, | 769 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
785 | cz_smu->smu_buffer.data_size, | 770 | cz_smu->smu_buffer.data_size, |
786 | CGS_GPU_MEM_TYPE__GART_CACHEABLE, | ||
787 | PAGE_SIZE, | 771 | PAGE_SIZE, |
788 | &mc_addr, | 772 | AMDGPU_GEM_DOMAIN_VRAM, |
789 | &cz_smu->smu_buffer.kaddr, | 773 | &cz_smu->smu_buffer.handle, |
790 | &cz_smu->smu_buffer.handle); | 774 | &cz_smu->smu_buffer.mc_addr, |
791 | if (ret != 0) | 775 | &cz_smu->smu_buffer.kaddr); |
792 | return -1; | 776 | if (ret) { |
793 | 777 | amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle, | |
794 | cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); | 778 | &cz_smu->toc_buffer.mc_addr, |
795 | cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); | 779 | &cz_smu->toc_buffer.kaddr); |
780 | return -EINVAL; | ||
781 | } | ||
796 | 782 | ||
797 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, | 783 | if (0 != cz_smu_populate_single_scratch_entry(hwmgr, |
798 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | 784 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, |
@@ -845,10 +831,12 @@ static int cz_smu_fini(struct pp_hwmgr *hwmgr) | |||
845 | 831 | ||
846 | cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; | 832 | cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; |
847 | if (cz_smu) { | 833 | if (cz_smu) { |
848 | cgs_free_gpu_mem(hwmgr->device, | 834 | amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle, |
849 | cz_smu->toc_buffer.handle); | 835 | &cz_smu->toc_buffer.mc_addr, |
850 | cgs_free_gpu_mem(hwmgr->device, | 836 | &cz_smu->toc_buffer.kaddr); |
851 | cz_smu->smu_buffer.handle); | 837 | amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle, |
838 | &cz_smu->smu_buffer.mc_addr, | ||
839 | &cz_smu->smu_buffer.kaddr); | ||
852 | kfree(cz_smu); | 840 | kfree(cz_smu); |
853 | } | 841 | } |
854 | 842 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h index 756b2c4b5af0..c13ab8377e26 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h | |||
@@ -60,11 +60,10 @@ enum cz_scratch_entry { | |||
60 | 60 | ||
61 | struct cz_buffer_entry { | 61 | struct cz_buffer_entry { |
62 | uint32_t data_size; | 62 | uint32_t data_size; |
63 | uint32_t mc_addr_low; | 63 | uint64_t mc_addr; |
64 | uint32_t mc_addr_high; | ||
65 | void *kaddr; | 64 | void *kaddr; |
66 | enum cz_scratch_entry firmware_ID; | 65 | enum cz_scratch_entry firmware_ID; |
67 | unsigned long handle; /* as bo handle used when release bo */ | 66 | struct amdgpu_bo *handle; /* as bo handle used when release bo */ |
68 | }; | 67 | }; |
69 | 68 | ||
70 | struct cz_register_index_data_pair { | 69 | struct cz_register_index_data_pair { |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index 2d662b44af54..9a0aedbf2b59 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | |||
@@ -169,11 +169,11 @@ int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, | |||
169 | "Invalid SMU Table Length!", return -EINVAL;); | 169 | "Invalid SMU Table Length!", return -EINVAL;); |
170 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 170 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
171 | PPSMC_MSG_SetDriverDramAddrHigh, | 171 | PPSMC_MSG_SetDriverDramAddrHigh, |
172 | priv->smu_tables.entry[table_id].table_addr_high) == 0, | 172 | smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, |
173 | "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); | 173 | "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); |
174 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 174 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
175 | PPSMC_MSG_SetDriverDramAddrLow, | 175 | PPSMC_MSG_SetDriverDramAddrLow, |
176 | priv->smu_tables.entry[table_id].table_addr_low) == 0, | 176 | smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, |
177 | "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", | 177 | "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", |
178 | return -EINVAL;); | 178 | return -EINVAL;); |
179 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 179 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
@@ -182,7 +182,7 @@ int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, | |||
182 | "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", | 182 | "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", |
183 | return -EINVAL;); | 183 | return -EINVAL;); |
184 | 184 | ||
185 | memcpy(table, priv->smu_tables.entry[table_id].table, | 185 | memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, |
186 | priv->smu_tables.entry[table_id].size); | 186 | priv->smu_tables.entry[table_id].size); |
187 | 187 | ||
188 | return 0; | 188 | return 0; |
@@ -206,12 +206,12 @@ int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, | |||
206 | 206 | ||
207 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 207 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
208 | PPSMC_MSG_SetDriverDramAddrHigh, | 208 | PPSMC_MSG_SetDriverDramAddrHigh, |
209 | priv->smu_tables.entry[table_id].table_addr_high) == 0, | 209 | smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, |
210 | "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", | 210 | "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", |
211 | return -EINVAL;); | 211 | return -EINVAL;); |
212 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 212 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
213 | PPSMC_MSG_SetDriverDramAddrLow, | 213 | PPSMC_MSG_SetDriverDramAddrLow, |
214 | priv->smu_tables.entry[table_id].table_addr_low) == 0, | 214 | smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, |
215 | "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", | 215 | "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", |
216 | return -EINVAL;); | 216 | return -EINVAL;); |
217 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, | 217 | PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, |
@@ -292,10 +292,12 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr) | |||
292 | if (priv) { | 292 | if (priv) { |
293 | rv_smc_disable_sdma(hwmgr); | 293 | rv_smc_disable_sdma(hwmgr); |
294 | rv_smc_disable_vcn(hwmgr); | 294 | rv_smc_disable_vcn(hwmgr); |
295 | cgs_free_gpu_mem(hwmgr->device, | 295 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle, |
296 | priv->smu_tables.entry[WMTABLE].handle); | 296 | &priv->smu_tables.entry[WMTABLE].mc_addr, |
297 | cgs_free_gpu_mem(hwmgr->device, | 297 | priv->smu_tables.entry[WMTABLE].table); |
298 | priv->smu_tables.entry[CLOCKTABLE].handle); | 298 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle, |
299 | &priv->smu_tables.entry[CLOCKTABLE].mc_addr, | ||
300 | priv->smu_tables.entry[CLOCKTABLE].table); | ||
299 | kfree(hwmgr->smu_backend); | 301 | kfree(hwmgr->smu_backend); |
300 | hwmgr->smu_backend = NULL; | 302 | hwmgr->smu_backend = NULL; |
301 | } | 303 | } |
@@ -328,7 +330,8 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) | |||
328 | struct rv_smumgr *priv; | 330 | struct rv_smumgr *priv; |
329 | uint64_t mc_addr; | 331 | uint64_t mc_addr; |
330 | void *kaddr = NULL; | 332 | void *kaddr = NULL; |
331 | unsigned long handle; | 333 | struct amdgpu_bo *handle; |
334 | int r; | ||
332 | 335 | ||
333 | priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL); | 336 | priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL); |
334 | 337 | ||
@@ -338,54 +341,44 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) | |||
338 | hwmgr->smu_backend = priv; | 341 | hwmgr->smu_backend = priv; |
339 | 342 | ||
340 | /* allocate space for watermarks table */ | 343 | /* allocate space for watermarks table */ |
341 | smu_allocate_memory(hwmgr->device, | 344 | r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
342 | sizeof(Watermarks_t), | 345 | sizeof(Watermarks_t), |
343 | CGS_GPU_MEM_TYPE__GART_CACHEABLE, | ||
344 | PAGE_SIZE, | 346 | PAGE_SIZE, |
347 | AMDGPU_GEM_DOMAIN_VRAM, | ||
348 | &handle, | ||
345 | &mc_addr, | 349 | &mc_addr, |
346 | &kaddr, | 350 | &kaddr); |
347 | &handle); | ||
348 | 351 | ||
349 | PP_ASSERT_WITH_CODE(kaddr, | 352 | if (r) |
350 | "[rv_smu_init] Out of memory for wmtable.", | 353 | return -EINVAL; |
351 | kfree(hwmgr->smu_backend); | ||
352 | hwmgr->smu_backend = NULL; | ||
353 | return -EINVAL); | ||
354 | 354 | ||
355 | priv->smu_tables.entry[WMTABLE].version = 0x01; | 355 | priv->smu_tables.entry[WMTABLE].version = 0x01; |
356 | priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); | 356 | priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); |
357 | priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; | 357 | priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; |
358 | priv->smu_tables.entry[WMTABLE].table_addr_high = | 358 | priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr; |
359 | smu_upper_32_bits(mc_addr); | ||
360 | priv->smu_tables.entry[WMTABLE].table_addr_low = | ||
361 | smu_lower_32_bits(mc_addr); | ||
362 | priv->smu_tables.entry[WMTABLE].table = kaddr; | 359 | priv->smu_tables.entry[WMTABLE].table = kaddr; |
363 | priv->smu_tables.entry[WMTABLE].handle = handle; | 360 | priv->smu_tables.entry[WMTABLE].handle = handle; |
364 | 361 | ||
365 | /* allocate space for watermarks table */ | 362 | /* allocate space for watermarks table */ |
366 | smu_allocate_memory(hwmgr->device, | 363 | r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
367 | sizeof(DpmClocks_t), | 364 | sizeof(DpmClocks_t), |
368 | CGS_GPU_MEM_TYPE__GART_CACHEABLE, | ||
369 | PAGE_SIZE, | 365 | PAGE_SIZE, |
366 | AMDGPU_GEM_DOMAIN_VRAM, | ||
367 | &handle, | ||
370 | &mc_addr, | 368 | &mc_addr, |
371 | &kaddr, | 369 | &kaddr); |
372 | &handle); | 370 | |
373 | 371 | if (r) { | |
374 | PP_ASSERT_WITH_CODE(kaddr, | 372 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle, |
375 | "[rv_smu_init] Out of memory for CLOCKTABLE.", | 373 | &priv->smu_tables.entry[WMTABLE].mc_addr, |
376 | cgs_free_gpu_mem(hwmgr->device, | 374 | &priv->smu_tables.entry[WMTABLE].table); |
377 | (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); | 375 | return -EINVAL; |
378 | kfree(hwmgr->smu_backend); | 376 | } |
379 | hwmgr->smu_backend = NULL; | ||
380 | return -EINVAL); | ||
381 | 377 | ||
382 | priv->smu_tables.entry[CLOCKTABLE].version = 0x01; | 378 | priv->smu_tables.entry[CLOCKTABLE].version = 0x01; |
383 | priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t); | 379 | priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t); |
384 | priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS; | 380 | priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS; |
385 | priv->smu_tables.entry[CLOCKTABLE].table_addr_high = | 381 | priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr; |
386 | smu_upper_32_bits(mc_addr); | ||
387 | priv->smu_tables.entry[CLOCKTABLE].table_addr_low = | ||
388 | smu_lower_32_bits(mc_addr); | ||
389 | priv->smu_tables.entry[CLOCKTABLE].table = kaddr; | 382 | priv->smu_tables.entry[CLOCKTABLE].table = kaddr; |
390 | priv->smu_tables.entry[CLOCKTABLE].handle = handle; | 383 | priv->smu_tables.entry[CLOCKTABLE].handle = handle; |
391 | 384 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index caebdbebdcd8..0ff4ac5838f7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | |||
@@ -37,10 +37,9 @@ struct smu_table_entry { | |||
37 | uint32_t version; | 37 | uint32_t version; |
38 | uint32_t size; | 38 | uint32_t size; |
39 | uint32_t table_id; | 39 | uint32_t table_id; |
40 | uint32_t table_addr_high; | 40 | uint64_t mc_addr; |
41 | uint32_t table_addr_low; | 41 | void *table; |
42 | uint8_t *table; | 42 | struct amdgpu_bo *handle; |
43 | unsigned long handle; | ||
44 | }; | 43 | }; |
45 | 44 | ||
46 | struct smu_table_array { | 45 | struct smu_table_array { |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 311ff3718618..92dd4bc8a05e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | |||
@@ -412,10 +412,10 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
412 | if (!cgs_is_virtualization_enabled(hwmgr->device)) { | 412 | if (!cgs_is_virtualization_enabled(hwmgr->device)) { |
413 | smu7_send_msg_to_smc_with_parameter(hwmgr, | 413 | smu7_send_msg_to_smc_with_parameter(hwmgr, |
414 | PPSMC_MSG_SMU_DRAM_ADDR_HI, | 414 | PPSMC_MSG_SMU_DRAM_ADDR_HI, |
415 | smu_data->smu_buffer.mc_addr_high); | 415 | smu_upper_32_bits(smu_data->smu_buffer.mc_addr)); |
416 | smu7_send_msg_to_smc_with_parameter(hwmgr, | 416 | smu7_send_msg_to_smc_with_parameter(hwmgr, |
417 | PPSMC_MSG_SMU_DRAM_ADDR_LO, | 417 | PPSMC_MSG_SMU_DRAM_ADDR_LO, |
418 | smu_data->smu_buffer.mc_addr_low); | 418 | smu_lower_32_bits(smu_data->smu_buffer.mc_addr)); |
419 | } | 419 | } |
420 | fw_to_load = UCODE_ID_RLC_G_MASK | 420 | fw_to_load = UCODE_ID_RLC_G_MASK |
421 | + UCODE_ID_SDMA0_MASK | 421 | + UCODE_ID_SDMA0_MASK |
@@ -472,8 +472,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
472 | UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), | 472 | UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), |
473 | "Failed to Get Firmware Entry.", return -EINVAL); | 473 | "Failed to Get Firmware Entry.", return -EINVAL); |
474 | 474 | ||
475 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); | 475 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_upper_32_bits(smu_data->header_buffer.mc_addr)); |
476 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); | 476 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_lower_32_bits(smu_data->header_buffer.mc_addr)); |
477 | 477 | ||
478 | if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) | 478 | if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) |
479 | pr_err("Fail to Request SMU Load uCode"); | 479 | pr_err("Fail to Request SMU Load uCode"); |
@@ -587,7 +587,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) | |||
587 | struct smu7_smumgr *smu_data; | 587 | struct smu7_smumgr *smu_data; |
588 | uint8_t *internal_buf; | 588 | uint8_t *internal_buf; |
589 | uint64_t mc_addr = 0; | 589 | uint64_t mc_addr = 0; |
590 | 590 | int r; | |
591 | /* Allocate memory for backend private data */ | 591 | /* Allocate memory for backend private data */ |
592 | smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); | 592 | smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); |
593 | smu_data->header_buffer.data_size = | 593 | smu_data->header_buffer.data_size = |
@@ -595,47 +595,40 @@ int smu7_init(struct pp_hwmgr *hwmgr) | |||
595 | 595 | ||
596 | /* Allocate FW image data structure and header buffer and | 596 | /* Allocate FW image data structure and header buffer and |
597 | * send the header buffer address to SMU */ | 597 | * send the header buffer address to SMU */ |
598 | smu_allocate_memory(hwmgr->device, | 598 | r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
599 | smu_data->header_buffer.data_size, | 599 | smu_data->header_buffer.data_size, |
600 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
601 | PAGE_SIZE, | 600 | PAGE_SIZE, |
601 | AMDGPU_GEM_DOMAIN_VRAM, | ||
602 | &smu_data->header_buffer.handle, | ||
602 | &mc_addr, | 603 | &mc_addr, |
603 | &smu_data->header_buffer.kaddr, | 604 | &smu_data->header_buffer.kaddr); |
604 | &smu_data->header_buffer.handle); | ||
605 | 605 | ||
606 | smu_data->header = smu_data->header_buffer.kaddr; | 606 | if (r) |
607 | smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); | 607 | return -EINVAL; |
608 | smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); | ||
609 | 608 | ||
610 | PP_ASSERT_WITH_CODE((NULL != smu_data->header), | 609 | smu_data->header = smu_data->header_buffer.kaddr; |
611 | "Out of memory.", | 610 | smu_data->header_buffer.mc_addr = mc_addr; |
612 | kfree(hwmgr->smu_backend); | ||
613 | cgs_free_gpu_mem(hwmgr->device, | ||
614 | (cgs_handle_t)smu_data->header_buffer.handle); | ||
615 | return -EINVAL); | ||
616 | 611 | ||
617 | if (cgs_is_virtualization_enabled(hwmgr->device)) | 612 | if (cgs_is_virtualization_enabled(hwmgr->device)) |
618 | return 0; | 613 | return 0; |
619 | 614 | ||
620 | smu_data->smu_buffer.data_size = 200*4096; | 615 | smu_data->smu_buffer.data_size = 200*4096; |
621 | smu_allocate_memory(hwmgr->device, | 616 | r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
622 | smu_data->smu_buffer.data_size, | 617 | smu_data->smu_buffer.data_size, |
623 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
624 | PAGE_SIZE, | 618 | PAGE_SIZE, |
619 | AMDGPU_GEM_DOMAIN_VRAM, | ||
620 | &smu_data->smu_buffer.handle, | ||
625 | &mc_addr, | 621 | &mc_addr, |
626 | &smu_data->smu_buffer.kaddr, | 622 | &smu_data->smu_buffer.kaddr); |
627 | &smu_data->smu_buffer.handle); | ||
628 | 623 | ||
624 | if (r) { | ||
625 | amdgpu_bo_free_kernel(&smu_data->header_buffer.handle, | ||
626 | &smu_data->header_buffer.mc_addr, | ||
627 | &smu_data->header_buffer.kaddr); | ||
628 | return -EINVAL; | ||
629 | } | ||
629 | internal_buf = smu_data->smu_buffer.kaddr; | 630 | internal_buf = smu_data->smu_buffer.kaddr; |
630 | smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); | 631 | smu_data->smu_buffer.mc_addr = mc_addr; |
631 | smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); | ||
632 | |||
633 | PP_ASSERT_WITH_CODE((NULL != internal_buf), | ||
634 | "Out of memory.", | ||
635 | kfree(hwmgr->smu_backend); | ||
636 | cgs_free_gpu_mem(hwmgr->device, | ||
637 | (cgs_handle_t)smu_data->smu_buffer.handle); | ||
638 | return -EINVAL); | ||
639 | 632 | ||
640 | if (smum_is_hw_avfs_present(hwmgr)) | 633 | if (smum_is_hw_avfs_present(hwmgr)) |
641 | smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; | 634 | smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; |
@@ -650,9 +643,14 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) | |||
650 | { | 643 | { |
651 | struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); | 644 | struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); |
652 | 645 | ||
653 | smu_free_memory(hwmgr->device, (void *) smu_data->header_buffer.handle); | 646 | amdgpu_bo_free_kernel(&smu_data->header_buffer.handle, |
647 | &smu_data->header_buffer.mc_addr, | ||
648 | &smu_data->header_buffer.kaddr); | ||
649 | |||
654 | if (!cgs_is_virtualization_enabled(hwmgr->device)) | 650 | if (!cgs_is_virtualization_enabled(hwmgr->device)) |
655 | smu_free_memory(hwmgr->device, (void *) smu_data->smu_buffer.handle); | 651 | amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle, |
652 | &smu_data->smu_buffer.mc_addr, | ||
653 | &smu_data->smu_buffer.kaddr); | ||
656 | 654 | ||
657 | kfree(hwmgr->smu_backend); | 655 | kfree(hwmgr->smu_backend); |
658 | hwmgr->smu_backend = NULL; | 656 | hwmgr->smu_backend = NULL; |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index c87263bc0caa..64334a82b77b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | |||
@@ -31,10 +31,9 @@ | |||
31 | 31 | ||
32 | struct smu7_buffer_entry { | 32 | struct smu7_buffer_entry { |
33 | uint32_t data_size; | 33 | uint32_t data_size; |
34 | uint32_t mc_addr_low; | 34 | uint64_t mc_addr; |
35 | uint32_t mc_addr_high; | ||
36 | void *kaddr; | 35 | void *kaddr; |
37 | unsigned long handle; | 36 | struct amdgpu_bo *handle; |
38 | }; | 37 | }; |
39 | 38 | ||
40 | struct smu7_avfs { | 39 | struct smu7_avfs { |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 43b1010ae7ee..3645127c8ee2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
@@ -144,57 +144,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | |||
144 | hwmgr, msg, parameter); | 144 | hwmgr, msg, parameter); |
145 | } | 145 | } |
146 | 146 | ||
147 | int smu_allocate_memory(void *device, uint32_t size, | ||
148 | enum cgs_gpu_mem_type type, | ||
149 | uint32_t byte_align, uint64_t *mc_addr, | ||
150 | void **kptr, void *handle) | ||
151 | { | ||
152 | int ret = 0; | ||
153 | cgs_handle_t cgs_handle; | ||
154 | |||
155 | if (device == NULL || handle == NULL || | ||
156 | mc_addr == NULL || kptr == NULL) | ||
157 | return -EINVAL; | ||
158 | |||
159 | ret = cgs_alloc_gpu_mem(device, type, size, byte_align, | ||
160 | (cgs_handle_t *)handle); | ||
161 | if (ret) | ||
162 | return -ENOMEM; | ||
163 | |||
164 | cgs_handle = *(cgs_handle_t *)handle; | ||
165 | |||
166 | ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr); | ||
167 | if (ret) | ||
168 | goto error_gmap; | ||
169 | |||
170 | ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr); | ||
171 | if (ret) | ||
172 | goto error_kmap; | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | error_kmap: | ||
177 | cgs_gunmap_gpu_mem(device, cgs_handle); | ||
178 | |||
179 | error_gmap: | ||
180 | cgs_free_gpu_mem(device, cgs_handle); | ||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | int smu_free_memory(void *device, void *handle) | ||
185 | { | ||
186 | cgs_handle_t cgs_handle = (cgs_handle_t)handle; | ||
187 | |||
188 | if (device == NULL || handle == NULL) | ||
189 | return -EINVAL; | ||
190 | |||
191 | cgs_kunmap_gpu_mem(device, cgs_handle); | ||
192 | cgs_gunmap_gpu_mem(device, cgs_handle); | ||
193 | cgs_free_gpu_mem(device, cgs_handle); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | int smum_init_smc_table(struct pp_hwmgr *hwmgr) | 147 | int smum_init_smc_table(struct pp_hwmgr *hwmgr) |
199 | { | 148 | { |
200 | if (NULL != hwmgr->smumgr_funcs->init_smc_table) | 149 | if (NULL != hwmgr->smumgr_funcs->init_smc_table) |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 68db5824de2d..1658e471d322 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | |||
@@ -230,10 +230,10 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, | |||
230 | "Invalid SMU Table Length!", return -EINVAL); | 230 | "Invalid SMU Table Length!", return -EINVAL); |
231 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 231 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
232 | PPSMC_MSG_SetDriverDramAddrHigh, | 232 | PPSMC_MSG_SetDriverDramAddrHigh, |
233 | priv->smu_tables.entry[table_id].table_addr_high); | 233 | smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); |
234 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 234 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
235 | PPSMC_MSG_SetDriverDramAddrLow, | 235 | PPSMC_MSG_SetDriverDramAddrLow, |
236 | priv->smu_tables.entry[table_id].table_addr_low); | 236 | smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); |
237 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 237 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
238 | PPSMC_MSG_TransferTableSmu2Dram, | 238 | PPSMC_MSG_TransferTableSmu2Dram, |
239 | priv->smu_tables.entry[table_id].table_id); | 239 | priv->smu_tables.entry[table_id].table_id); |
@@ -267,10 +267,10 @@ int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, | |||
267 | 267 | ||
268 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 268 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
269 | PPSMC_MSG_SetDriverDramAddrHigh, | 269 | PPSMC_MSG_SetDriverDramAddrHigh, |
270 | priv->smu_tables.entry[table_id].table_addr_high); | 270 | smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); |
271 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 271 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
272 | PPSMC_MSG_SetDriverDramAddrLow, | 272 | PPSMC_MSG_SetDriverDramAddrLow, |
273 | priv->smu_tables.entry[table_id].table_addr_low); | 273 | smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); |
274 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 274 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
275 | PPSMC_MSG_TransferTableDram2Smu, | 275 | PPSMC_MSG_TransferTableDram2Smu, |
276 | priv->smu_tables.entry[table_id].table_id); | 276 | priv->smu_tables.entry[table_id].table_id); |
@@ -334,14 +334,13 @@ int vega10_set_tools_address(struct pp_hwmgr *hwmgr) | |||
334 | struct vega10_smumgr *priv = | 334 | struct vega10_smumgr *priv = |
335 | (struct vega10_smumgr *)(hwmgr->smu_backend); | 335 | (struct vega10_smumgr *)(hwmgr->smu_backend); |
336 | 336 | ||
337 | if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || | 337 | if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { |
338 | priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { | ||
339 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 338 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
340 | PPSMC_MSG_SetToolsDramAddrHigh, | 339 | PPSMC_MSG_SetToolsDramAddrHigh, |
341 | priv->smu_tables.entry[TOOLSTABLE].table_addr_high); | 340 | smu_upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); |
342 | vega10_send_msg_to_smc_with_parameter(hwmgr, | 341 | vega10_send_msg_to_smc_with_parameter(hwmgr, |
343 | PPSMC_MSG_SetToolsDramAddrLow, | 342 | PPSMC_MSG_SetToolsDramAddrLow, |
344 | priv->smu_tables.entry[TOOLSTABLE].table_addr_low); | 343 | smu_lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); |
345 | } | 344 | } |
346 | return 0; | 345 | return 0; |
347 | } | 346 | } |
@@ -381,7 +380,8 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) | |||
381 | struct vega10_smumgr *priv; | 380 | struct vega10_smumgr *priv; |
382 | uint64_t mc_addr; | 381 | uint64_t mc_addr; |
383 | void *kaddr = NULL; | 382 | void *kaddr = NULL; |
384 | unsigned long handle, tools_size; | 383 | unsigned long tools_size; |
384 | struct amdgpu_bo *handle; | ||
385 | int ret; | 385 | int ret; |
386 | struct cgs_firmware_info info = {0}; | 386 | struct cgs_firmware_info info = {0}; |
387 | 387 | ||
@@ -399,147 +399,119 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) | |||
399 | hwmgr->smu_backend = priv; | 399 | hwmgr->smu_backend = priv; |
400 | 400 | ||
401 | /* allocate space for pptable */ | 401 | /* allocate space for pptable */ |
402 | smu_allocate_memory(hwmgr->device, | 402 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
403 | sizeof(PPTable_t), | 403 | sizeof(PPTable_t), |
404 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
405 | PAGE_SIZE, | 404 | PAGE_SIZE, |
405 | AMDGPU_GEM_DOMAIN_VRAM, | ||
406 | &handle, | ||
406 | &mc_addr, | 407 | &mc_addr, |
407 | &kaddr, | 408 | &kaddr); |
408 | &handle); | 409 | |
409 | 410 | if (ret) | |
410 | PP_ASSERT_WITH_CODE(kaddr, | 411 | return -EINVAL; |
411 | "[vega10_smu_init] Out of memory for pptable.", | ||
412 | kfree(hwmgr->smu_backend); | ||
413 | cgs_free_gpu_mem(hwmgr->device, | ||
414 | (cgs_handle_t)handle); | ||
415 | return -EINVAL); | ||
416 | 412 | ||
417 | priv->smu_tables.entry[PPTABLE].version = 0x01; | 413 | priv->smu_tables.entry[PPTABLE].version = 0x01; |
418 | priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); | 414 | priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); |
419 | priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; | 415 | priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; |
420 | priv->smu_tables.entry[PPTABLE].table_addr_high = | 416 | priv->smu_tables.entry[PPTABLE].mc_addr = mc_addr; |
421 | smu_upper_32_bits(mc_addr); | ||
422 | priv->smu_tables.entry[PPTABLE].table_addr_low = | ||
423 | smu_lower_32_bits(mc_addr); | ||
424 | priv->smu_tables.entry[PPTABLE].table = kaddr; | 417 | priv->smu_tables.entry[PPTABLE].table = kaddr; |
425 | priv->smu_tables.entry[PPTABLE].handle = handle; | 418 | priv->smu_tables.entry[PPTABLE].handle = handle; |
426 | 419 | ||
427 | /* allocate space for watermarks table */ | 420 | /* allocate space for watermarks table */ |
428 | smu_allocate_memory(hwmgr->device, | 421 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
429 | sizeof(Watermarks_t), | 422 | sizeof(Watermarks_t), |
430 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
431 | PAGE_SIZE, | 423 | PAGE_SIZE, |
424 | AMDGPU_GEM_DOMAIN_VRAM, | ||
425 | &handle, | ||
432 | &mc_addr, | 426 | &mc_addr, |
433 | &kaddr, | 427 | &kaddr); |
434 | &handle); | 428 | |
435 | 429 | if (ret) | |
436 | PP_ASSERT_WITH_CODE(kaddr, | 430 | goto err0; |
437 | "[vega10_smu_init] Out of memory for wmtable.", | ||
438 | kfree(hwmgr->smu_backend); | ||
439 | cgs_free_gpu_mem(hwmgr->device, | ||
440 | (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); | ||
441 | cgs_free_gpu_mem(hwmgr->device, | ||
442 | (cgs_handle_t)handle); | ||
443 | return -EINVAL); | ||
444 | 431 | ||
445 | priv->smu_tables.entry[WMTABLE].version = 0x01; | 432 | priv->smu_tables.entry[WMTABLE].version = 0x01; |
446 | priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); | 433 | priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); |
447 | priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; | 434 | priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; |
448 | priv->smu_tables.entry[WMTABLE].table_addr_high = | 435 | priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr; |
449 | smu_upper_32_bits(mc_addr); | ||
450 | priv->smu_tables.entry[WMTABLE].table_addr_low = | ||
451 | smu_lower_32_bits(mc_addr); | ||
452 | priv->smu_tables.entry[WMTABLE].table = kaddr; | 436 | priv->smu_tables.entry[WMTABLE].table = kaddr; |
453 | priv->smu_tables.entry[WMTABLE].handle = handle; | 437 | priv->smu_tables.entry[WMTABLE].handle = handle; |
454 | 438 | ||
455 | /* allocate space for AVFS table */ | 439 | /* allocate space for AVFS table */ |
456 | smu_allocate_memory(hwmgr->device, | 440 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
457 | sizeof(AvfsTable_t), | 441 | sizeof(AvfsTable_t), |
458 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
459 | PAGE_SIZE, | 442 | PAGE_SIZE, |
443 | AMDGPU_GEM_DOMAIN_VRAM, | ||
444 | &handle, | ||
460 | &mc_addr, | 445 | &mc_addr, |
461 | &kaddr, | 446 | &kaddr); |
462 | &handle); | 447 | |
463 | 448 | if (ret) | |
464 | PP_ASSERT_WITH_CODE(kaddr, | 449 | goto err1; |
465 | "[vega10_smu_init] Out of memory for avfs table.", | ||
466 | kfree(hwmgr->smu_backend); | ||
467 | cgs_free_gpu_mem(hwmgr->device, | ||
468 | (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); | ||
469 | cgs_free_gpu_mem(hwmgr->device, | ||
470 | (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); | ||
471 | cgs_free_gpu_mem(hwmgr->device, | ||
472 | (cgs_handle_t)handle); | ||
473 | return -EINVAL); | ||
474 | 450 | ||
475 | priv->smu_tables.entry[AVFSTABLE].version = 0x01; | 451 | priv->smu_tables.entry[AVFSTABLE].version = 0x01; |
476 | priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); | 452 | priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); |
477 | priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; | 453 | priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; |
478 | priv->smu_tables.entry[AVFSTABLE].table_addr_high = | 454 | priv->smu_tables.entry[AVFSTABLE].mc_addr = mc_addr; |
479 | smu_upper_32_bits(mc_addr); | ||
480 | priv->smu_tables.entry[AVFSTABLE].table_addr_low = | ||
481 | smu_lower_32_bits(mc_addr); | ||
482 | priv->smu_tables.entry[AVFSTABLE].table = kaddr; | 455 | priv->smu_tables.entry[AVFSTABLE].table = kaddr; |
483 | priv->smu_tables.entry[AVFSTABLE].handle = handle; | 456 | priv->smu_tables.entry[AVFSTABLE].handle = handle; |
484 | 457 | ||
485 | tools_size = 0x19000; | 458 | tools_size = 0x19000; |
486 | if (tools_size) { | 459 | if (tools_size) { |
487 | smu_allocate_memory(hwmgr->device, | 460 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
488 | tools_size, | 461 | tools_size, |
489 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
490 | PAGE_SIZE, | 462 | PAGE_SIZE, |
463 | AMDGPU_GEM_DOMAIN_VRAM, | ||
464 | &handle, | ||
491 | &mc_addr, | 465 | &mc_addr, |
492 | &kaddr, | 466 | &kaddr); |
493 | &handle); | 467 | if (ret) |
494 | 468 | goto err2; | |
495 | if (kaddr) { | 469 | priv->smu_tables.entry[TOOLSTABLE].version = 0x01; |
496 | priv->smu_tables.entry[TOOLSTABLE].version = 0x01; | 470 | priv->smu_tables.entry[TOOLSTABLE].size = tools_size; |
497 | priv->smu_tables.entry[TOOLSTABLE].size = tools_size; | 471 | priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG; |
498 | priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG; | 472 | priv->smu_tables.entry[TOOLSTABLE].mc_addr = mc_addr; |
499 | priv->smu_tables.entry[TOOLSTABLE].table_addr_high = | 473 | priv->smu_tables.entry[TOOLSTABLE].table = kaddr; |
500 | smu_upper_32_bits(mc_addr); | 474 | priv->smu_tables.entry[TOOLSTABLE].handle = handle; |
501 | priv->smu_tables.entry[TOOLSTABLE].table_addr_low = | ||
502 | smu_lower_32_bits(mc_addr); | ||
503 | priv->smu_tables.entry[TOOLSTABLE].table = kaddr; | ||
504 | priv->smu_tables.entry[TOOLSTABLE].handle = handle; | ||
505 | } | ||
506 | } | 475 | } |
507 | 476 | ||
508 | /* allocate space for AVFS Fuse table */ | 477 | /* allocate space for AVFS Fuse table */ |
509 | smu_allocate_memory(hwmgr->device, | 478 | ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, |
510 | sizeof(AvfsFuseOverride_t), | 479 | sizeof(AvfsFuseOverride_t), |
511 | CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, | ||
512 | PAGE_SIZE, | 480 | PAGE_SIZE, |
481 | AMDGPU_GEM_DOMAIN_VRAM, | ||
482 | &handle, | ||
513 | &mc_addr, | 483 | &mc_addr, |
514 | &kaddr, | 484 | &kaddr); |
515 | &handle); | 485 | if (ret) |
516 | 486 | goto err3; | |
517 | PP_ASSERT_WITH_CODE(kaddr, | ||
518 | "[vega10_smu_init] Out of memory for avfs fuse table.", | ||
519 | kfree(hwmgr->smu_backend); | ||
520 | cgs_free_gpu_mem(hwmgr->device, | ||
521 | (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); | ||
522 | cgs_free_gpu_mem(hwmgr->device, | ||
523 | (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); | ||
524 | cgs_free_gpu_mem(hwmgr->device, | ||
525 | (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); | ||
526 | cgs_free_gpu_mem(hwmgr->device, | ||
527 | (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); | ||
528 | cgs_free_gpu_mem(hwmgr->device, | ||
529 | (cgs_handle_t)handle); | ||
530 | return -EINVAL); | ||
531 | 487 | ||
532 | priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01; | 488 | priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01; |
533 | priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t); | 489 | priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t); |
534 | priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE; | 490 | priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE; |
535 | priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high = | 491 | priv->smu_tables.entry[AVFSFUSETABLE].mc_addr = mc_addr; |
536 | smu_upper_32_bits(mc_addr); | ||
537 | priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low = | ||
538 | smu_lower_32_bits(mc_addr); | ||
539 | priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr; | 492 | priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr; |
540 | priv->smu_tables.entry[AVFSFUSETABLE].handle = handle; | 493 | priv->smu_tables.entry[AVFSFUSETABLE].handle = handle; |
541 | 494 | ||
542 | return 0; | 495 | return 0; |
496 | |||
497 | err3: | ||
498 | if (priv->smu_tables.entry[TOOLSTABLE].table) | ||
499 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle, | ||
500 | &priv->smu_tables.entry[TOOLSTABLE].mc_addr, | ||
501 | &priv->smu_tables.entry[TOOLSTABLE].table); | ||
502 | err2: | ||
503 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle, | ||
504 | &priv->smu_tables.entry[AVFSTABLE].mc_addr, | ||
505 | &priv->smu_tables.entry[AVFSTABLE].table); | ||
506 | err1: | ||
507 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle, | ||
508 | &priv->smu_tables.entry[WMTABLE].mc_addr, | ||
509 | &priv->smu_tables.entry[WMTABLE].table); | ||
510 | err0: | ||
511 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle, | ||
512 | &priv->smu_tables.entry[PPTABLE].mc_addr, | ||
513 | &priv->smu_tables.entry[PPTABLE].table); | ||
514 | return -EINVAL; | ||
543 | } | 515 | } |
544 | 516 | ||
545 | static int vega10_smu_fini(struct pp_hwmgr *hwmgr) | 517 | static int vega10_smu_fini(struct pp_hwmgr *hwmgr) |
@@ -548,17 +520,22 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr) | |||
548 | (struct vega10_smumgr *)(hwmgr->smu_backend); | 520 | (struct vega10_smumgr *)(hwmgr->smu_backend); |
549 | 521 | ||
550 | if (priv) { | 522 | if (priv) { |
551 | cgs_free_gpu_mem(hwmgr->device, | 523 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle, |
552 | (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); | 524 | &priv->smu_tables.entry[PPTABLE].mc_addr, |
553 | cgs_free_gpu_mem(hwmgr->device, | 525 | &priv->smu_tables.entry[PPTABLE].table); |
554 | (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); | 526 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle, |
555 | cgs_free_gpu_mem(hwmgr->device, | 527 | &priv->smu_tables.entry[WMTABLE].mc_addr, |
556 | (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); | 528 | &priv->smu_tables.entry[WMTABLE].table); |
529 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle, | ||
530 | &priv->smu_tables.entry[AVFSTABLE].mc_addr, | ||
531 | &priv->smu_tables.entry[AVFSTABLE].table); | ||
557 | if (priv->smu_tables.entry[TOOLSTABLE].table) | 532 | if (priv->smu_tables.entry[TOOLSTABLE].table) |
558 | cgs_free_gpu_mem(hwmgr->device, | 533 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle, |
559 | (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); | 534 | &priv->smu_tables.entry[TOOLSTABLE].mc_addr, |
560 | cgs_free_gpu_mem(hwmgr->device, | 535 | &priv->smu_tables.entry[TOOLSTABLE].table); |
561 | (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); | 536 | amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSFUSETABLE].handle, |
537 | &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr, | ||
538 | &priv->smu_tables.entry[AVFSFUSETABLE].table); | ||
562 | kfree(hwmgr->smu_backend); | 539 | kfree(hwmgr->smu_backend); |
563 | hwmgr->smu_backend = NULL; | 540 | hwmgr->smu_backend = NULL; |
564 | } | 541 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h index 0695455b21b2..736f8cfdbbdc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | |||
@@ -38,10 +38,9 @@ struct smu_table_entry { | |||
38 | uint32_t version; | 38 | uint32_t version; |
39 | uint32_t size; | 39 | uint32_t size; |
40 | uint32_t table_id; | 40 | uint32_t table_id; |
41 | uint32_t table_addr_high; | 41 | uint64_t mc_addr; |
42 | uint32_t table_addr_low; | 42 | void *table; |
43 | uint8_t *table; | 43 | struct amdgpu_bo *handle; |
44 | unsigned long handle; | ||
45 | }; | 44 | }; |
46 | 45 | ||
47 | struct smu_table_array { | 46 | struct smu_table_array { |