author		Dave Airlie <airlied@redhat.com>	2018-03-20 21:46:05 -0400
committer	Dave Airlie <airlied@redhat.com>	2018-03-20 21:46:05 -0400
commit		287d2ac36b6f2830ea4ef66c110abc0f47a9a658 (patch)
tree		04214f156461a95c2f7ca5a8821063cad7fc515e
parent		963976cfe9c54d4d9e725e61c90c47a4af6b5ea2 (diff)
parent		6da2b9332c572fcda94de9631f8fa514f574388a (diff)
Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Continued cleanup and restructuring of powerplay
- Fetch VRAM type from vbios rather than hardcoding for SOC15 asics
- Allow ttm to drop its backing store when drivers don't need it
- DC bandwidth calc updates
- Enable DC backlight control pre-DCE11 asics
- Enable DC on all supported asics
- DC Fixes for planes due to the way our hw is ordered vs what drm expects
- DC CTM/regamma fixes
- Misc cleanup and bug fixes

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (89 commits)
  amdgpu/dm: Default PRE_VEGA ASIC support to 'y'
  drm/amd/pp: Remove the cgs wrapper for notify smu version on APU
  drm/amd/display: fix dereferencing possible ERR_PTR()
  drm/amd/display: Refine disable VGA
  drm/amdgpu: Improve documentation of bo_ptr in amdgpu_bo_create_kernel
  drm/radeon: Don't turn off DP sink when disconnected
  drm/amd/pp: Rename file name cz_* to smu8_*
  drm/amd/pp: Replace function/struct name cz_* with smu8_*
  drm/amd/pp: Remove unneeded void * casts in cz_hwmgr.c/cz_smumgr.c
  drm/amd/pp: Mv cz uvd/vce pg/dpm functions to cz_hwmgr.c
  drm/amd/pp: Remove dead header file pp_asicblocks.h
  drm/amd/pp: Delete dead code on cz_clockpowergating.c
  drm/amdgpu: Call amdgpu_ucode_fini_bo in amd_powerplay.c
  drm/amdgpu: Remove wrapper layer of smu ip functions
  drm/amdgpu: Don't compared ip_block_type with ip_block_index
  drm/amdgpu: Plus NULL function pointer check
  drm/amd/pp: Move helper functions to smu_help.c
  drm/amd/pp: Replace rv_* with smu10_*
  drm/amd/pp: Fix function parameter not correct
  drm/amd/pp: Add rv_copy_table_from/to_smc to smu backend function table
  ...
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 95
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 31
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 45
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 43
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 30
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 59
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 288
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 11
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 28
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | 5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik.c | 19
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 7
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 200
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 7
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si_dpm.c | 16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si_dpm.h | 3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/soc15.c | 12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4
-rwxr-xr-x	drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vi.c | 18
-rw-r--r--	drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--	drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 123
-rw-r--r--	drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 15
-rw-r--r--	drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 3
-rw-r--r--	drivers/gpu/drm/amd/display/dc/basics/logger.c | 3
-rw-r--r--	drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 6
-rw-r--r--	drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 5
-rw-r--r--	drivers/gpu/drm/amd/display/dc/bios/command_table2.h | 2
-rw-r--r--	drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 160
-rw-r--r--	drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 23
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc.c | 20
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 32
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc.h | 9
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 12
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc_stream.h | 7
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dc_types.h | 18
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 35
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 10
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 24
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 3
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 50
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 28
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 87
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 1
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 16
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 9
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 261
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 7
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 1
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h | 2
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 1
-rw-r--r--	drivers/gpu/drm/amd/display/dc/dml/dml_logger.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h) | 17
-rw-r--r--	drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r--	drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 6
-rw-r--r--	drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3
-rw-r--r--	drivers/gpu/drm/amd/display/include/logger_types.h | 1
-rw-r--r--	drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6
-rw-r--r--	drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 155
-rw-r--r--	drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 9
-rw-r--r--	drivers/gpu/drm/amd/display/modules/inc/mod_stats.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h) | 56
-rw-r--r--	drivers/gpu/drm/amd/display/modules/stats/stats.c | 334
-rw-r--r--	drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3
-rw-r--r--	drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 70
-rw-r--r--	drivers/gpu/drm/amd/include/vega10_ip_offset.h | 286
-rw-r--r--	drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 617
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 8
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 209
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h | 36
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 577
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c) | 547
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h) | 130
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h) | 4
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 1
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 14
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c) | 997
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h) | 79
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 536
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 180
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6
-rw-r--r--	drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 174
-rw-r--r--	drivers/gpu/drm/amd/powerplay/inc/pp_instance.h | 36
-rw-r--r--	drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 28
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 4
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 883
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 74
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 4
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 87
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 399
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 344
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h) | 19
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 7
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 891
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h) | 82
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 9
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 4
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 61
-rw-r--r--	drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r--	drivers/gpu/drm/bochs/bochs_mm.c | 5
-rw-r--r--	drivers/gpu/drm/cirrus/cirrus_ttm.c | 6
-rw-r--r--	drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 5
-rw-r--r--	drivers/gpu/drm/mgag200/mgag200_ttm.c | 6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c | 10
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_ttm.h | 4
-rw-r--r--	drivers/gpu/drm/qxl/qxl_ttm.c | 8
-rw-r--r--	drivers/gpu/drm/radeon/radeon_connectors.c | 31
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c | 12
-rw-r--r--	drivers/gpu/drm/ttm/ttm_agp_backend.c | 6
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c | 15
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c | 24
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c | 75
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_ttm.c | 7
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 10
-rw-r--r--	drivers/staging/vboxvideo/vbox_ttm.c | 5
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h | 252
-rw-r--r--	include/drm/ttm/ttm_tt.h | 272
-rw-r--r--	include/uapi/drm/amdgpu_drm.h | 1
150 files changed, 5739 insertions, 5158 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8522c2ea1f3e..2ca2b5154d52 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -87,8 +87,7 @@ amdgpu-y += \
 
 # add SMC block
 amdgpu-y += \
-	amdgpu_dpm.o \
-	amdgpu_powerplay.o
+	amdgpu_dpm.o
 
 # add DCE block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index eba4abc8aac6..f44a83ab2bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -441,7 +441,7 @@ struct amdgpu_sa_bo {
 void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
-			     u64 flags, bool kernel,
+			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);
 
@@ -1081,8 +1081,6 @@ struct amdgpu_wb {
 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
 
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
 /*
  * SDMA
  */
@@ -1395,9 +1393,7 @@ enum amd_hw_ip_block_type {
 #define HWIP_MAX_INSTANCE	6
 
 struct amd_powerplay {
-	struct cgs_device *cgs_device;
	void *pp_handle;
-	const struct amd_ip_funcs *ip_funcs;
	const struct amd_pm_funcs *pp_funcs;
 };
 
@@ -1632,6 +1628,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
@@ -1655,6 +1654,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
 #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
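
[Note: not part of the diff. A minimal usage sketch of the new byte-wide accessors declared above; the offset is a hypothetical placeholder, not a real register. As with the 32-bit macros, adev must be in scope, and the helpers (added in amdgpu_device.c below) bounds-check the byte offset against adev->rmmio_size.]

	uint8_t val;

	val = RREG8(0x1234);		/* hypothetical byte offset into the MMIO BAR */
	WREG8(0x1234, val | 0x1);	/* read-modify-write of a single byte */
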
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8a23aa8f9c73..4d36203ffb11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &bo);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+			     NULL, &bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e0371a9967b9..a12a1654e124 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
 			va, size, domain_string(alloc_domain));
 
-	ret = amdgpu_bo_create(adev, size, byte_align, false,
-			       alloc_domain, alloc_flags, NULL, NULL, &bo);
+	ret = amdgpu_bo_create(adev, size, byte_align,
+			       alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
 				domain_string(alloc_domain), ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff8efd0f8fd5..a0f48cb9b8f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -114,6 +114,9 @@ union igp_info {
 	struct atom_integrated_system_info_v1_11 v11;
 };
 
+union umc_info {
+	struct atom_umc_info_v3_1 v31;
+};
 /*
  * Return vram width from integrated system info table, if available,
  * or 0 if not.
@@ -143,6 +146,94 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
+					       int atom_mem_type)
+{
+	int vram_type;
+
+	if (adev->flags & AMD_IS_APU) {
+		switch (atom_mem_type) {
+		case Ddr2MemType:
+		case LpDdr2MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR2;
+			break;
+		case Ddr3MemType:
+		case LpDdr3MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR3;
+			break;
+		case Ddr4MemType:
+		case LpDdr4MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR4;
+			break;
+		default:
+			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+			break;
+		}
+	} else {
+		switch (atom_mem_type) {
+		case ATOM_DGPU_VRAM_TYPE_GDDR5:
+			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+			break;
+		case ATOM_DGPU_VRAM_TYPE_HBM:
+			vram_type = AMDGPU_VRAM_TYPE_HBM;
+			break;
+		default:
+			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+			break;
+		}
+	}
+
+	return vram_type;
+}
+/*
+ * Return vram type from either integrated system info table
+ * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
+ */
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	union umc_info *umc_info;
+	u8 frev, crev;
+	u8 mem_type;
+
+	if (adev->flags & AMD_IS_APU)
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						    integratedsysteminfo);
+	else
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						    umc_info);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+					  index, &size,
+					  &frev, &crev, &data_offset)) {
+		if (adev->flags & AMD_IS_APU) {
+			igp_info = (union igp_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 11:
+				mem_type = igp_info->v11.memorytype;
+				return convert_atom_mem_type_to_vram_type(adev, mem_type);
+			default:
+				return 0;
+			}
+		} else {
+			umc_info = (union umc_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 1:
+				mem_type = umc_info->v31.vram_type;
+				return convert_atom_mem_type_to_vram_type(adev, mem_type);
+			default:
+				return 0;
+			}
+		}
+	}
+
+	return 0;
+}
+
 union firmware_info {
 	struct atom_firmware_info_v3_1 v31;
 };
@@ -151,10 +242,6 @@ union smu_info {
 	struct atom_smu_info_v3_1 v31;
 };
 
-union umc_info {
-	struct atom_umc_info_v3_1 v31;
-};
-
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
 {
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
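
[Note: not part of the diff. A sketch of the intended caller; per the diffstat, gmc_v9_0.c consumes this so SOC15 asics stop hardcoding the VRAM type, but the exact field name and fallback below are assumptions, not taken from this hunk:]

	/* hypothetical SOC15 GMC init snippet */
	int vram_type = amdgpu_atomfirmware_get_vram_type(adev);

	adev->gmc.vram_type = vram_type ? vram_type : AMDGPU_VRAM_TYPE_UNKNOWN;
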
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 288b97e54347..7689c961c4ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -28,6 +28,7 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2fb299afc12b..02b849be083b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	int time;
 
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
-			     NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
+			     ttm_bo_type_kernel, NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
-			     NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+			     ttm_bo_type_kernel, NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f2dd98d3f5e6..37098c68a645 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -654,11 +654,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		else
 			strcpy(fw_name, "amdgpu/vega10_smc.bin");
 		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-	case CHIP_RAVEN:
-		adev->pm.fw_version = info->version;
-		return 0;
 	default:
 		DRM_ERROR("SMC firmware not supported\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ffc1f6f46913..9da8d5802980 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 856378434ea2..690cf77b950e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -87,6 +87,8 @@ static const char *amdgpu_asic_name[] = {
87 "LAST", 87 "LAST",
88}; 88};
89 89
90static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
91
90bool amdgpu_device_is_px(struct drm_device *dev) 92bool amdgpu_device_is_px(struct drm_device *dev)
91{ 93{
92 struct amdgpu_device *adev = dev->dev_private; 94 struct amdgpu_device *adev = dev->dev_private;
@@ -121,6 +123,32 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
121 return ret; 123 return ret;
122} 124}
123 125
126/*
127 * MMIO register read with bytes helper functions
128 * @offset:bytes offset from MMIO start
129 *
130*/
131
132uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
133 if (offset < adev->rmmio_size)
134 return (readb(adev->rmmio + offset));
135 BUG();
136}
137
138/*
139 * MMIO register write with bytes helper functions
140 * @offset:bytes offset from MMIO start
141 * @value: the value want to be written to the register
142 *
143*/
144void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
145 if (offset < adev->rmmio_size)
146 writeb(value, adev->rmmio + offset);
147 else
148 BUG();
149}
150
151
124void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 152void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
125 uint32_t acc_flags) 153 uint32_t acc_flags)
126{ 154{
@@ -830,6 +858,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
830 dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n"); 858 dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
831 amdgpu_lockup_timeout = 10000; 859 amdgpu_lockup_timeout = 10000;
832 } 860 }
861
862 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
833} 863}
834 864
835/** 865/**
@@ -1387,7 +1417,8 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1387 continue; 1417 continue;
1388 /* skip CG for VCE/UVD, it's handled specially */ 1418 /* skip CG for VCE/UVD, it's handled specially */
1389 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1419 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1390 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { 1420 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1421 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1391 /* enable clockgating to save power */ 1422 /* enable clockgating to save power */
1392 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1423 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1393 AMD_CG_STATE_GATE); 1424 AMD_CG_STATE_GATE);
@@ -1436,7 +1467,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1436 for (i = 0; i < adev->num_ip_blocks; i++) { 1467 for (i = 0; i < adev->num_ip_blocks; i++) {
1437 if (!adev->ip_blocks[i].status.hw) 1468 if (!adev->ip_blocks[i].status.hw)
1438 continue; 1469 continue;
1439 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 1470 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1471 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1440 /* ungate blocks before hw fini so that we can shutdown the blocks safely */ 1472 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1441 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1473 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1442 AMD_CG_STATE_UNGATE); 1474 AMD_CG_STATE_UNGATE);
@@ -1545,7 +1577,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1545 if (!adev->ip_blocks[i].status.valid) 1577 if (!adev->ip_blocks[i].status.valid)
1546 continue; 1578 continue;
1547 /* ungate blocks so that suspend can properly shut them down */ 1579 /* ungate blocks so that suspend can properly shut them down */
1548 if (i != AMD_IP_BLOCK_TYPE_SMC) { 1580 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
1581 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1549 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 1582 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1550 AMD_CG_STATE_UNGATE); 1583 AMD_CG_STATE_UNGATE);
1551 if (r) { 1584 if (r) {
@@ -1878,6 +1911,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1878 if (adev->rio_mem == NULL) 1911 if (adev->rio_mem == NULL)
1879 DRM_INFO("PCI I/O BAR is not found.\n"); 1912 DRM_INFO("PCI I/O BAR is not found.\n");
1880 1913
1914 amdgpu_device_get_pcie_info(adev);
1915
1881 /* early init functions */ 1916 /* early init functions */
1882 r = amdgpu_device_ip_early_init(adev); 1917 r = amdgpu_device_ip_early_init(adev);
1883 if (r) 1918 if (r)
@@ -2086,6 +2121,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2086 2121
2087 amdgpu_ib_pool_fini(adev); 2122 amdgpu_ib_pool_fini(adev);
2088 amdgpu_fence_driver_fini(adev); 2123 amdgpu_fence_driver_fini(adev);
2124 amdgpu_pm_sysfs_fini(adev);
2089 amdgpu_fbdev_fini(adev); 2125 amdgpu_fbdev_fini(adev);
2090 r = amdgpu_device_ip_fini(adev); 2126 r = amdgpu_device_ip_fini(adev);
2091 if (adev->firmware.gpu_info_fw) { 2127 if (adev->firmware.gpu_info_fw) {
@@ -2114,7 +2150,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2114 iounmap(adev->rmmio); 2150 iounmap(adev->rmmio);
2115 adev->rmmio = NULL; 2151 adev->rmmio = NULL;
2116 amdgpu_device_doorbell_fini(adev); 2152 amdgpu_device_doorbell_fini(adev);
2117 amdgpu_pm_sysfs_fini(adev);
2118 amdgpu_debugfs_regs_cleanup(adev); 2153 amdgpu_debugfs_regs_cleanup(adev);
2119} 2154}
2120 2155
@@ -2755,7 +2790,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2755 return r; 2790 return r;
2756} 2791}
2757 2792
2758void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 2793static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
2759{ 2794{
2760 u32 mask; 2795 u32 mask;
2761 int ret; 2796 int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 137145dd14a9..cf0f186c6092 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+		r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->gart.robj);
+				     ttm_bo_type_kernel, NULL,
+				     &adev->gart.robj);
 		if (r) {
 			return r;
 		}
@@ -315,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++)
-		adev->gart.pages[p] = pagelist[i];
+		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
 
 	if (!adev->gart.ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 55a840ae6d68..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
@@ -45,7 +43,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
-			     u64 flags, bool kernel,
+			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
 {
@@ -59,8 +57,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 	}
 
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-			     flags, NULL, resv, &bo);
+	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+			     flags, type, resv, &bo);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b8a7dba69595..0e01f115bbe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -25,51 +25,12 @@
 #define __AMDGPU_IH_H__
 
 #include <linux/chash.h>
+#include "soc15_ih_clientid.h"
 
 struct amdgpu_device;
-/*
- * vega10+ IH clients
- */
-enum amdgpu_ih_clientid
-{
-	AMDGPU_IH_CLIENTID_IH		= 0x00,
-	AMDGPU_IH_CLIENTID_ACP		= 0x01,
-	AMDGPU_IH_CLIENTID_ATHUB	= 0x02,
-	AMDGPU_IH_CLIENTID_BIF		= 0x03,
-	AMDGPU_IH_CLIENTID_DCE		= 0x04,
-	AMDGPU_IH_CLIENTID_ISP		= 0x05,
-	AMDGPU_IH_CLIENTID_PCIE0	= 0x06,
-	AMDGPU_IH_CLIENTID_RLC		= 0x07,
-	AMDGPU_IH_CLIENTID_SDMA0	= 0x08,
-	AMDGPU_IH_CLIENTID_SDMA1	= 0x09,
-	AMDGPU_IH_CLIENTID_SE0SH	= 0x0a,
-	AMDGPU_IH_CLIENTID_SE1SH	= 0x0b,
-	AMDGPU_IH_CLIENTID_SE2SH	= 0x0c,
-	AMDGPU_IH_CLIENTID_SE3SH	= 0x0d,
-	AMDGPU_IH_CLIENTID_SYSHUB	= 0x0e,
-	AMDGPU_IH_CLIENTID_THM		= 0x0f,
-	AMDGPU_IH_CLIENTID_UVD		= 0x10,
-	AMDGPU_IH_CLIENTID_VCE0		= 0x11,
-	AMDGPU_IH_CLIENTID_VMC		= 0x12,
-	AMDGPU_IH_CLIENTID_XDMA		= 0x13,
-	AMDGPU_IH_CLIENTID_GRBM_CP	= 0x14,
-	AMDGPU_IH_CLIENTID_ATS		= 0x15,
-	AMDGPU_IH_CLIENTID_ROM_SMUIO	= 0x16,
-	AMDGPU_IH_CLIENTID_DF		= 0x17,
-	AMDGPU_IH_CLIENTID_VCE1		= 0x18,
-	AMDGPU_IH_CLIENTID_PWR		= 0x19,
-	AMDGPU_IH_CLIENTID_UTCL2	= 0x1b,
-	AMDGPU_IH_CLIENTID_EA		= 0x1c,
-	AMDGPU_IH_CLIENTID_UTCL2LOG	= 0x1d,
-	AMDGPU_IH_CLIENTID_MP0		= 0x1e,
-	AMDGPU_IH_CLIENTID_MP1		= 0x1f,
-
-	AMDGPU_IH_CLIENTID_MAX,
-
-	AMDGPU_IH_CLIENTID_VCN		= AMDGPU_IH_CLIENTID_UVD
-};
 
 #define AMDGPU_IH_CLIENTID_LEGACY	0
+#define AMDGPU_IH_CLIENTID_MAX	SOC15_IH_CLIENTID_MAX
 
 #define AMDGPU_PAGEFAULT_HASH_BITS 8
 struct amdgpu_retryfault_hashtable {
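
[Note: not part of the diff. The client IDs move to the new shared header soc15_ih_clientid.h (added in this series per the diffstat), so IP blocks register interrupt sources against the relocated names. A hedged sketch; the SOC15_IH_CLIENTID_SDMA0 spelling follows the new header's prefix, and the src_id value 224 (the SDMA trap id used elsewhere in the driver) is an assumption here:]

	/* e.g. in an SDMA IP block's sw_init */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;
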
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d9533bbc467c..d6416ee52e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -350,6 +350,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio audio; /* audio stuff */
 	int num_crtc; /* number of crtcs */
 	int num_hpd; /* number of hpd pins */
@@ -550,14 +551,6 @@ struct amdgpu_connector {
 	/* we need to mind the EDID between detect
 	   and get modes due to analog/digital/tvencoder */
 	struct edid *edid;
-	/* number of modes generated from EDID at 'dc_sink' */
-	int num_modes;
-	/* The 'old' sink - before an HPD.
-	 * The 'current' sink is in dc_link->sink. */
-	struct dc_sink *dc_sink;
-	struct dc_link *dc_link;
-	struct dc_sink *dc_em_sink;
-	const struct dc_stream *stream;
 	void *con_priv;
 	bool dac_load_detect;
 	bool detected_by_load; /* if the connection status was determined by load */
@@ -568,27 +561,6 @@ struct amdgpu_connector {
 	enum amdgpu_connector_audio audio;
 	enum amdgpu_connector_dither dither;
 	unsigned pixelclock_for_modeset;
-
-	struct drm_dp_mst_topology_mgr mst_mgr;
-	struct amdgpu_dm_dp_aux dm_dp_aux;
-	struct drm_dp_mst_port *port;
-	struct amdgpu_connector *mst_port;
-	struct amdgpu_encoder *mst_encoder;
-	struct semaphore mst_sem;
-
-	/* TODO see if we can merge with ddc_bus or make a dm_connector */
-	struct amdgpu_i2c_adapter *i2c;
-
-	/* Monitor range limits */
-	int min_vfreq ;
-	int max_vfreq ;
-	int pixel_clock_mhz;
-
-	/*freesync caps*/
-	struct mod_freesync_caps caps;
-
-	struct mutex hpd_lock;
-
 };
 
 /* TODO: start to use this struct and remove same field from base one */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9157745fce14..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -60,6 +60,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
@@ -173,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
  * @size: size for the new BO
  * @align: alignment for the new BO
  * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
  * @gpu_addr: GPU addr of the pinned BO
  * @cpu_addr: optional CPU address mapping
  *
  * Allocates and pins a BO for kernel internal use, and returns it still
  * reserved.
  *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
  * Returns 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -191,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 	int r;
 
 	if (!*bo_ptr) {
-		r = amdgpu_bo_create(adev, size, align, true, domain,
+		r = amdgpu_bo_create(adev, size, align, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, bo_ptr);
+				     ttm_bo_type_kernel, NULL, bo_ptr);
 		if (r) {
 			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 				r);
@@ -242,12 +246,14 @@ error_free:
  * @size: size for the new BO
  * @align: alignment for the new BO
  * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
  * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
 * Returns 0 on success, negative error code otherwise.
 */
 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -335,21 +341,19 @@ fail:
 	return false;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
-			       unsigned long size, int byte_align,
-			       bool kernel, u32 domain, u64 flags,
-			       struct sg_table *sg,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+			       int byte_align, u32 domain,
+			       u64 flags, enum ttm_bo_type type,
			       struct reservation_object *resv,
			       struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_operation_ctx ctx = {
-		.interruptible = !kernel,
+		.interruptible = (type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
 	};
 	struct amdgpu_bo *bo;
-	enum ttm_bo_type type;
 	unsigned long page_align;
 	size_t acc_size;
 	int r;
@@ -360,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	if (!amdgpu_bo_validate_size(adev, size, domain))
 		return -ENOMEM;
 
-	if (kernel) {
-		type = ttm_bo_type_kernel;
-	} else if (sg) {
-		type = ttm_bo_type_sg;
-	} else {
-		type = ttm_bo_type_device;
-	}
 	*bo_ptr = NULL;
 
 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -385,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
					AMDGPU_GEM_DOMAIN_GWS |
					AMDGPU_GEM_DOMAIN_OA);
 	bo->allowed_domains = bo->preferred_domains;
-	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+	if (type != ttm_bo_type_kernel &&
+	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
 	bo->flags = flags;
@@ -423,7 +421,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, &ctx, acc_size,
-				 sg, resv, &amdgpu_ttm_bo_destroy);
+				 NULL, resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0))
		return r;
 
@@ -435,7 +433,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (kernel)
+	if (type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;
 
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
@@ -479,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	if (bo->shadow)
		return 0;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, true,
-				AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				AMDGPU_GEM_CREATE_SHADOW,
-				NULL, bo->tbo.resv,
-				&bo->shadow);
+				ttm_bo_type_kernel,
+				bo->tbo.resv, &bo->shadow);
 	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
@@ -495,18 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev,
-		     unsigned long size, int byte_align,
-		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+		     int byte_align, u32 domain,
+		     u64 flags, enum ttm_bo_type type,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
 {
 	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
 	int r;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
-				parent_flags, sg, resv, bo_ptr);
+	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+				parent_flags, type, resv, bo_ptr);
 	if (r)
		return r;
 
@@ -821,7 +817,8 @@ static const char *amdgpu_vram_names[] = {
 	"GDDR4",
 	"GDDR5",
 	"HBM",
-	"DDR3"
+	"DDR3",
+	"DDR4",
 };
 
 int amdgpu_bo_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d4dbfe1f842e..546f77cb7882 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
 	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev,
-		     unsigned long size, int byte_align,
-		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg,
-		     struct reservation_object *resv,
-		     struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+		     int byte_align, u32 domain,
+		     u64 flags, enum ttm_bo_type type,
+		     struct reservation_object *resv,
+		     struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 632b18670098..361975cf45a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1154,7 +1154,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	umode_t effective_mode = attr->mode;
 
 	/* handle non-powerplay limitations */
-	if (!adev->powerplay.cgs_device) {
+	if (!adev->powerplay.pp_handle) {
 		/* Skip fan attributes if fan is not present */
 		if (adev->pm.no_fan &&
 		    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index 5c2e2d5dc1ee..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25#include "atom.h"
26#include "amdgpu.h"
27#include "amd_shared.h"
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include "amdgpu_pm.h"
31#include <drm/amdgpu_drm.h>
32#include "amdgpu_powerplay.h"
33#include "si_dpm.h"
34#include "cik_dpm.h"
35#include "vi_dpm.h"
36
37static int amdgpu_pp_early_init(void *handle)
38{
39 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
40 struct amd_powerplay *amd_pp;
41 int ret = 0;
42
43 amd_pp = &(adev->powerplay);
44 amd_pp->pp_handle = (void *)adev;
45
46 switch (adev->asic_type) {
47 case CHIP_POLARIS11:
48 case CHIP_POLARIS10:
49 case CHIP_POLARIS12:
50 case CHIP_TONGA:
51 case CHIP_FIJI:
52 case CHIP_TOPAZ:
53 case CHIP_CARRIZO:
54 case CHIP_STONEY:
55 case CHIP_VEGA10:
56 case CHIP_RAVEN:
57 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
58 amd_pp->ip_funcs = &pp_ip_funcs;
59 amd_pp->pp_funcs = &pp_dpm_funcs;
60 break;
61 /* These chips don't have powerplay implemenations */
62#ifdef CONFIG_DRM_AMDGPU_SI
63 case CHIP_TAHITI:
64 case CHIP_PITCAIRN:
65 case CHIP_VERDE:
66 case CHIP_OLAND:
67 case CHIP_HAINAN:
68 amd_pp->ip_funcs = &si_dpm_ip_funcs;
69 amd_pp->pp_funcs = &si_dpm_funcs;
70 break;
71#endif
72#ifdef CONFIG_DRM_AMDGPU_CIK
73 case CHIP_BONAIRE:
74 case CHIP_HAWAII:
75 if (amdgpu_dpm == -1) {
76 amd_pp->ip_funcs = &ci_dpm_ip_funcs;
77 amd_pp->pp_funcs = &ci_dpm_funcs;
78 } else {
79 amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
80 amd_pp->ip_funcs = &pp_ip_funcs;
81 amd_pp->pp_funcs = &pp_dpm_funcs;
82 }
83 break;
84 case CHIP_KABINI:
85 case CHIP_MULLINS:
86 case CHIP_KAVERI:
87 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
88 amd_pp->pp_funcs = &kv_dpm_funcs;
89 break;
90#endif
91 default:
92 ret = -EINVAL;
93 break;
94 }
95
96 if (adev->powerplay.ip_funcs->early_init)
97 ret = adev->powerplay.ip_funcs->early_init(adev);
98
99 return ret;
100}
101
102
103static int amdgpu_pp_late_init(void *handle)
104{
105 int ret = 0;
106 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
107
108 if (adev->powerplay.ip_funcs->late_init)
109 ret = adev->powerplay.ip_funcs->late_init(
110 adev->powerplay.pp_handle);
111
112 return ret;
113}
114
115static int amdgpu_pp_sw_init(void *handle)
116{
117 int ret = 0;
118 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
119
120 if (adev->powerplay.ip_funcs->sw_init)
121 ret = adev->powerplay.ip_funcs->sw_init(
122 adev->powerplay.pp_handle);
123
124 return ret;
125}
126
127static int amdgpu_pp_sw_fini(void *handle)
128{
129 int ret = 0;
130 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
131
132 if (adev->powerplay.ip_funcs->sw_fini)
133 ret = adev->powerplay.ip_funcs->sw_fini(
134 adev->powerplay.pp_handle);
135 if (ret)
136 return ret;
137
138 return ret;
139}
140
141static int amdgpu_pp_hw_init(void *handle)
142{
143 int ret = 0;
144 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
145
146 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
147 amdgpu_ucode_init_bo(adev);
148
149 if (adev->powerplay.ip_funcs->hw_init)
150 ret = adev->powerplay.ip_funcs->hw_init(
151 adev->powerplay.pp_handle);
152
153 return ret;
154}
155
156static int amdgpu_pp_hw_fini(void *handle)
157{
158 int ret = 0;
159 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
160
161 if (adev->powerplay.ip_funcs->hw_fini)
162 ret = adev->powerplay.ip_funcs->hw_fini(
163 adev->powerplay.pp_handle);
164
165 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
166 amdgpu_ucode_fini_bo(adev);
167
168 return ret;
169}
170
171static void amdgpu_pp_late_fini(void *handle)
172{
173 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
174
175 if (adev->powerplay.ip_funcs->late_fini)
176 adev->powerplay.ip_funcs->late_fini(
177 adev->powerplay.pp_handle);
178
179 if (adev->powerplay.cgs_device)
180 amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
181}
182
183static int amdgpu_pp_suspend(void *handle)
184{
185 int ret = 0;
186 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
187
188 if (adev->powerplay.ip_funcs->suspend)
189 ret = adev->powerplay.ip_funcs->suspend(
190 adev->powerplay.pp_handle);
191 return ret;
192}
193
194static int amdgpu_pp_resume(void *handle)
195{
196 int ret = 0;
197 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
198
199 if (adev->powerplay.ip_funcs->resume)
200 ret = adev->powerplay.ip_funcs->resume(
201 adev->powerplay.pp_handle);
202 return ret;
203}
204
205static int amdgpu_pp_set_clockgating_state(void *handle,
206 enum amd_clockgating_state state)
207{
208 int ret = 0;
209 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
210
211 if (adev->powerplay.ip_funcs->set_clockgating_state)
212 ret = adev->powerplay.ip_funcs->set_clockgating_state(
213 adev->powerplay.pp_handle, state);
214 return ret;
215}
216
217static int amdgpu_pp_set_powergating_state(void *handle,
218 enum amd_powergating_state state)
219{
220 int ret = 0;
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222
223 if (adev->powerplay.ip_funcs->set_powergating_state)
224 ret = adev->powerplay.ip_funcs->set_powergating_state(
225 adev->powerplay.pp_handle, state);
226 return ret;
227}
228
229
230static bool amdgpu_pp_is_idle(void *handle)
231{
232 bool ret = true;
233 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
234
235 if (adev->powerplay.ip_funcs->is_idle)
236 ret = adev->powerplay.ip_funcs->is_idle(
237 adev->powerplay.pp_handle);
238 return ret;
239}
240
241static int amdgpu_pp_wait_for_idle(void *handle)
242{
243 int ret = 0;
244 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
245
246 if (adev->powerplay.ip_funcs->wait_for_idle)
247 ret = adev->powerplay.ip_funcs->wait_for_idle(
248 adev->powerplay.pp_handle);
249 return ret;
250}
251
252static int amdgpu_pp_soft_reset(void *handle)
253{
254 int ret = 0;
255 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
256
257 if (adev->powerplay.ip_funcs->soft_reset)
258 ret = adev->powerplay.ip_funcs->soft_reset(
259 adev->powerplay.pp_handle);
260 return ret;
261}
262
263static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
264 .name = "amdgpu_powerplay",
265 .early_init = amdgpu_pp_early_init,
266 .late_init = amdgpu_pp_late_init,
267 .sw_init = amdgpu_pp_sw_init,
268 .sw_fini = amdgpu_pp_sw_fini,
269 .hw_init = amdgpu_pp_hw_init,
270 .hw_fini = amdgpu_pp_hw_fini,
271 .late_fini = amdgpu_pp_late_fini,
272 .suspend = amdgpu_pp_suspend,
273 .resume = amdgpu_pp_resume,
274 .is_idle = amdgpu_pp_is_idle,
275 .wait_for_idle = amdgpu_pp_wait_for_idle,
276 .soft_reset = amdgpu_pp_soft_reset,
277 .set_clockgating_state = amdgpu_pp_set_clockgating_state,
278 .set_powergating_state = amdgpu_pp_set_powergating_state,
279};
280
281const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
282{
283 .type = AMD_IP_BLOCK_TYPE_SMC,
284 .major = 1,
285 .minor = 0,
286 .rev = 0,
287 .funcs = &amdgpu_pp_ip_funcs,
288};
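
Every amdgpu_pp_* callback above has the same shape: NULL-check the backend hook in adev->powerplay.ip_funcs, forward the opaque pp_handle, and pass the return code through. A minimal user-space sketch of that delegation pattern follows; the struct names and pp_shim_hw_init are illustrative stand-ins, not kernel symbols.

	#include <stdio.h>

	struct backend_funcs {
		int (*hw_init)(void *handle);		/* optional hook, may be NULL */
	};

	struct pp_block {
		const struct backend_funcs *ip_funcs;
		void *pp_handle;			/* opaque backend state */
	};

	/* Forward to the backend only when it implements the hook, the same
	 * shape as the amdgpu_pp_* wrappers; a missing hook counts as success. */
	static int pp_shim_hw_init(struct pp_block *pp)
	{
		int ret = 0;

		if (pp->ip_funcs->hw_init)
			ret = pp->ip_funcs->hw_init(pp->pp_handle);
		return ret;
	}

	static int demo_hw_init(void *handle)
	{
		printf("hw_init called with %p\n", handle);
		return 0;
	}

	int main(void)
	{
		static const struct backend_funcs funcs = { .hw_init = demo_hw_init };
		struct pp_block pp = { .ip_funcs = &funcs, .pp_handle = &pp };

		return pp_shim_hw_init(&pp);
	}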
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index fb66b45548d3..1c9991738477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -105,11 +105,16 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
105 int ret; 105 int ret;
106 106
107 ww_mutex_lock(&resv->lock, NULL); 107 ww_mutex_lock(&resv->lock, NULL);
108 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, 108 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
109 AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); 109 AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
110 resv, &bo);
110 if (ret) 111 if (ret)
111 goto error; 112 goto error;
112 113
114 bo->tbo.sg = sg;
115 bo->tbo.ttm->sg = sg;
116 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
117 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
113 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) 118 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
114 bo->prime_shared_count = 1; 119 bo->prime_shared_count = 1;
115 120
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 6e712f12eecd..9a75410cd576 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -62,6 +62,9 @@ static int psp_sw_init(void *handle)
62 62
63 psp->adev = adev; 63 psp->adev = adev;
64 64
65 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
66 return 0;
67
65 ret = psp_init_microcode(psp); 68 ret = psp_init_microcode(psp);
66 if (ret) { 69 if (ret) {
67 DRM_ERROR("Failed to load psp firmware!\n"); 70 DRM_ERROR("Failed to load psp firmware!\n");
@@ -75,6 +78,9 @@ static int psp_sw_fini(void *handle)
75{ 78{
76 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 79 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
77 80
81 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
82 return 0;
83
78 release_firmware(adev->psp.sos_fw); 84 release_firmware(adev->psp.sos_fw);
79 adev->psp.sos_fw = NULL; 85 adev->psp.sos_fw = NULL;
80 release_firmware(adev->psp.asd_fw); 86 release_firmware(adev->psp.asd_fw);
@@ -453,6 +459,9 @@ static int psp_suspend(void *handle)
453 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 459 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
454 struct psp_context *psp = &adev->psp; 460 struct psp_context *psp = &adev->psp;
455 461
462 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
463 return 0;
464
456 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 465 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
457 if (ret) { 466 if (ret) {
458 DRM_ERROR("PSP ring stop failed\n"); 467 DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f3d81b6fb499..2dbe87591f81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
63 AMDGPU_GEM_DOMAIN_VRAM, 0, 63 ttm_bo_type_kernel, NULL, &vram_obj);
64 NULL, NULL, &vram_obj);
65 if (r) { 64 if (r) {
66 DRM_ERROR("Failed to create VRAM object\n"); 65 DRM_ERROR("Failed to create VRAM object\n");
67 goto out_cleanup; 66 goto out_cleanup;
@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
80 void **vram_start, **vram_end; 79 void **vram_start, **vram_end;
81 struct dma_fence *fence = NULL; 80 struct dma_fence *fence = NULL;
82 81
83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 82 r = amdgpu_bo_create(adev, size, PAGE_SIZE,
84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 83 AMDGPU_GEM_DOMAIN_GTT, 0,
85 NULL, gtt_obj + i); 84 ttm_bo_type_kernel, NULL, gtt_obj + i);
86 if (r) { 85 if (r) {
87 DRM_ERROR("Failed to create GTT object %d\n", i); 86 DRM_ERROR("Failed to create GTT object %d\n", i);
88 goto out_lclean; 87 goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c2fae04d769a..e28b73609fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -204,6 +204,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
204 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM 204 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
205 }; 205 };
206 206
207 if (bo->type == ttm_bo_type_sg) {
208 placement->num_placement = 0;
209 placement->num_busy_placement = 0;
210 return;
211 }
212
207 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { 213 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
208 placement->placement = &placements; 214 placement->placement = &placements;
209 placement->busy_placement = &placements; 215 placement->busy_placement = &placements;
@@ -982,20 +988,20 @@ static struct ttm_backend_func amdgpu_backend_func = {
982 .destroy = &amdgpu_ttm_backend_destroy, 988 .destroy = &amdgpu_ttm_backend_destroy,
983}; 989};
984 990
985static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, 991static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
986 unsigned long size, uint32_t page_flags) 992 uint32_t page_flags)
987{ 993{
988 struct amdgpu_device *adev; 994 struct amdgpu_device *adev;
989 struct amdgpu_ttm_tt *gtt; 995 struct amdgpu_ttm_tt *gtt;
990 996
991 adev = amdgpu_ttm_adev(bdev); 997 adev = amdgpu_ttm_adev(bo->bdev);
992 998
993 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); 999 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
994 if (gtt == NULL) { 1000 if (gtt == NULL) {
995 return NULL; 1001 return NULL;
996 } 1002 }
997 gtt->ttm.ttm.func = &amdgpu_backend_func; 1003 gtt->ttm.ttm.func = &amdgpu_backend_func;
998 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) { 1004 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
999 kfree(gtt); 1005 kfree(gtt);
1000 return NULL; 1006 return NULL;
1001 } 1007 }
@@ -1021,7 +1027,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1021 1027
1022 if (slave && ttm->sg) { 1028 if (slave && ttm->sg) {
1023 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, 1029 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1024 gtt->ttm.dma_address, ttm->num_pages); 1030 gtt->ttm.dma_address,
1031 ttm->num_pages);
1025 ttm->state = tt_unbound; 1032 ttm->state = tt_unbound;
1026 return 0; 1033 return 0;
1027 } 1034 }
@@ -1335,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1335 if (adev->fw_vram_usage.size > 0 && 1342 if (adev->fw_vram_usage.size > 0 &&
1336 adev->fw_vram_usage.size <= vram_size) { 1343 adev->fw_vram_usage.size <= vram_size) {
1337 1344
1338 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, 1345 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
1339 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 1346 AMDGPU_GEM_DOMAIN_VRAM,
1340 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | 1347 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1341 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 1348 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1342 &adev->fw_vram_usage.reserved_bo); 1349 ttm_bo_type_kernel, NULL,
1350 &adev->fw_vram_usage.reserved_bo);
1343 if (r) 1351 if (r)
1344 goto error_create; 1352 goto error_create;
1345 1353
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0b237e027cab..24474294c92a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
413 if (!entry->base.bo) { 413 if (!entry->base.bo) {
414 r = amdgpu_bo_create(adev, 414 r = amdgpu_bo_create(adev,
415 amdgpu_vm_bo_size(adev, level), 415 amdgpu_vm_bo_size(adev, level),
416 AMDGPU_GPU_PAGE_SIZE, true, 416 AMDGPU_GPU_PAGE_SIZE,
417 AMDGPU_GEM_DOMAIN_VRAM, flags, 417 AMDGPU_GEM_DOMAIN_VRAM, flags,
418 NULL, resv, &pt); 418 ttm_bo_type_kernel, resv, &pt);
419 if (r) 419 if (r)
420 return r; 420 return r;
421 421
@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2409 AMDGPU_GEM_CREATE_SHADOW); 2409 AMDGPU_GEM_CREATE_SHADOW);
2410 2410
2411 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); 2411 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2412 r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM, 2412 r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
2413 flags, NULL, NULL, &vm->root.base.bo); 2413 ttm_bo_type_kernel, NULL, &vm->root.base.bo);
2414 if (r) 2414 if (r)
2415 goto error_free_sched_entity; 2415 goto error_free_sched_entity;
2416 2416
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
34#include <linux/backlight.h> 34#include <linux/backlight.h>
35#include "bif/bif_4_1_d.h" 35#include "bif/bif_4_1_d.h"
36 36
37static u8 37u8
38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) 38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
39{ 39{
40 u8 backlight_level; 40 u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
48 return backlight_level; 48 return backlight_level;
49} 49}
50 50
51static void 51void
52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, 52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
53 u8 backlight_level) 53 u8 backlight_level)
54{ 54{
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
25#define __ATOMBIOS_ENCODER_H__ 25#define __ATOMBIOS_ENCODER_H__
26 26
27u8 27u8
28amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
29void
30amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
31 u8 backlight_level);
32u8
28amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); 33amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
29void 34void
30amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, 35amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ddb814f7e952..98d1dd253596 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
65#define VOLTAGE_VID_OFFSET_SCALE1 625 65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100 66#define VOLTAGE_VID_OFFSET_SCALE2 100
67 67
68static const struct amd_pm_funcs ci_dpm_funcs;
69
68static const struct ci_pt_defaults defaults_hawaii_xt = 70static const struct ci_pt_defaults defaults_hawaii_xt =
69{ 71{
70 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 72 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -6241,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
6241{ 6243{
6242 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6244 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6243 6245
6246 adev->powerplay.pp_funcs = &ci_dpm_funcs;
6244 ci_dpm_set_irq_funcs(adev); 6247 ci_dpm_set_irq_funcs(adev);
6245 6248
6246 return 0; 6249 return 0;
@@ -6760,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
6760 } 6763 }
6761} 6764}
6762 6765
6763const struct amd_ip_funcs ci_dpm_ip_funcs = { 6766static const struct amd_ip_funcs ci_dpm_ip_funcs = {
6764 .name = "ci_dpm", 6767 .name = "ci_dpm",
6765 .early_init = ci_dpm_early_init, 6768 .early_init = ci_dpm_early_init,
6766 .late_init = ci_dpm_late_init, 6769 .late_init = ci_dpm_late_init,
@@ -6777,7 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
6777 .set_powergating_state = ci_dpm_set_powergating_state, 6780 .set_powergating_state = ci_dpm_set_powergating_state,
6778}; 6781};
6779 6782
6780const struct amd_pm_funcs ci_dpm_funcs = { 6783const struct amdgpu_ip_block_version ci_smu_ip_block =
6784{
6785 .type = AMD_IP_BLOCK_TYPE_SMC,
6786 .major = 7,
6787 .minor = 0,
6788 .rev = 0,
6789 .funcs = &ci_dpm_ip_funcs,
6790};
6791
6792static const struct amd_pm_funcs ci_dpm_funcs = {
6781 .pre_set_power_state = &ci_dpm_pre_set_power_state, 6793 .pre_set_power_state = &ci_dpm_pre_set_power_state,
6782 .set_power_state = &ci_dpm_set_power_state, 6794 .set_power_state = &ci_dpm_set_power_state,
6783 .post_set_power_state = &ci_dpm_post_set_power_state, 6795 .post_set_power_state = &ci_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4324184996a5..0df22030e713 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,7 +67,6 @@
67 67
68#include "amdgpu_dm.h" 68#include "amdgpu_dm.h"
69#include "amdgpu_amdkfd.h" 69#include "amdgpu_amdkfd.h"
70#include "amdgpu_powerplay.h"
71#include "dce_virtual.h" 70#include "dce_virtual.h"
72 71
73/* 72/*
@@ -1887,10 +1886,6 @@ static int cik_common_early_init(void *handle)
1887 return -EINVAL; 1886 return -EINVAL;
1888 } 1887 }
1889 1888
1890 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1891
1892 amdgpu_device_get_pcie_info(adev);
1893
1894 return 0; 1889 return 0;
1895} 1890}
1896 1891
@@ -2000,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2000 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 1995 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2001 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 1996 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2002 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 1997 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2003 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1998 if (amdgpu_dpm == -1)
1999 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2000 else
2001 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2004 if (adev->enable_virtual_display) 2002 if (adev->enable_virtual_display)
2005 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2003 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2006#if defined(CONFIG_DRM_AMD_DC) 2004#if defined(CONFIG_DRM_AMD_DC)
@@ -2018,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2018 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2016 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2019 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2020 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2018 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2021 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2019 if (amdgpu_dpm == -1)
2020 amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
2021 else
2022 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2022 if (adev->enable_virtual_display) 2023 if (adev->enable_virtual_display)
2023 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2024 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2024#if defined(CONFIG_DRM_AMD_DC) 2025#if defined(CONFIG_DRM_AMD_DC)
@@ -2036,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2036 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2037 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2037 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2038 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2038 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2039 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2039 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2040 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2040 if (adev->enable_virtual_display) 2041 if (adev->enable_virtual_display)
2041 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2042 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2042#if defined(CONFIG_DRM_AMD_DC) 2043#if defined(CONFIG_DRM_AMD_DC)
@@ -2055,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
2055 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2056 amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
2056 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2057 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
2057 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2058 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
2058 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2059 amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
2059 if (adev->enable_virtual_display) 2060 if (adev->enable_virtual_display)
2060 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2061 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2061#if defined(CONFIG_DRM_AMD_DC) 2062#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index c7b4349f6319..2a086610f74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,8 +24,7 @@
24#ifndef __CIK_DPM_H__ 24#ifndef __CIK_DPM_H__
25#define __CIK_DPM_H__ 25#define __CIK_DPM_H__
26 26
27extern const struct amd_ip_funcs ci_dpm_ip_funcs; 27extern const struct amdgpu_ip_block_version ci_smu_ip_block;
28extern const struct amd_ip_funcs kv_dpm_ip_funcs; 28extern const struct amdgpu_ip_block_version kv_smu_ip_block;
29extern const struct amd_pm_funcs ci_dpm_funcs; 29
30extern const struct amd_pm_funcs kv_dpm_funcs;
31#endif 30#endif
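
The cik_dpm.h change completes the pattern this series applies to every legacy DPM backend: instead of exporting bare amd_ip_funcs/amd_pm_funcs tables, each backend now exports one versioned SMU IP block and installs its own pp_funcs in early_init. A rough user-space sketch of that registration shape; types and values below are illustrative, not the kernel's.

	#include <stdio.h>

	enum block_type { BLOCK_TYPE_SMC };

	struct ip_funcs {
		const char *name;
		int (*early_init)(void *handle);
	};

	struct ip_block_version {
		enum block_type type;
		int major, minor, rev;
		const struct ip_funcs *funcs;
	};

	static int ci_early_init(void *handle)
	{
		/* the real ci_dpm_early_init() also installs
		 * adev->powerplay.pp_funcs at this point */
		(void)handle;
		return 0;
	}

	static const struct ip_funcs ci_dpm_funcs_tbl = {
		.name = "ci_dpm",
		.early_init = ci_early_init,
	};

	/* the one symbol the header now exports, mirroring ci_smu_ip_block */
	static const struct ip_block_version ci_smu_block = {
		.type = BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0,
		.funcs = &ci_dpm_funcs_tbl,
	};

	int main(void)
	{
		printf("%s v%d.%d.%d\n", ci_smu_block.funcs->name,
		       ci_smu_block.major, ci_smu_block.minor, ci_smu_block.rev);
		return ci_smu_block.funcs->early_init(NULL);
	}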
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 7ea900010702..452f88ea46a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2862,6 +2862,11 @@ static int dce_v10_0_hw_fini(void *handle)
2862 2862
2863static int dce_v10_0_suspend(void *handle) 2863static int dce_v10_0_suspend(void *handle)
2864{ 2864{
2865 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2866
2867 adev->mode_info.bl_level =
2868 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2869
2865 return dce_v10_0_hw_fini(handle); 2870 return dce_v10_0_hw_fini(handle);
2866} 2871}
2867 2872
@@ -2870,6 +2875,9 @@ static int dce_v10_0_resume(void *handle)
2870 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2871 int ret; 2876 int ret;
2872 2877
2878 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2879 adev->mode_info.bl_level);
2880
2873 ret = dce_v10_0_hw_init(handle); 2881 ret = dce_v10_0_hw_init(handle);
2874 2882
2875 /* turn on the BL */ 2883 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 158b92ea435f..a7c1c584a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2988,6 +2988,11 @@ static int dce_v11_0_hw_fini(void *handle)
2988 2988
2989static int dce_v11_0_suspend(void *handle) 2989static int dce_v11_0_suspend(void *handle)
2990{ 2990{
2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992
2993 adev->mode_info.bl_level =
2994 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2995
2991 return dce_v11_0_hw_fini(handle); 2996 return dce_v11_0_hw_fini(handle);
2992} 2997}
2993 2998
@@ -2996,6 +3001,9 @@ static int dce_v11_0_resume(void *handle)
2996 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3001 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2997 int ret; 3002 int ret;
2998 3003
3004 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
3005 adev->mode_info.bl_level);
3006
2999 ret = dce_v11_0_hw_init(handle); 3007 ret = dce_v11_0_hw_init(handle);
3000 3008
3001 /* turn on the BL */ 3009 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index ee2162e81da9..9f67b7fd3487 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2730,6 +2730,11 @@ static int dce_v6_0_hw_fini(void *handle)
2730 2730
2731static int dce_v6_0_suspend(void *handle) 2731static int dce_v6_0_suspend(void *handle)
2732{ 2732{
2733 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2734
2735 adev->mode_info.bl_level =
2736 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2737
2733 return dce_v6_0_hw_fini(handle); 2738 return dce_v6_0_hw_fini(handle);
2734} 2739}
2735 2740
@@ -2738,6 +2743,9 @@ static int dce_v6_0_resume(void *handle)
2738 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2743 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2739 int ret; 2744 int ret;
2740 2745
2746 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2747 adev->mode_info.bl_level);
2748
2741 ret = dce_v6_0_hw_init(handle); 2749 ret = dce_v6_0_hw_init(handle);
2742 2750
2743 /* turn on the BL */ 2751 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8dbe97dff58c..f55422cbd77a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2760,6 +2760,11 @@ static int dce_v8_0_hw_fini(void *handle)
2760 2760
2761static int dce_v8_0_suspend(void *handle) 2761static int dce_v8_0_suspend(void *handle)
2762{ 2762{
2763 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2764
2765 adev->mode_info.bl_level =
2766 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2767
2763 return dce_v8_0_hw_fini(handle); 2768 return dce_v8_0_hw_fini(handle);
2764} 2769}
2765 2770
@@ -2768,6 +2773,9 @@ static int dce_v8_0_resume(void *handle)
2768 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2773 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2769 int ret; 2774 int ret;
2770 2775
2776 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2777 adev->mode_info.bl_level);
2778
2771 ret = dce_v8_0_hw_init(handle); 2779 ret = dce_v8_0_hw_init(handle);
2772 2780
2773 /* turn on the BL */ 2781 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d73bbb092202..d1d2c27156b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1261,23 +1261,23 @@ static int gfx_v9_0_sw_init(void *handle)
1261 adev->gfx.mec.num_queue_per_pipe = 8; 1261 adev->gfx.mec.num_queue_per_pipe = 8;
1262 1262
1263 /* KIQ event */ 1263 /* KIQ event */
1264 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq); 1264 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
1265 if (r) 1265 if (r)
1266 return r; 1266 return r;
1267 1267
1268 /* EOP Event */ 1268 /* EOP Event */
1269 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq); 1269 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
1270 if (r) 1270 if (r)
1271 return r; 1271 return r;
1272 1272
1273 /* Privileged reg */ 1273 /* Privileged reg */
1274 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184, 1274 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
1275 &adev->gfx.priv_reg_irq); 1275 &adev->gfx.priv_reg_irq);
1276 if (r) 1276 if (r)
1277 return r; 1277 return r;
1278 1278
1279 /* Privileged inst */ 1279 /* Privileged inst */
1280 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185, 1280 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
1281 &adev->gfx.priv_inst_irq); 1281 &adev->gfx.priv_inst_irq);
1282 if (r) 1282 if (r)
1283 return r; 1283 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 67cd1fe17649..a70cbc45c4c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -836,9 +836,9 @@ static int gmc_v9_0_sw_init(void *handle)
836 836
837 spin_lock_init(&adev->gmc.invalidate_lock); 837 spin_lock_init(&adev->gmc.invalidate_lock);
838 838
839 adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
839 switch (adev->asic_type) { 840 switch (adev->asic_type) {
840 case CHIP_RAVEN: 841 case CHIP_RAVEN:
841 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
842 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { 842 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
843 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 843 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
844 } else { 844 } else {
@@ -849,8 +849,6 @@ static int gmc_v9_0_sw_init(void *handle)
849 } 849 }
850 break; 850 break;
851 case CHIP_VEGA10: 851 case CHIP_VEGA10:
852 /* XXX Don't know how to get VRAM type yet. */
853 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
854 /* 852 /*
855 * To fulfill 4-level page support, 853 * To fulfill 4-level page support,
856 * vm size is 256TB (48bit), maximum size of Vega10, 854 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -863,9 +861,9 @@ static int gmc_v9_0_sw_init(void *handle)
863 } 861 }
864 862
865 /* This interrupt is VMC page fault.*/ 863 /* This interrupt is VMC page fault.*/
866 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, 864 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
867 &adev->gmc.vm_fault); 865 &adev->gmc.vm_fault);
868 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0, 866 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
869 &adev->gmc.vm_fault); 867 &adev->gmc.vm_fault);
870 868
871 if (r) 869 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 8766681cfd3f..81babe026529 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,6 +42,8 @@
42#define KV_MINIMUM_ENGINE_CLOCK 800 42#define KV_MINIMUM_ENGINE_CLOCK 800
43#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
44 44
45static const struct amd_pm_funcs kv_dpm_funcs;
46
45static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 47static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
46static int kv_enable_nb_dpm(struct amdgpu_device *adev, 48static int kv_enable_nb_dpm(struct amdgpu_device *adev,
47 bool enable); 49 bool enable);
@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
2960{ 2962{
2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 2964
2965 adev->powerplay.pp_funcs = &kv_dpm_funcs;
2963 kv_dpm_set_irq_funcs(adev); 2966 kv_dpm_set_irq_funcs(adev);
2964 2967
2965 return 0; 2968 return 0;
@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
3301 } 3304 }
3302} 3305}
3303 3306
3304const struct amd_ip_funcs kv_dpm_ip_funcs = { 3307static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3305 .name = "kv_dpm", 3308 .name = "kv_dpm",
3306 .early_init = kv_dpm_early_init, 3309 .early_init = kv_dpm_early_init,
3307 .late_init = kv_dpm_late_init, 3310 .late_init = kv_dpm_late_init,
@@ -3318,7 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
3318 .set_powergating_state = kv_dpm_set_powergating_state, 3321 .set_powergating_state = kv_dpm_set_powergating_state,
3319}; 3322};
3320 3323
3321const struct amd_pm_funcs kv_dpm_funcs = { 3324const struct amdgpu_ip_block_version kv_smu_ip_block =
3325{
3326 .type = AMD_IP_BLOCK_TYPE_SMC,
3327 .major = 1,
3328 .minor = 0,
3329 .rev = 0,
3330 .funcs = &kv_dpm_ip_funcs,
3331};
3332
3333static const struct amd_pm_funcs kv_dpm_funcs = {
3322 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3334 .pre_set_power_state = &kv_dpm_pre_set_power_state,
3323 .set_power_state = &kv_dpm_set_power_state, 3335 .set_power_state = &kv_dpm_set_power_state,
3324 .post_set_power_state = &kv_dpm_post_set_power_state, 3336 .post_set_power_state = &kv_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 271452d3999a..8fb933c62cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -33,56 +33,34 @@
33 33
34static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) 34static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
35{ 35{
36 u32 reg; 36 WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
37 int timeout = AI_MAILBOX_TIMEDOUT;
38 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
39
40 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
41 mmBIF_BX_PF0_MAILBOX_CONTROL));
42 reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
43 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
44 mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
45
46 /*Wait for RCV_MSG_VALID to be 0*/
47 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
48 mmBIF_BX_PF0_MAILBOX_CONTROL));
49 while (reg & mask) {
50 if (timeout <= 0) {
51 pr_err("RCV_MSG_VALID is not cleared\n");
52 break;
53 }
54 mdelay(1);
55 timeout -=1;
56
57 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
58 mmBIF_BX_PF0_MAILBOX_CONTROL));
59 }
60} 37}
61 38
62static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) 39static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
63{ 40{
64 u32 reg; 41 WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
42}
65 43
66 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 44/*
67 			mmBIF_BX_PF0_MAILBOX_CONTROL));	45 * this peek_msg could *only* be called in IRQ routine because in IRQ routine
68 	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,	46 * the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL must already be set to 1
69 TRN_MSG_VALID, val ? 1 : 0); 47 * by host.
70 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL), 48 *
71 			reg);	49 * if called not in IRQ routine, this peek_msg cannot be guaranteed to return
50 * the correct value, since it may not sample RCV_DW0 while
51 * RCV_MSG_VALID is set by host.
52 */
53static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
54{
55 return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
56 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
72} 57}
73 58
59
74static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, 60static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
75 enum idh_event event) 61 enum idh_event event)
76{ 62{
77 u32 reg; 63 u32 reg;
78 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
79
80 if (event != IDH_FLR_NOTIFICATION_CMPL) {
81 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
82 mmBIF_BX_PF0_MAILBOX_CONTROL));
83 if (!(reg & mask))
84 return -ENOENT;
85 }
86 64
87 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 65 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
88 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0)); 66 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
94 return 0; 72 return 0;
95} 73}
96 74
75static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
76 return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
77}
78
97static int xgpu_ai_poll_ack(struct amdgpu_device *adev) 79static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
98{ 80{
99 int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 81 int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
100 u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK); 82 u8 reg;
101 u32 reg; 83
84 do {
85 reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
86 if (reg & 2)
87 return 0;
102 88
103 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
104 mmBIF_BX_PF0_MAILBOX_CONTROL));
105 while (!(reg & mask)) {
106 if (timeout <= 0) {
107 pr_err("Doesn't get ack from pf.\n");
108 r = -ETIME;
109 break;
110 }
111 mdelay(5); 89 mdelay(5);
112 timeout -= 5; 90 timeout -= 5;
91 } while (timeout > 1);
113 92
114 		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,	93 	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
115 mmBIF_BX_PF0_MAILBOX_CONTROL));
116 }
117 94
118 return r; 95 return -ETIME;
119} 96}
120 97
121static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) 98static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
122{ 99{
123 int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 100 int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
124
125 r = xgpu_ai_mailbox_rcv_msg(adev, event);
126 while (r) {
127 if (timeout <= 0) {
128 pr_err("Doesn't get msg:%d from pf.\n", event);
129 r = -ETIME;
130 break;
131 }
132 mdelay(5);
133 timeout -= 5;
134 101
102 do {
135 r = xgpu_ai_mailbox_rcv_msg(adev, event); 103 r = xgpu_ai_mailbox_rcv_msg(adev, event);
136 } 104 if (!r)
105 return 0;
137 106
138 return r; 107 msleep(10);
108 timeout -= 10;
109 } while (timeout > 1);
110
	111 	pr_err("Didn't get msg %d from pf, error=%d\n", event, r);
112
113 return -ETIME;
139} 114}
140 115
141static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, 116static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
142 enum idh_request req, u32 data1, u32 data2, u32 data3) { 117 enum idh_request req, u32 data1, u32 data2, u32 data3) {
143 u32 reg; 118 u32 reg;
144 int r; 119 int r;
120 uint8_t trn;
121
122 /* IMPORTANT:
	123 	 * clear TRN_MSG_VALID to clear host's RCV_MSG_ACK,
	124 	 * and with host's RCV_MSG_ACK cleared hw automatically clears VF's
	125 	 * TRN_MSG_ACK; otherwise xgpu_ai_poll_ack() below would return
	126 	 * immediately
127 */
128 do {
129 xgpu_ai_mailbox_set_valid(adev, false);
130 trn = xgpu_ai_peek_ack(adev);
131 if (trn) {
	132 			pr_err("trn=%x ACK should not assert! waiting again!\n", trn);
133 msleep(1);
134 }
135 } while(trn);
145 136
146 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 137 reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
147 mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); 138 mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
245{ 236{
246 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); 237 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
247 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); 238 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
248 239 int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
249 /* wait until RCV_MSG become 3 */ 240 int locked;
250 if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { 241
251 pr_err("failed to recieve FLR_CMPL\n"); 242 /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
252 return; 243 * otherwise the mailbox msg will be ruined/reseted by
253 } 244 * the VF FLR.
254 245 *
255 /* Trigger recovery due to world switch failure */ 246 * we can unlock the lock_reset to allow "amdgpu_job_timedout"
256 amdgpu_device_gpu_recover(adev, NULL, false); 247 * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
248 * which means host side had finished this VF's FLR.
249 */
250 locked = mutex_trylock(&adev->lock_reset);
251 if (locked)
252 adev->in_gpu_reset = 1;
253
254 do {
255 if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
256 goto flr_done;
257
258 msleep(10);
259 timeout -= 10;
260 } while (timeout > 1);
261
262flr_done:
263 if (locked)
264 mutex_unlock(&adev->lock_reset);
265
266 /* Trigger recovery for world switch failure if no TDR */
267 if (amdgpu_lockup_timeout == 0)
268 amdgpu_device_gpu_recover(adev, NULL, true);
257} 269}
258 270
259static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, 271static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
274 struct amdgpu_irq_src *source, 286 struct amdgpu_irq_src *source,
275 struct amdgpu_iv_entry *entry) 287 struct amdgpu_iv_entry *entry)
276{ 288{
277 int r; 289 enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
278 290
279 /* trigger gpu-reset by hypervisor only if TDR disbaled */ 291 switch (event) {
280 if (!amdgpu_gpu_recovery) { 292 case IDH_FLR_NOTIFICATION:
281 /* see what event we get */ 293 if (amdgpu_sriov_runtime(adev))
282 r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); 294 schedule_work(&adev->virt.flr_work);
283 295 break;
284 /* sometimes the interrupt is delayed to inject to VM, so under such case 296 /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
285 		 * the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus	297 	 * it for now since that polling thread will handle it;
286 		 * above recieve message could be failed, we should schedule the flr_work	298 	 * other msgs like flr complete are not handled here.
287 * anyway
288 */ 299 */
289 if (r) { 300 case IDH_CLR_MSG_BUF:
290 DRM_ERROR("FLR_NOTIFICATION is missed\n"); 301 case IDH_FLR_NOTIFICATION_CMPL:
291 xgpu_ai_mailbox_send_ack(adev); 302 case IDH_READY_TO_ACCESS_GPU:
292 } 303 default:
293 304 break;
294 schedule_work(&adev->virt.flr_work);
295 } 305 }
296 306
297 return 0; 307 return 0;
@@ -319,11 +329,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
319{ 329{
320 int r; 330 int r;
321 331
322 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); 332 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
323 if (r) 333 if (r)
324 return r; 334 return r;
325 335
326 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); 336 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
327 if (r) { 337 if (r) {
328 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); 338 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
329 return r; 339 return r;
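
The rewritten xgpu_ai_poll_ack()/xgpu_ai_poll_msg() above replace ad-hoc wait loops with one idiom: retry a check every few milliseconds until a millisecond budget is exhausted, then fail with -ETIME. A standalone sketch of that bounded-poll idiom; poll_until, check_fn, and the budget values are illustrative.

	#include <stdio.h>
	#include <unistd.h>

	#define POLL_BUDGET_MS		100	/* stands in for AI_MAILBOX_POLL_*_TIMEDOUT */
	#define POLL_INTERVAL_MS	10

	/* Retry check_fn until it reports success (0) or the budget runs out. */
	static int poll_until(int (*check_fn)(void *), void *arg)
	{
		int timeout = POLL_BUDGET_MS;

		do {
			if (check_fn(arg) == 0)
				return 0;
			usleep(POLL_INTERVAL_MS * 1000);	/* msleep() in the kernel */
			timeout -= POLL_INTERVAL_MS;
		} while (timeout > 0);

		return -1;	/* the kernel versions return -ETIME here */
	}

	/* Toy check: succeeds on the third attempt. */
	static int ready_after_three(void *arg)
	{
		int *calls = arg;

		return ++(*calls) >= 3 ? 0 : -1;
	}

	int main(void)
	{
		int calls = 0;

		printf("poll_until -> %d after %d calls\n",
		       poll_until(ready_after_three, &calls), calls);
		return 0;
	}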
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 67e78576a9eb..b4a9ceea334b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,9 @@
24#ifndef __MXGPU_AI_H__ 24#ifndef __MXGPU_AI_H__
25#define __MXGPU_AI_H__ 25#define __MXGPU_AI_H__
26 26
27#define AI_MAILBOX_TIMEDOUT 12000 27#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
28#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
29#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
28 30
29enum idh_request { 31enum idh_request {
30 IDH_REQ_GPU_INIT_ACCESS = 1, 32 IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
51int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev); 53int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
52void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev); 54void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
53 55
56#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
57#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
58
54#endif 59#endif
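
The two OFFSET_BYTE macros above turn the mailbox-control register's dword offset into byte addresses: multiplying by 4 gives the byte address of the low (TRN) byte, and +1 selects the next (RCV) byte, which is what lets the new code drive individual bytes of the 32-bit register with RREG8/WREG8 on the little-endian layout these registers use. A small sketch of that arithmetic against a fake register space; MAILBOX_CONTROL_DW is a made-up offset for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define MAILBOX_CONTROL_DW	0x48			/* hypothetical dword offset */
	#define TRN_OFFSET_BYTE		(MAILBOX_CONTROL_DW * 4)
	#define RCV_OFFSET_BYTE		(MAILBOX_CONTROL_DW * 4 + 1)

	int main(void)
	{
		uint8_t mmio[0x200] = { 0 };	/* stand-in for the BIF register space */

		mmio[TRN_OFFSET_BYTE] = 1;	/* like WREG8(AI_MAIBOX_..._TRN_..., 1) */
		mmio[RCV_OFFSET_BYTE] = 2;	/* like WREG8(AI_MAIBOX_..._RCV_..., 2) */

		printf("TRN byte at 0x%x = %u, RCV byte at 0x%x = %u\n",
		       TRN_OFFSET_BYTE, mmio[TRN_OFFSET_BYTE],
		       RCV_OFFSET_BYTE, mmio[RCV_OFFSET_BYTE]);
		return 0;
	}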
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 215743df0957..9448c45d1b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -31,8 +31,6 @@
31#include "sdma0/sdma0_4_0_sh_mask.h" 31#include "sdma0/sdma0_4_0_sh_mask.h"
32#include "sdma1/sdma1_4_0_offset.h" 32#include "sdma1/sdma1_4_0_offset.h"
33#include "sdma1/sdma1_4_0_sh_mask.h" 33#include "sdma1/sdma1_4_0_sh_mask.h"
34#include "mmhub/mmhub_1_0_offset.h"
35#include "mmhub/mmhub_1_0_sh_mask.h"
36#include "hdp/hdp_4_0_offset.h" 34#include "hdp/hdp_4_0_offset.h"
37#include "sdma0/sdma0_4_1_default.h" 35#include "sdma0/sdma0_4_1_default.h"
38 36
@@ -1172,13 +1170,13 @@ static int sdma_v4_0_sw_init(void *handle)
1172 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1170 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1173 1171
1174 /* SDMA trap event */ 1172 /* SDMA trap event */
1175 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224, 1173 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
1176 &adev->sdma.trap_irq); 1174 &adev->sdma.trap_irq);
1177 if (r) 1175 if (r)
1178 return r; 1176 return r;
1179 1177
1180 /* SDMA trap event */ 1178 /* SDMA trap event */
1181 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224, 1179 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
1182 &adev->sdma.trap_irq); 1180 &adev->sdma.trap_irq);
1183 if (r) 1181 if (r)
1184 return r; 1182 return r;
@@ -1333,7 +1331,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
1333{ 1331{
1334 DRM_DEBUG("IH: SDMA trap\n"); 1332 DRM_DEBUG("IH: SDMA trap\n");
1335 switch (entry->client_id) { 1333 switch (entry->client_id) {
1336 case AMDGPU_IH_CLIENTID_SDMA0: 1334 case SOC15_IH_CLIENTID_SDMA0:
1337 switch (entry->ring_id) { 1335 switch (entry->ring_id) {
1338 case 0: 1336 case 0:
1339 amdgpu_fence_process(&adev->sdma.instance[0].ring); 1337 amdgpu_fence_process(&adev->sdma.instance[0].ring);
@@ -1349,7 +1347,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
1349 break; 1347 break;
1350 } 1348 }
1351 break; 1349 break;
1352 case AMDGPU_IH_CLIENTID_SDMA1: 1350 case SOC15_IH_CLIENTID_SDMA1:
1353 switch (entry->ring_id) { 1351 switch (entry->ring_id) {
1354 case 0: 1352 case 0:
1355 amdgpu_fence_process(&adev->sdma.instance[1].ring); 1353 amdgpu_fence_process(&adev->sdma.instance[1].ring);
@@ -1399,7 +1397,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
1399 if (def != data) 1397 if (def != data)
1400 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data); 1398 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
1401 1399
1402 if (adev->asic_type == CHIP_VEGA10) { 1400 if (adev->sdma.num_instances > 1) {
1403 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL)); 1401 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
1404 data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | 1402 data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1405 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | 1403 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1427,7 +1425,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
1427 if (def != data) 1425 if (def != data)
1428 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data); 1426 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
1429 1427
1430 if (adev->asic_type == CHIP_VEGA10) { 1428 if (adev->sdma.num_instances > 1) {
1431 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL)); 1429 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
1432 data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK | 1430 data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1433 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK | 1431 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1458,7 +1456,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
1458 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data); 1456 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1459 1457
1460 /* 1-not override: enable sdma1 mem light sleep */ 1458 /* 1-not override: enable sdma1 mem light sleep */
1461 if (adev->asic_type == CHIP_VEGA10) { 1459 if (adev->sdma.num_instances > 1) {
1462 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL)); 1460 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
1463 data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; 1461 data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1464 if (def != data) 1462 if (def != data)
@@ -1472,7 +1470,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
1472 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data); 1470 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1473 1471
1474 /* 0-override:disable sdma1 mem light sleep */ 1472 /* 0-override:disable sdma1 mem light sleep */
1475 if (adev->asic_type == CHIP_VEGA10) { 1473 if (adev->sdma.num_instances > 1) {
1476 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL)); 1474 def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
1477 data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; 1475 data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1478 if (def != data) 1476 if (def != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 6e61b56bfbfc..b154667a8fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -32,7 +32,7 @@
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h" 34#include "amd_pcie.h"
35#include "amdgpu_powerplay.h" 35#include "si_dpm.h"
36#include "sid.h" 36#include "sid.h"
37#include "si_ih.h" 37#include "si_ih.h"
38#include "gfx_v6_0.h" 38#include "gfx_v6_0.h"
@@ -1983,7 +1983,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
1986 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1986 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
1987 if (adev->enable_virtual_display) 1987 if (adev->enable_virtual_display)
1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1989 else 1989 else
@@ -1997,7 +1997,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2000 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2000 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2001 if (adev->enable_virtual_display) 2001 if (adev->enable_virtual_display)
2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2003 else 2003 else
@@ -2011,7 +2011,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2014 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2014 amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2015 if (adev->enable_virtual_display) 2015 if (adev->enable_virtual_display)
2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); 2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8137c02fd16a..3bfcf0d257ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -67,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
67MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 68MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
69 69
70static const struct amd_pm_funcs si_dpm_funcs;
71
70union power_info { 72union power_info {
71 struct _ATOM_POWERPLAY_INFO info; 73 struct _ATOM_POWERPLAY_INFO info;
72 struct _ATOM_POWERPLAY_INFO_V2 info_2; 74 struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -7914,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
7914 7916
7915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7917 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7916 7918
7919 adev->powerplay.pp_funcs = &si_dpm_funcs;
7917 si_dpm_set_irq_funcs(adev); 7920 si_dpm_set_irq_funcs(adev);
7918 return 0; 7921 return 0;
7919} 7922}
@@ -8014,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
8014 } 8017 }
8015} 8018}
8016 8019
8017const struct amd_ip_funcs si_dpm_ip_funcs = { 8020static const struct amd_ip_funcs si_dpm_ip_funcs = {
8018 .name = "si_dpm", 8021 .name = "si_dpm",
8019 .early_init = si_dpm_early_init, 8022 .early_init = si_dpm_early_init,
8020 .late_init = si_dpm_late_init, 8023 .late_init = si_dpm_late_init,
@@ -8031,7 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
8031 .set_powergating_state = si_dpm_set_powergating_state, 8034 .set_powergating_state = si_dpm_set_powergating_state,
8032}; 8035};
8033 8036
8034const struct amd_pm_funcs si_dpm_funcs = { 8037const struct amdgpu_ip_block_version si_smu_ip_block =
8038{
8039 .type = AMD_IP_BLOCK_TYPE_SMC,
8040 .major = 6,
8041 .minor = 0,
8042 .rev = 0,
8043 .funcs = &si_dpm_ip_funcs,
8044};
8045
8046static const struct amd_pm_funcs si_dpm_funcs = {
8035 .pre_set_power_state = &si_dpm_pre_set_power_state, 8047 .pre_set_power_state = &si_dpm_pre_set_power_state,
8036 .set_power_state = &si_dpm_set_power_state, 8048 .set_power_state = &si_dpm_set_power_state,
8037 .post_set_power_state = &si_dpm_post_set_power_state, 8049 .post_set_power_state = &si_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 9fe343de3477..6b7d292b919f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -245,8 +245,7 @@ enum si_display_gap
245 SI_PM_DISPLAY_GAP_IGNORE = 3, 245 SI_PM_DISPLAY_GAP_IGNORE = 3,
246}; 246};
247 247
248extern const struct amd_ip_funcs si_dpm_ip_funcs; 248extern const struct amdgpu_ip_block_version si_smu_ip_block;
249extern const struct amd_pm_funcs si_dpm_funcs;
250 249
251struct ni_leakage_coeffients 250struct ni_leakage_coeffients
252{ 251{
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8dc8b72ed49b..c6e857325b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -57,7 +57,6 @@
57#include "uvd_v7_0.h" 57#include "uvd_v7_0.h"
58#include "vce_v4_0.h" 58#include "vce_v4_0.h"
59#include "vcn_v1_0.h" 59#include "vcn_v1_0.h"
60#include "amdgpu_powerplay.h"
61#include "dce_virtual.h" 60#include "dce_virtual.h"
62#include "mxgpu_ai.h" 61#include "mxgpu_ai.h"
63 62
@@ -531,10 +530,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
531 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); 530 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
532 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 531 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
533 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 532 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
534 if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1) 533 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
535 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
536 if (!amdgpu_sriov_vf(adev)) 534 if (!amdgpu_sriov_vf(adev))
537 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 535 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
538 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 536 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
539 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 537 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
540#if defined(CONFIG_DRM_AMD_DC) 538#if defined(CONFIG_DRM_AMD_DC)
@@ -553,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
553 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 551 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
554 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 552 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
555 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 553 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
556 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 554 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
557 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 555 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
558 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 556 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
559#if defined(CONFIG_DRM_AMD_DC) 557#if defined(CONFIG_DRM_AMD_DC)
@@ -692,10 +690,6 @@ static int soc15_common_early_init(void *handle)
692 xgpu_ai_mailbox_set_irq_funcs(adev); 690 xgpu_ai_mailbox_set_irq_funcs(adev);
693 } 691 }
694 692
695 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
696
697 amdgpu_device_get_pcie_info(adev);
698
699 return 0; 693 return 0;
700} 694}
701 695
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e54cc3ca2303..eddc57f3b72a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -390,13 +390,13 @@ static int uvd_v7_0_sw_init(void *handle)
390 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 390 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
391 391
392 /* UVD TRAP */ 392 /* UVD TRAP */
393 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq); 393 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
394 if (r) 394 if (r)
395 return r; 395 return r;
396 396
397 /* UVD ENC TRAP */ 397 /* UVD ENC TRAP */
398 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 398 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
399 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); 399 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
400 if (r) 400 if (r)
401 return r; 401 return r;
402 } 402 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 2329b310ccf2..73fd48d6c756 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -420,7 +420,7 @@ static int vce_v4_0_sw_init(void *handle)
420 unsigned size; 420 unsigned size;
421 int r, i; 421 int r, i;
422 422
423 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq); 423 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
424 if (r) 424 if (r)
425 return r; 425 return r;
426 426
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index fdf4ac9313cf..8c132673bc79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -75,13 +75,13 @@ static int vcn_v1_0_sw_init(void *handle)
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76 76
77 /* VCN DEC TRAP */ 77 /* VCN DEC TRAP */
78 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq); 78 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
79 if (r) 79 if (r)
80 return r; 80 return r;
81 81
82 /* VCN ENC TRAP */ 82 /* VCN ENC TRAP */
83 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 83 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
84 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119, 84 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
85 &adev->vcn.irq); 85 &adev->vcn.irq);
86 if (r) 86 if (r)
87 return r; 87 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cc8ce7e352a8..5ae5ed2e62d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -245,8 +245,8 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
245 * some faults get cleared. 245 * some faults get cleared.
246 */ 246 */
247 switch (dw0 & 0xff) { 247 switch (dw0 & 0xff) {
248 case AMDGPU_IH_CLIENTID_VMC: 248 case SOC15_IH_CLIENTID_VMC:
249 case AMDGPU_IH_CLIENTID_UTCL2: 249 case SOC15_IH_CLIENTID_UTCL2:
250 break; 250 break;
251 default: 251 default:
252 /* Not a VM fault */ 252 /* Not a VM fault */
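The AMDGPU_IH_CLIENTID_* to SOC15_IH_CLIENTID_* renames above (UVD, VCE, VCN, and the vega10 IH prescreen) only move the constants into a SOC15-scoped namespace; the decode is unchanged: the low byte of the first IV dword is the client ID, and only VMC/UTCL2 entries count as VM faults. A hedged sketch of that check; the numeric IDs are illustrative, not copied from soc15_ih_clientid.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Client IDs as used in the hunk above; the values are illustrative. */
enum { SOC15_IH_CLIENTID_VMC = 0x12, SOC15_IH_CLIENTID_UTCL2 = 0x1b };

/* Return true when interrupt-vector dword 0 denotes a VM fault. */
static bool iv_is_vm_fault(uint32_t dw0)
{
	switch (dw0 & 0xff) {		/* low byte = client id */
	case SOC15_IH_CLIENTID_VMC:
	case SOC15_IH_CLIENTID_UTCL2:
		return true;
	default:
		return false;		/* not a VM fault */
	}
}

int main(void)
{
	printf("%d\n", iv_is_vm_fault((0x1234u << 8) | SOC15_IH_CLIENTID_VMC));
	return 0;
}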
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 61360a1552d8..e7fb165cc9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,7 +71,6 @@
71#include "uvd_v5_0.h" 71#include "uvd_v5_0.h"
72#include "uvd_v6_0.h" 72#include "uvd_v6_0.h"
73#include "vce_v3_0.h" 73#include "vce_v3_0.h"
74#include "amdgpu_powerplay.h"
75#if defined(CONFIG_DRM_AMD_ACP) 74#if defined(CONFIG_DRM_AMD_ACP)
76#include "amdgpu_acp.h" 75#include "amdgpu_acp.h"
77#endif 76#endif
@@ -1097,11 +1096,6 @@ static int vi_common_early_init(void *handle)
1097 xgpu_vi_mailbox_set_irq_funcs(adev); 1096 xgpu_vi_mailbox_set_irq_funcs(adev);
1098 } 1097 }
1099 1098
1100 /* vi use smc load by default */
1101 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1102
1103 amdgpu_device_get_pcie_info(adev);
1104
1105 return 0; 1099 return 0;
1106} 1100}
1107 1101
@@ -1516,7 +1510,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1516 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1510 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1517 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); 1511 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1518 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block); 1512 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1519 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1513 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1520 if (adev->enable_virtual_display) 1514 if (adev->enable_virtual_display)
1521 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1515 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1522 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); 1516 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1526,7 +1520,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1526 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1520 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1527 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); 1521 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1528 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1522 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1529 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1523 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1530 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1524 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1531 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1525 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1532#if defined(CONFIG_DRM_AMD_DC) 1526#if defined(CONFIG_DRM_AMD_DC)
@@ -1546,7 +1540,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1546 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1540 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1547 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1541 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1548 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1542 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1549 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1543 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1550 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1544 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1551 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1545 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1552#if defined(CONFIG_DRM_AMD_DC) 1546#if defined(CONFIG_DRM_AMD_DC)
@@ -1568,7 +1562,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1568 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1562 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1569 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); 1563 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1570 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1564 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1571 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1565 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1572 if (adev->enable_virtual_display) 1566 if (adev->enable_virtual_display)
1573 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1567 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1574#if defined(CONFIG_DRM_AMD_DC) 1568#if defined(CONFIG_DRM_AMD_DC)
@@ -1586,7 +1580,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1586 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1580 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1587 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1581 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1588 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1582 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1589 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1583 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1590 if (adev->enable_virtual_display) 1584 if (adev->enable_virtual_display)
1591 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1585 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1592#if defined(CONFIG_DRM_AMD_DC) 1586#if defined(CONFIG_DRM_AMD_DC)
@@ -1607,7 +1601,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1607 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1601 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1608 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1602 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1609 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1603 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1610 amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1604 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1611 if (adev->enable_virtual_display) 1605 if (adev->enable_virtual_display)
1612 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1606 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1613#if defined(CONFIG_DRM_AMD_DC) 1607#if defined(CONFIG_DRM_AMD_DC)
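The vi.c and soc15.c hunks drop the per-ASIC calls to amdgpu_ucode_get_load_type() and amdgpu_device_get_pcie_info(), presumably because this series centralizes them in the shared device-init path (amdgpu_device.c is touched in this merge). A hypothetical sketch of that hoisting pattern, with simplified stand-in names:

#include <stdio.h>

/* Hypothetical model: per-ASIC early_init callbacks stop duplicating
 * shared setup, and the common caller performs it once for all ASICs. */
struct adev { const char *asic; int fw_load_type; int pcie_gen; };

static int vi_early_init(struct adev *a) { (void)a; return 0; } /* ASIC bits only */

static int device_ip_early_init(struct adev *a, int (*early)(struct adev *))
{
	int r = early(a);

	if (r)
		return r;
	a->fw_load_type = 1; /* stand-in for amdgpu_ucode_get_load_type() */
	a->pcie_gen = 3;     /* stand-in for amdgpu_device_get_pcie_info() */
	return 0;
}

int main(void)
{
	struct adev a = { .asic = "vi" };

	if (device_ip_early_init(&a, vi_early_init) == 0)
		printf("%s: load_type=%d pcie_gen=%d\n",
		       a.asic, a.fw_load_type, a.pcie_gen);
	return 0;
}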
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ec3285f65517..5b124a67404c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -11,7 +11,7 @@ config DRM_AMD_DC
11 11
12config DRM_AMD_DC_PRE_VEGA 12config DRM_AMD_DC_PRE_VEGA
13 bool "DC support for Polaris and older ASICs" 13 bool "DC support for Polaris and older ASICs"
14 default n 14 default y
15 help 15 help
16 Choose this option to enable the new DC support for older asics 16 Choose this option to enable the new DC support for older asics
17 by default. This includes Polaris, Carrizo, Tonga, Bonaire, 17 by default. This includes Polaris, Carrizo, Tonga, Bonaire,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7e5c5c9eeb4f..ae512ecb65ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1131,7 +1131,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1131 1131
1132 if (adev->asic_type == CHIP_VEGA10 || 1132 if (adev->asic_type == CHIP_VEGA10 ||
1133 adev->asic_type == CHIP_RAVEN) 1133 adev->asic_type == CHIP_RAVEN)
1134 client_id = AMDGPU_IH_CLIENTID_DCE; 1134 client_id = SOC15_IH_CLIENTID_DCE;
1135 1135
1136 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 1136 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1137 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 1137 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1231,7 +1231,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1231 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 1231 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1232 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 1232 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1233 i++) { 1233 i++) {
1234 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq); 1234 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1235 1235
1236 if (r) { 1236 if (r) {
1237 DRM_ERROR("Failed to add crtc irq id!\n"); 1237 DRM_ERROR("Failed to add crtc irq id!\n");
@@ -1255,7 +1255,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1255 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 1255 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1256 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; 1256 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1257 i++) { 1257 i++) {
1258 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 1258 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1259 if (r) { 1259 if (r) {
1260 DRM_ERROR("Failed to add page flip irq id!\n"); 1260 DRM_ERROR("Failed to add page flip irq id!\n");
1261 return r; 1261 return r;
@@ -1276,7 +1276,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1276 } 1276 }
1277 1277
1278 /* HPD */ 1278 /* HPD */
1279 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 1279 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1280 &adev->hpd_irq); 1280 &adev->hpd_irq);
1281 if (r) { 1281 if (r) {
1282 DRM_ERROR("Failed to add hpd irq id!\n"); 1282 DRM_ERROR("Failed to add hpd irq id!\n");
@@ -1365,6 +1365,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1365 1365
1366#endif 1366#endif
1367 1367
1368static int initialize_plane(struct amdgpu_display_manager *dm,
1369 struct amdgpu_mode_info *mode_info,
1370 int plane_id)
1371{
1372 struct amdgpu_plane *plane;
1373 unsigned long possible_crtcs;
1374 int ret = 0;
1375
1376 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1377 mode_info->planes[plane_id] = plane;
1378
1379 if (!plane) {
1380 DRM_ERROR("KMS: Failed to allocate plane\n");
1381 return -ENOMEM;
1382 }
1383 plane->base.type = mode_info->plane_type[plane_id];
1384
1385 /*
 1386	 * HACK: IGT tests expect that each plane can only have
 1387	 * one possible CRTC. For now, set one CRTC for each
1388 * plane that is not an underlay, but still allow multiple
1389 * CRTCs for underlay planes.
1390 */
1391 possible_crtcs = 1 << plane_id;
1392 if (plane_id >= dm->dc->caps.max_streams)
1393 possible_crtcs = 0xff;
1394
1395 ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1396
1397 if (ret) {
1398 DRM_ERROR("KMS: Failed to initialize plane\n");
1399 return ret;
1400 }
1401
1402 return ret;
1403}
1404
1368/* In this architecture, the association 1405/* In this architecture, the association
1369 * connector -> encoder -> crtc 1406 * connector -> encoder -> crtc
 1370	 * is not really required. The crtc and connector will hold the 1407	 * is not really required. The crtc and connector will hold the
@@ -1375,12 +1412,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1375static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 1412static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1376{ 1413{
1377 struct amdgpu_display_manager *dm = &adev->dm; 1414 struct amdgpu_display_manager *dm = &adev->dm;
1378 uint32_t i; 1415 int32_t i;
1379 struct amdgpu_dm_connector *aconnector = NULL; 1416 struct amdgpu_dm_connector *aconnector = NULL;
1380 struct amdgpu_encoder *aencoder = NULL; 1417 struct amdgpu_encoder *aencoder = NULL;
1381 struct amdgpu_mode_info *mode_info = &adev->mode_info; 1418 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1382 uint32_t link_cnt; 1419 uint32_t link_cnt;
1383 unsigned long possible_crtcs; 1420 int32_t total_overlay_planes, total_primary_planes;
1384 1421
1385 link_cnt = dm->dc->caps.max_links; 1422 link_cnt = dm->dc->caps.max_links;
1386 if (amdgpu_dm_mode_config_init(dm->adev)) { 1423 if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1388,30 +1425,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1388 return -1; 1425 return -1;
1389 } 1426 }
1390 1427
1391 for (i = 0; i < dm->dc->caps.max_planes; i++) { 1428 /* Identify the number of planes to be initialized */
1392 struct amdgpu_plane *plane; 1429 total_overlay_planes = dm->dc->caps.max_slave_planes;
1430 total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1393 1431
1394 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); 1432 /* First initialize overlay planes, index starting after primary planes */
1395 mode_info->planes[i] = plane; 1433 for (i = (total_overlay_planes - 1); i >= 0; i--) {
1396 1434 if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1397 if (!plane) { 1435 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1398 DRM_ERROR("KMS: Failed to allocate plane\n");
1399 goto fail; 1436 goto fail;
1400 } 1437 }
1401 plane->base.type = mode_info->plane_type[i]; 1438 }
1402
1403 /*
1404 * HACK: IGT tests expect that each plane can only have one
1405 * one possible CRTC. For now, set one CRTC for each
1406 * plane that is not an underlay, but still allow multiple
1407 * CRTCs for underlay planes.
1408 */
1409 possible_crtcs = 1 << i;
1410 if (i >= dm->dc->caps.max_streams)
1411 possible_crtcs = 0xff;
1412 1439
1413 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) { 1440 /* Initialize primary planes */
1414 DRM_ERROR("KMS: Failed to initialize plane\n"); 1441 for (i = (total_primary_planes - 1); i >= 0; i--) {
1442 if (initialize_plane(dm, mode_info, i)) {
1443 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1415 goto fail; 1444 goto fail;
1416 } 1445 }
1417 } 1446 }
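The reordered loops above create overlay planes first, at indices past the primaries, while the new initialize_plane() helper keeps the one-CRTC-per-primary rule from the old loop. A worked example of the resulting possible_crtcs masks, with illustrative capability values:

#include <stdio.h>

/* Worked example of the possible_crtcs convention in the hunk above;
 * max_streams and max_planes are illustrative, not real caps. */
int main(void)
{
	const int max_streams = 4;	/* one CRTC per stream */
	const int max_planes = 6;	/* 4 primary + 2 overlay */

	for (int plane_id = 0; plane_id < max_planes; plane_id++) {
		unsigned long possible_crtcs = 1UL << plane_id;

		/* Overlay/underlay planes (ids past max_streams) may
		 * bind to any CRTC; primaries get exactly one. */
		if (plane_id >= max_streams)
			possible_crtcs = 0xff;

		printf("plane %d -> crtc mask 0x%02lx\n",
		       plane_id, possible_crtcs);
	}
	return 0;
}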
@@ -1982,6 +2011,10 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
1982 * every time. 2011 * every time.
1983 */ 2012 */
1984 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state); 2013 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2014 if (ret) {
2015 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2016 dc_plane_state->in_transfer_func = NULL;
2017 }
1985 2018
1986 return ret; 2019 return ret;
1987} 2020}
@@ -4691,8 +4724,8 @@ static int dm_update_planes_state(struct dc *dc,
4691 int ret = 0; 4724 int ret = 0;
4692 4725
4693 4726
 4694	 /* Add new planes */ 4727	 /* Add new planes, in reverse order, as DC expects */
4695 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 4728 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
4696 new_plane_crtc = new_plane_state->crtc; 4729 new_plane_crtc = new_plane_state->crtc;
4697 old_plane_crtc = old_plane_state->crtc; 4730 old_plane_crtc = old_plane_state->crtc;
4698 dm_new_plane_state = to_dm_plane_state(new_plane_state); 4731 dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4737,6 +4770,7 @@ static int dm_update_planes_state(struct dc *dc,
4737 *lock_and_validation_needed = true; 4770 *lock_and_validation_needed = true;
4738 4771
4739 } else { /* Add new planes */ 4772 } else { /* Add new planes */
4773 struct dc_plane_state *dc_new_plane_state;
4740 4774
4741 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 4775 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4742 continue; 4776 continue;
@@ -4755,34 +4789,42 @@ static int dm_update_planes_state(struct dc *dc,
4755 4789
4756 WARN_ON(dm_new_plane_state->dc_state); 4790 WARN_ON(dm_new_plane_state->dc_state);
4757 4791
4758 dm_new_plane_state->dc_state = dc_create_plane_state(dc); 4792 dc_new_plane_state = dc_create_plane_state(dc);
4793 if (!dc_new_plane_state)
4794 return -ENOMEM;
4759 4795
4760 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", 4796 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4761 plane->base.id, new_plane_crtc->base.id); 4797 plane->base.id, new_plane_crtc->base.id);
4762 4798
4763 if (!dm_new_plane_state->dc_state) {
4764 ret = -EINVAL;
4765 return ret;
4766 }
4767
4768 ret = fill_plane_attributes( 4799 ret = fill_plane_attributes(
4769 new_plane_crtc->dev->dev_private, 4800 new_plane_crtc->dev->dev_private,
4770 dm_new_plane_state->dc_state, 4801 dc_new_plane_state,
4771 new_plane_state, 4802 new_plane_state,
4772 new_crtc_state); 4803 new_crtc_state);
4773 if (ret) 4804 if (ret) {
4805 dc_plane_state_release(dc_new_plane_state);
4774 return ret; 4806 return ret;
4807 }
4775 4808
4809 /*
4810 * Any atomic check errors that occur after this will
4811 * not need a release. The plane state will be attached
4812 * to the stream, and therefore part of the atomic
4813 * state. It'll be released when the atomic state is
4814 * cleaned.
4815 */
4776 if (!dc_add_plane_to_context( 4816 if (!dc_add_plane_to_context(
4777 dc, 4817 dc,
4778 dm_new_crtc_state->stream, 4818 dm_new_crtc_state->stream,
4779 dm_new_plane_state->dc_state, 4819 dc_new_plane_state,
4780 dm_state->context)) { 4820 dm_state->context)) {
4781 4821
4782 ret = -EINVAL; 4822 dc_plane_state_release(dc_new_plane_state);
4783 return ret; 4823 return -EINVAL;
4784 } 4824 }
4785 4825
4826 dm_new_plane_state->dc_state = dc_new_plane_state;
4827
4786 /* Tell DC to do a full surface update every time there 4828 /* Tell DC to do a full surface update every time there
4787 * is a plane change. Inefficient, but works for now. 4829 * is a plane change. Inefficient, but works for now.
4788 */ 4830 */
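The dm_update_planes_state() rework above follows one ownership rule: keep the new dc_plane_state in a local, release it on every failure path before dc_add_plane_to_context() succeeds, and never release it afterwards, because the attached state is cleaned with the atomic state (the degamma hunk earlier applies the same release-on-error idea). A compressed sketch of the rule with stand-in types, not the DC API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins modeling the create/fill/attach rule from the hunk above;
 * plane_state_create/release mimic DC's refcounted plane state. */
struct plane_state { int refs; };

static struct plane_state *plane_state_create(void)
{
	struct plane_state *s = calloc(1, sizeof(*s));

	if (s)
		s->refs = 1;
	return s;
}

static void plane_state_release(struct plane_state *s)
{
	if (s && --s->refs == 0)
		free(s);
}

static int fill_attributes(struct plane_state *s) { (void)s; return 0; }
static int add_to_context(struct plane_state *s) { (void)s; return 1; } /* 1 == ok */

static int update_plane(struct plane_state **attached)
{
	struct plane_state *s = plane_state_create();

	if (!s)
		return -ENOMEM;	/* nothing allocated, nothing to release */
	if (fill_attributes(s)) {
		plane_state_release(s);	/* error before attach: release here */
		return -EINVAL;
	}
	if (!add_to_context(s)) {
		plane_state_release(s);
		return -EINVAL;
	}
	*attached = s;	/* attached: later errors must NOT release it */
	return 0;
}

int main(void)
{
	struct plane_state *s = NULL;

	printf("update_plane -> %d\n", update_plane(&s));
	plane_state_release(s);
	return 0;
}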
@@ -4812,6 +4854,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4812 return -EDEADLK; 4854 return -EDEADLK;
4813 4855
4814 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); 4856 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4857 if (IS_ERR(crtc_state))
4858 return PTR_ERR(crtc_state);
4859
4815 if (crtc->primary == plane && crtc_state->active) { 4860 if (crtc->primary == plane && crtc_state->active) {
4816 if (!plane_state->fb) 4861 if (!plane_state->fb)
4817 return -EINVAL; 4862 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index e845c511656e..f6cb502c303f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -193,6 +193,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
193 struct drm_property_blob *blob = crtc->base.ctm; 193 struct drm_property_blob *blob = crtc->base.ctm;
194 struct dc_stream_state *stream = crtc->stream; 194 struct dc_stream_state *stream = crtc->stream;
195 struct drm_color_ctm *ctm; 195 struct drm_color_ctm *ctm;
196 int64_t val;
196 int i; 197 int i;
197 198
198 if (!blob) { 199 if (!blob) {
@@ -206,7 +207,9 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
206 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating 207 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
207 * with homogeneous coordinates, augment the matrix with 0's. 208 * with homogeneous coordinates, augment the matrix with 0's.
208 * 209 *
209 * The format provided is S31.32, which is the same as our fixed31_32. 210 * The format provided is S31.32, using signed-magnitude representation.
211 * Our fixed31_32 is also S31.32, but is using 2's complement. We have
212 * to convert from signed-magnitude to 2's complement.
210 */ 213 */
211 for (i = 0; i < 12; i++) { 214 for (i = 0; i < 12; i++) {
212 /* Skip 4th element */ 215 /* Skip 4th element */
@@ -214,8 +217,14 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
214 stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero; 217 stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
215 continue; 218 continue;
216 } 219 }
217 /* csc[i] = ctm[i - floor(i/4)] */ 220
218 stream->gamut_remap_matrix.matrix[i].value = ctm->matrix[i - (i/4)]; 221 /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
222 val = ctm->matrix[i - (i/4)];
223 /* If negative, convert to 2's complement. */
224 if (val & (1ULL << 63))
225 val = -(val & ~(1ULL << 63));
226
227 stream->gamut_remap_matrix.matrix[i].value = val;
219 } 228 }
220} 229}
221 230
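The signed-magnitude fix above is easy to get wrong, so a worked example: in DRM's S31.32 CTM entries, bit 63 is a sign flag over a 63-bit magnitude, while DC's fixed31_32 is plain two's complement. For -1.5, DRM stores (1 << 63) | 1.5 * 2^32, and the conversion below recovers -(1.5 * 2^32):

#include <stdint.h>
#include <stdio.h>

/* Convert one DRM CTM S31.32 signed-magnitude entry to the two's
 * complement S31.32 that DC's fixed31_32 expects (same logic as the
 * hunk above). */
static int64_t ctm_to_fixed31_32(uint64_t drm_val)
{
	int64_t val = (int64_t)drm_val;

	if (drm_val & (1ULL << 63))	/* sign bit set? */
		val = -(int64_t)(drm_val & ~(1ULL << 63));
	return val;
}

int main(void)
{
	/* -1.5 in signed-magnitude S31.32: sign bit | (1.5 * 2^32) */
	uint64_t minus_one_point_five = (1ULL << 63) | 0x180000000ULL;
	int64_t fixed = ctm_to_fixed31_32(minus_one_point_five);

	printf("fixed = %lld (%f)\n", (long long)fixed,
	       (double)fixed / 4294967296.0);	/* prints -1.500000 */
	return 0;
}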
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 39cfe0fbf1b9..8291d74f26bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -85,6 +85,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
85 enum ddc_result res; 85 enum ddc_result res;
86 ssize_t read_bytes; 86 ssize_t read_bytes;
87 87
88 if (WARN_ON(msg->size > 16))
89 return -E2BIG;
90
88 switch (msg->request & ~DP_AUX_I2C_MOT) { 91 switch (msg->request & ~DP_AUX_I2C_MOT) {
89 case DP_AUX_NATIVE_READ: 92 case DP_AUX_NATIVE_READ:
90 read_bytes = dal_ddc_service_read_dpcd_data( 93 read_bytes = dal_ddc_service_read_dpcd_data(
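The WARN_ON added above encodes a DisplayPort limit: a single AUX transaction moves at most 16 bytes of payload, and the DC read/write helpers below it take fixed-size buffers. A sketch of chunking a longer DPCD read into legal AUX-sized transfers; dp_aux_read() here is a hypothetical transport, not a DC function:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define DP_AUX_MAX_PAYLOAD 16	/* DP spec: max bytes per AUX transaction */

/* Hypothetical transport: reads up to DP_AUX_MAX_PAYLOAD bytes at addr. */
static int dp_aux_read(unsigned int addr, unsigned char *buf, size_t len)
{
	(void)addr;
	memset(buf, 0xab, len);	/* fake payload for the sketch */
	return (int)len;
}

/* Split an arbitrary-length DPCD read into legal AUX-sized chunks. */
static int dpcd_read(unsigned int addr, unsigned char *buf, size_t len)
{
	while (len) {
		size_t chunk = len < DP_AUX_MAX_PAYLOAD ? len : DP_AUX_MAX_PAYLOAD;
		int r = dp_aux_read(addr, buf, chunk);

		if (r < 0)
			return r;
		addr += chunk;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	unsigned char dpcd[40];

	printf("dpcd_read -> %d\n", dpcd_read(0x0000, dpcd, sizeof(dpcd)));
	return 0;
}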
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 180a9d69d351..31bee054f43a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
60 {LOG_EVENT_LINK_LOSS, "LinkLoss"}, 60 {LOG_EVENT_LINK_LOSS, "LinkLoss"},
61 {LOG_EVENT_UNDERFLOW, "Underflow"}, 61 {LOG_EVENT_UNDERFLOW, "Underflow"},
62 {LOG_IF_TRACE, "InterfaceTrace"}, 62 {LOG_IF_TRACE, "InterfaceTrace"},
63 {LOG_DTN, "DTN"} 63 {LOG_DTN, "DTN"},
64 {LOG_PROFILING, "Profiling"}
64}; 65};
65 66
66 67
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 1689c670ca6f..e7680c41f117 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,7 +44,7 @@
44 44
45#include "bios_parser_common.h" 45#include "bios_parser_common.h"
46#define LAST_RECORD_TYPE 0xff 46#define LAST_RECORD_TYPE 0xff
47 47#define SMU9_SYSPLL0_ID 0
48 48
49struct i2c_id_config_access { 49struct i2c_id_config_access {
50 uint8_t bfI2C_LineMux:4; 50 uint8_t bfI2C_LineMux:4;
@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
1220 if (!bp->cmd_tbl.get_smu_clock_info) 1220 if (!bp->cmd_tbl.get_smu_clock_info)
1221 return BP_RESULT_FAILURE; 1221 return BP_RESULT_FAILURE;
1222 1222
1223 return bp->cmd_tbl.get_smu_clock_info(bp); 1223 return bp->cmd_tbl.get_smu_clock_info(bp, 0);
1224} 1224}
1225 1225
1226static enum bp_result bios_parser_program_crtc_timing( 1226static enum bp_result bios_parser_program_crtc_timing(
@@ -1376,7 +1376,7 @@ static enum bp_result get_firmware_info_v3_1(
1376 if (bp->cmd_tbl.get_smu_clock_info != NULL) { 1376 if (bp->cmd_tbl.get_smu_clock_info != NULL) {
1377 /* VBIOS gives in 10KHz */ 1377 /* VBIOS gives in 10KHz */
1378 info->smu_gpu_pll_output_freq = 1378 info->smu_gpu_pll_output_freq =
1379 bp->cmd_tbl.get_smu_clock_info(bp) * 10; 1379 bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
1380 } 1380 }
1381 1381
1382 return BP_RESULT_OK; 1382 return BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index e362658aa3ce..3f63f712c8a4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -796,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
796 ****************************************************************************** 796 ******************************************************************************
797 *****************************************************************************/ 797 *****************************************************************************/
798 798
799static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp); 799static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
800 800
801static void init_get_smu_clock_info(struct bios_parser *bp) 801static void init_get_smu_clock_info(struct bios_parser *bp)
802{ 802{
@@ -805,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
805 805
806} 806}
807 807
808static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp) 808static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
809{ 809{
810 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0}; 810 struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
811 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output; 811 struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
812 812
813 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ; 813 smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
814 smu_input.syspll_id = id;
814 815
815 /* Get Specific Clock */ 816 /* Get Specific Clock */
816 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) { 817 if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 59061b806df5..ec1c0c9f3f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
96 struct bios_parser *bp, 96 struct bios_parser *bp,
97 struct bp_set_dce_clock_parameters *bp_params); 97 struct bp_set_dce_clock_parameters *bp_params);
98 unsigned int (*get_smu_clock_info)( 98 unsigned int (*get_smu_clock_info)(
99 struct bios_parser *bp); 99 struct bios_parser *bp, uint8_t id);
100 100
101}; 101};
102 102
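With the signature change, callers pick which SYSPLL the VBIOS getsmuclockinfo command queries; bios_parser2.c passes SMU9_SYSPLL0_ID for the GPU PLL. A compressed sketch of the dispatch, with stand-in types and a fake clock value:

#include <stdint.h>
#include <stdio.h>

#define SMU9_SYSPLL0_ID 0	/* as defined in the bios_parser2.c hunk */

struct bios_parser;	/* opaque in this sketch */
struct cmd_tbl {
	/* new signature: caller names the syspll to query */
	unsigned int (*get_smu_clock_info)(struct bios_parser *bp, uint8_t id);
};

static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
{
	(void)bp;
	return id == SMU9_SYSPLL0_ID ? 270000 : 0; /* fake VCO, in 10KHz units */
}

int main(void)
{
	struct cmd_tbl tbl = { .get_smu_clock_info = get_smu_clock_info_v3_1 };

	/* VBIOS reports in 10KHz units, so scale by 10 as the parser does. */
	unsigned int khz = tbl.get_smu_clock_info(NULL, SMU9_SYSPLL0_ID) * 10;

	printf("syspll0 output: %u KHz\n", khz);
	return 0;
}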
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6d38b8f43198..0cbab81ab304 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -85,7 +85,6 @@ static void calculate_bandwidth(
85 const uint32_t s_mid5 = 5; 85 const uint32_t s_mid5 = 5;
86 const uint32_t s_mid6 = 6; 86 const uint32_t s_mid6 = 6;
87 const uint32_t s_high = 7; 87 const uint32_t s_high = 7;
88 const uint32_t bus_efficiency = 1;
89 const uint32_t dmif_chunk_buff_margin = 1; 88 const uint32_t dmif_chunk_buff_margin = 1;
90 89
91 uint32_t max_chunks_fbc_mode; 90 uint32_t max_chunks_fbc_mode;
@@ -592,7 +591,12 @@ static void calculate_bandwidth(
592 /* 1 = use channel 0 and 1*/ 591 /* 1 = use channel 0 and 1*/
593 /* 2 = use channel 0,1,2,3*/ 592 /* 2 = use channel 0,1,2,3*/
594 if ((fbc_enabled == 1 && lpt_enabled == 1)) { 593 if ((fbc_enabled == 1 && lpt_enabled == 1)) {
595 data->dram_efficiency = bw_int_to_fixed(1); 594 if (vbios->memory_type == bw_def_hbm)
595 data->dram_efficiency = bw_frc_to_fixed(5, 10);
596 else
597 data->dram_efficiency = bw_int_to_fixed(1);
598
599
596 if (dceip->low_power_tiling_mode == 0) { 600 if (dceip->low_power_tiling_mode == 0) {
597 data->number_of_dram_channels = 1; 601 data->number_of_dram_channels = 1;
598 } 602 }
@@ -607,7 +611,10 @@ static void calculate_bandwidth(
607 } 611 }
608 } 612 }
609 else { 613 else {
610 data->dram_efficiency = bw_frc_to_fixed(8, 10); 614 if (vbios->memory_type == bw_def_hbm)
615 data->dram_efficiency = bw_frc_to_fixed(5, 10);
616 else
617 data->dram_efficiency = bw_frc_to_fixed(8, 10);
611 } 618 }
612 /*memory request size and latency hiding:*/ 619 /*memory request size and latency hiding:*/
613 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ 620 /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
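To see what the HBM branches change: the DRAM bandwidth term used throughout calculate_bandwidth() is efficiency * yclk * (channel_width_bits / 8) * channels, and HBM now gets 0.5 efficiency instead of the 0.8/1.0 defaults. A worked example with illustrative clock and width numbers, not vbios data:

#include <stdio.h>

/* Worked example of the dram bandwidth term:
 *   bw = efficiency * yclk * (channel_width_bits / 8) * channels
 * All inputs below are illustrative. */
int main(void)
{
	double yclk_hz = 500e6;

	/* HBM after this patch: 50% efficiency, wide channels */
	double hbm = 0.5 * yclk_hz * (1024.0 / 8.0) * 4;
	/* non-HBM default path: 80% efficiency, narrow channels */
	double gddr = 0.8 * yclk_hz * (64.0 / 8.0) * 8;

	printf("hbm  ~ %.1f GB/s\n", hbm / 1e9);	/* ~128.0 GB/s */
	printf("gddr ~ %.1f GB/s\n", gddr / 1e9);	/* ~ 25.6 GB/s */
	return 0;
}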
@@ -1171,9 +1178,9 @@ static void calculate_bandwidth(
1171 } 1178 }
1172 for (i = 0; i <= 2; i++) { 1179 for (i = 0; i <= 2; i++) {
1173 for (j = 0; j <= 7; j++) { 1180 for (j = 0; j <= 7; j++) {
1174 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1181 data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
1175 if (data->d1_display_write_back_dwb_enable == 1) { 1182 if (data->d1_display_write_back_dwb_enable == 1) {
1176 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency))))); 1183 data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
1177 } 1184 }
1178 } 1185 }
1179 } 1186 }
@@ -1258,6 +1265,16 @@ static void calculate_bandwidth(
1258 /* / (dispclk - display bw)*/ 1265 /* / (dispclk - display bw)*/
1259 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ 1266 /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
1260 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ 1267 /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
1268
1269 /*initialize variables*/
1270 number_of_displays_enabled = 0;
1271 number_of_displays_enabled_with_margin = 0;
1272 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1273 if (data->enable[k]) {
1274 number_of_displays_enabled = number_of_displays_enabled + 1;
1275 }
1276 data->display_pstate_change_enable[k] = 0;
1277 }
1261 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1278 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1262 if (data->enable[i]) { 1279 if (data->enable[i]) {
1263 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { 1280 if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
@@ -1276,7 +1293,10 @@ static void calculate_bandwidth(
1276 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1293 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1277 if (data->enable[i]) { 1294 if (data->enable[i]) {
1278 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { 1295 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
1279 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1296 if (number_of_displays_enabled > 2)
1297 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1298 else
1299 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
1280 } 1300 }
1281 else { 1301 else {
1282 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); 1302 data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
@@ -1338,24 +1358,15 @@ static void calculate_bandwidth(
1338 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { 1358 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1339 if (data->enable[i]) { 1359 if (data->enable[i]) {
1340 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) { 1360 if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
1341 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1361 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1342 } 1362 }
1343 else { 1363 else {
1344 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ 1364 /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
1345 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency)); 1365 data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
1346 } 1366 }
1347 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); 1367 data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
1348 } 1368 }
1349 } 1369 }
1350 /*initialize variables*/
1351 number_of_displays_enabled = 0;
1352 number_of_displays_enabled_with_margin = 0;
1353 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
1354 if (data->enable[k]) {
1355 number_of_displays_enabled = number_of_displays_enabled + 1;
1356 }
1357 data->display_pstate_change_enable[k] = 0;
1358 }
1359 for (i = 0; i <= 2; i++) { 1370 for (i = 0; i <= 2; i++) {
1360 for (j = 0; j <= 7; j++) { 1371 for (j = 0; j <= 7; j++) {
1361 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); 1372 data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
@@ -1370,10 +1381,11 @@ static void calculate_bandwidth(
1370 /*determine the minimum dram clock change margin for each set of clock frequencies*/ 1381 /*determine the minimum dram clock change margin for each set of clock frequencies*/
1371 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1382 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
 1372	 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1383	 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1373 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); 1384 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
1374 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1385 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1375 data->display_pstate_change_enable[k] = 1; 1386 data->display_pstate_change_enable[k] = 1;
1376 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1387 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1388 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1377 } 1389 }
1378 } 1390 }
1379 } 1391 }
@@ -1383,10 +1395,11 @@ static void calculate_bandwidth(
1383 /*determine the minimum dram clock change margin for each display pipe*/ 1395 /*determine the minimum dram clock change margin for each display pipe*/
1384 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1396 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
 1385	 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1397	 /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
1386 data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); 1398 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
1387 if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) { 1399 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
1388 data->display_pstate_change_enable[k] = 1; 1400 data->display_pstate_change_enable[k] = 1;
1389 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; 1401 data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
1402 data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
1390 } 1403 }
1391 } 1404 }
1392 } 1405 }
@@ -1420,7 +1433,7 @@ static void calculate_bandwidth(
1420 data->displays_with_same_mode[i] = bw_int_to_fixed(0); 1433 data->displays_with_same_mode[i] = bw_int_to_fixed(0);
1421 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { 1434 if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
1422 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { 1435 for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
1423 if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { 1436 if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
1424 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); 1437 data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
1425 } 1438 }
1426 } 1439 }
@@ -1435,7 +1448,7 @@ static void calculate_bandwidth(
1435 /*aligned displays with the same timing.*/ 1448 /*aligned displays with the same timing.*/
1436 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ 1449 /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
1437 /*displays are in v_blank or v_active.*/ 1450 /*displays are in v_blank or v_active.*/
1438 if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) { 1451 if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
1439 data->nbp_state_change_enable = bw_def_yes; 1452 data->nbp_state_change_enable = bw_def_yes;
1440 } 1453 }
1441 else { 1454 else {
@@ -1448,6 +1461,25 @@ static void calculate_bandwidth(
1448 else { 1461 else {
1449 nbp_state_change_enable_blank = bw_def_no; 1462 nbp_state_change_enable_blank = bw_def_no;
1450 } 1463 }
1464
1465 /*average bandwidth*/
 1466	 /*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request to useful bytes per request.*/
1467 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1468 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1469 if (data->enable[i]) {
1470 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1471 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1472 }
1473 }
1474 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1475 data->total_average_bandwidth = bw_int_to_fixed(0);
1476 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1477 if (data->enable[i]) {
1478 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1479 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1480 }
1481 }
1482
1451 /*required yclk(pclk)*/ 1483 /*required yclk(pclk)*/
 1452	 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/ 1484	 /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
1453 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ 1485 /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
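The new average-bandwidth block computes, per enabled surface, source_width * bytes_per_pixel / line_time * vsr * (bytes_per_request / useful_bytes_per_request), then divides by the compression rate; the totals gate the low/mid/high yclk choice below. A worked example with illustrative 1080p-ish numbers:

#include <stdio.h>

/* Worked example of the average-bandwidth formula added above; all
 * inputs are illustrative, not vbios or mode data. */
int main(void)
{
	double src_width = 1920, bytes_per_pixel = 4;
	double h_total = 2200, pixel_rate_hz = 148.5e6;
	double vsr = 1.0;	/* no vertical downscale */
	double bytes_per_req = 64, useful_bytes_per_req = 64;
	double compression_rate = 2.0;

	double line_time = h_total / pixel_rate_hz;
	double avg_no_comp = src_width * bytes_per_pixel / line_time
			     * vsr * (bytes_per_req / useful_bytes_per_req);

	printf("avg (no compression): %.3f GB/s\n", avg_no_comp / 1e9); /* ~0.518 */
	printf("avg (compressed):     %.3f GB/s\n",
	       avg_no_comp / compression_rate / 1e9);			/* ~0.259 */
	return 0;
}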
@@ -1497,17 +1529,20 @@ static void calculate_bandwidth(
1497 } 1529 }
1498 else { 1530 else {
1499 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); 1531 data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
1500 if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) { 1532 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1533 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
1501 yclk_message = bw_fixed_to_int(vbios->low_yclk); 1534 yclk_message = bw_fixed_to_int(vbios->low_yclk);
1502 data->y_clk_level = low; 1535 data->y_clk_level = low;
1503 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1536 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1504 } 1537 }
1505 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { 1538 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1539 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
1506 yclk_message = bw_fixed_to_int(vbios->mid_yclk); 1540 yclk_message = bw_fixed_to_int(vbios->mid_yclk);
1507 data->y_clk_level = mid; 1541 data->y_clk_level = mid;
1508 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1542 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
1509 } 1543 }
1510 else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { 1544 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
1545 && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
1511 yclk_message = bw_fixed_to_int(vbios->high_yclk); 1546 yclk_message = bw_fixed_to_int(vbios->high_yclk);
1512 data->y_clk_level = high; 1547 data->y_clk_level = high;
1513 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); 1548 data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
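The ladder above walks yclk levels from low to high and keeps the first level whose effective DRAM bandwidth (dram_efficiency times yclk times channel width over 8, times channel count) covers the requirement; the rewritten branches additionally cap the uncompressed average bandwidth at a percentage of the ideal DRAM bandwidth. A minimal standalone sketch of that selection, using doubles in place of the driver's bw_fixed arithmetic (bw_mul/bw_div chains) and made-up clock and bus numbers:

#include <stdio.h>

/* Effective DRAM bandwidth in GB/s for one yclk level: a float stand-in
 * for the driver's fixed-point product of efficiency, clock, bus width
 * in bytes and channel count. */
static double dram_bw_gbps(double efficiency, double yclk_mhz,
                           int channel_width_bits, int channels)
{
        return efficiency * yclk_mhz * (channel_width_bits / 8.0)
                        * channels / 1000.0;
}

int main(void)
{
        const double yclk[] = { 800.0, 1600.0, 2000.0 }; /* low/mid/high, MHz */
        double required_gbps = 18.0;
        double avg_no_compression_gbps = 9.0;
        double max_avg_percent = 100.0;                  /* new dceip default */
        int level;

        for (level = 0; level < 3; level++) {
                double ideal = dram_bw_gbps(1.0, yclk[level], 64, 2);
                double usable = dram_bw_gbps(0.8, yclk[level], 64, 2);
                /* both checks must pass, mirroring the && chain above */
                if (avg_no_compression_gbps < ideal * max_avg_percent / 100.0 &&
                    required_gbps < usable)
                        break;
        }
        printf("selected yclk level %d\n", level);
        return 0;
}

With these invented numbers the low level fails the required-bandwidth test and the mid level is chosen, which is exactly the fallthrough shape of the branches above.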
@@ -1523,8 +1558,8 @@ static void calculate_bandwidth(
1523 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/ 1558 /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
1524 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/ 1559 /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/
1525 /*for dmif, pte and cursor requests have to be included.*/ 1560 /*for dmif, pte and cursor requests have to be included.*/
1526 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1561 data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1527 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1562 data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
1528 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { 1563 if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
1529 data->required_sclk = bw_int_to_fixed(9999); 1564 data->required_sclk = bw_int_to_fixed(9999);
1530 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; 1565 sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
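Per the comments above, the sclk needed by dmif or mcifwr is the data to transfer divided by the time window and by the effective return-bus width; the change derates reads by the new percent_of_ideal_port_bw_received_after_urgent_latency field instead of the old bus_efficiency constant, and drops the derating entirely for writes. A float sketch of that formula with hypothetical numbers, not the driver's fixed-point helpers:

#include <stdio.h>

/* Required sclk (MHz) to move `bytes` across a `bus_bytes`-wide return
 * bus in `time_us`, derated by the percent of ideal port bandwidth
 * actually received after urgent latency. */
static double required_sclk_mhz(double bytes, double time_us,
                                double bus_bytes, double percent_received)
{
        return bytes / time_us / (bus_bytes * percent_received / 100.0);
}

int main(void)
{
        /* hypothetical: 64 KiB of read data, 100 us window, 32-byte bus */
        printf("dmif sclk at 100%%: %.2f MHz\n",
               required_sclk_mhz(65536.0, 100.0, 32.0, 100.0));
        printf("dmif sclk at  80%%: %.2f MHz\n",
               required_sclk_mhz(65536.0, 100.0, 32.0, 80.0));
        return 0;
}

A lower received-bandwidth percentage raises the sclk requirement, which is why a 100 default (set in bw_calcs_init below) reproduces the old ideal-bus behaviour.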
@@ -1537,42 +1572,56 @@ static void calculate_bandwidth(
1537 } 1572 }
1538 else { 1573 else {
1539 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); 1574 data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
1540 if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { 1575 if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width))
1576 && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
1541 sclk_message = bw_def_low; 1577 sclk_message = bw_def_low;
1542 data->sclk_level = s_low; 1578 data->sclk_level = s_low;
1543 data->required_sclk = vbios->low_sclk; 1579 data->required_sclk = vbios->low_sclk;
1544 } 1580 }
1545 else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { 1581 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width))
1582 && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
1546 sclk_message = bw_def_mid; 1583 sclk_message = bw_def_mid;
1547 data->sclk_level = s_mid1; 1584 data->sclk_level = s_mid1;
1548 data->required_sclk = vbios->mid1_sclk; 1585 data->required_sclk = vbios->mid1_sclk;
1549 } 1586 }
1550 else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { 1587 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
1588 && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
1551 sclk_message = bw_def_mid; 1589 sclk_message = bw_def_mid;
1552 data->sclk_level = s_mid2; 1590 data->sclk_level = s_mid2;
1553 data->required_sclk = vbios->mid2_sclk; 1591 data->required_sclk = vbios->mid2_sclk;
1554 } 1592 }
1555 else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { 1593 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
1594 && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
1556 sclk_message = bw_def_mid; 1595 sclk_message = bw_def_mid;
1557 data->sclk_level = s_mid3; 1596 data->sclk_level = s_mid3;
1558 data->required_sclk = vbios->mid3_sclk; 1597 data->required_sclk = vbios->mid3_sclk;
1559 } 1598 }
1560 else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { 1599 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
1600 && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
1561 sclk_message = bw_def_mid; 1601 sclk_message = bw_def_mid;
1562 data->sclk_level = s_mid4; 1602 data->sclk_level = s_mid4;
1563 data->required_sclk = vbios->mid4_sclk; 1603 data->required_sclk = vbios->mid4_sclk;
1564 } 1604 }
1565 else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { 1605 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
1606 && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
1566 sclk_message = bw_def_mid; 1607 sclk_message = bw_def_mid;
1567 data->sclk_level = s_mid5; 1608 data->sclk_level = s_mid5;
1568 data->required_sclk = vbios->mid5_sclk; 1609 data->required_sclk = vbios->mid5_sclk;
1569 } 1610 }
1570 else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { 1611 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
1612 && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
1571 sclk_message = bw_def_mid; 1613 sclk_message = bw_def_mid;
1572 data->sclk_level = s_mid6; 1614 data->sclk_level = s_mid6;
1573 data->required_sclk = vbios->mid6_sclk; 1615 data->required_sclk = vbios->mid6_sclk;
1574 } 1616 }
1575 else if (bw_ltn(data->required_sclk, sclk[s_high])) { 1617 else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1618 && bw_ltn(data->required_sclk, sclk[s_high])) {
1619 sclk_message = bw_def_high;
1620 data->sclk_level = s_high;
1621 data->required_sclk = vbios->high_sclk;
1622 }
1623 else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
1624 && bw_ltn(data->required_sclk, sclk[s_high])) {
1576 sclk_message = bw_def_high; 1625 sclk_message = bw_def_high;
1577 data->sclk_level = s_high; 1626 data->sclk_level = s_high;
1578 data->required_sclk = vbios->high_sclk; 1627 data->required_sclk = vbios->high_sclk;
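Every rewritten branch in this sclk ladder prefixes the NB p-state checks with !data->increase_voltage_to_support_mclk_switch, so those checks only constrain the level when raising voltage for an mclk switch is permitted (populate_initial_data defaults the flag to true further down), and a new bw_meq fallback still accepts high sclk when only the average-bandwidth cap fails. A truth-table sketch of the added guard, with simplified booleans in place of the bw_def_* enums:

#include <stdio.h>
#include <stdbool.h>

/* The added guard: NB p-state margin conditions only gate the level
 * choice when we are allowed to raise voltage for an mclk switch. */
static bool nbp_ok(bool increase_voltage, bool nbp_enabled, bool margin_ok)
{
        return !increase_voltage || !nbp_enabled || margin_ok;
}

int main(void)
{
        printf("%d\n", nbp_ok(false, true, false)); /* 1: guard bypasses check */
        printf("%d\n", nbp_ok(true,  true, false)); /* 0: margin must hold */
        printf("%d\n", nbp_ok(true,  true, true));  /* 1: margin holds */
        return 0;
}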
@@ -1681,7 +1730,7 @@ static void calculate_bandwidth(
1681 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1730 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1682 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); 1731 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
1683 } 1732 }
1684 if (data->nbp_state_change_enable == bw_def_yes) { 1733 if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
1685 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1734 data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1686 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); 1735 data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
1687 } 1736 }
@@ -1861,23 +1910,6 @@ static void calculate_bandwidth(
1861 else { 1910 else {
1862 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0); 1911 data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
1863 } 1912 }
1864 /*average bandwidth*/
1865 /*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
1866 /*the average bandwidth with compression is the same, divided by the compression ratio*/
1867 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1868 if (data->enable[i]) {
1869 data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
1870 data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
1871 }
1872 }
1873 data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
1874 data->total_average_bandwidth = bw_int_to_fixed(0);
1875 for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
1876 if (data->enable[i]) {
1877 data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
1878 data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
1879 }
1880 }
1881 /*stutter efficiency*/ 1913 /*stutter efficiency*/
1882 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/ 1914 /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
1883 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ 1915 /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
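The average-bandwidth block removed above computes, per enabled surface, bytes per line over line time, scaled by the vertical scale ratio and the request efficiency, then sums across surfaces; it is presumably relocated earlier in the function so the yclk/sclk selection can consume total_average_bandwidth_no_compression. A worked float example of the per-surface formula, using a hypothetical 1080p-class timing:

#include <stdio.h>

/* Average read bandwidth for one surface, per the removed comment:
 * bytes per line divided by line time, scaled by vsr and by the
 * bytes-per-request / useful-bytes-per-request ratio. */
static double avg_bw_no_comp(double src_width, double bytes_per_pixel,
                             double h_total, double pixel_rate_mhz,
                             double vsr, double bytes_per_req,
                             double useful_bytes_per_req)
{
        double line_time_us = h_total / pixel_rate_mhz;  /* us per line */
        double bytes_per_line = src_width * bytes_per_pixel;
        return bytes_per_line / line_time_us * vsr
                        * bytes_per_req / useful_bytes_per_req; /* MB/s */
}

int main(void)
{
        /* hypothetical 1920-wide 32bpp surface on a 2200-wide, 148.5 MHz timing */
        double no_comp = avg_bw_no_comp(1920, 4, 2200, 148.5, 1.0, 64, 64);
        printf("no compression: %.1f MB/s, with 2:1 compression: %.1f MB/s\n",
               no_comp, no_comp / 2.0);
        return 0;
}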
@@ -1905,7 +1937,7 @@ static void calculate_bandwidth(
1905 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); 1937 data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
1906 } 1938 }
1907 } 1939 }
1908 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32)))); 1940 data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
1909 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; 1941 data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
1910 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); 1942 data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
1911 data->time_in_self_refresh = data->min_stutter_refresh_duration; 1943 data->time_in_self_refresh = data->min_stutter_refresh_duration;
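With the stutter change in this hunk, the burst time is the dmif buffer size drained over the sclk return bus alone (sclk times bus width), rather than the minimum of the DRAM path and a hardcoded 32 bytes per clock; stutter efficiency then follows the comment above as time in self-refresh over the whole stutter cycle. A numeric sketch with invented values:

#include <stdio.h>

int main(void)
{
        double buffer_bytes = 65536.0;
        double sclk_mhz = 600.0, bus_bytes = 32.0;
        /* new form: drain the dmif buffer over the sclk return bus */
        double burst_us = buffer_bytes / (sclk_mhz * bus_bytes);
        double refresh_us = 500.0, exit_latency_us = 23.0;
        double cycle_us = refresh_us + exit_latency_us + burst_us;

        printf("burst %.2f us, stutter efficiency %.1f%%\n",
               burst_us, 100.0 * refresh_us / cycle_us);
        return 0;
}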
@@ -1957,7 +1989,7 @@ static void calculate_bandwidth(
1957 for (i = 1; i <= 5; i++) { 1989 for (i = 1; i <= 5; i++) {
1958 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i))); 1990 data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
1959 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) { 1991 if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
1960 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency)))); 1992 data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
1961 } 1993 }
1962 else { 1994 else {
1963 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na); 1995 data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
@@ -2036,6 +2068,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2036 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2068 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2037 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2069 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2038 2070
2071 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2072 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2073 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2039 dceip.large_cursor = false; 2074 dceip.large_cursor = false;
2040 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2075 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2041 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2076 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
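Each bw_calcs_init variant in the hunks that follow seeds the three new dceip percentages at 100, which keeps the added guards neutral until a per-ASIC tune overrides them: assuming bw_frc_to_fixed(num, 100) means num/100 in the driver's fixed-point format, a 100 default multiplies every bandwidth cap by exactly 1. A toy illustration:

#include <stdio.h>

/* float stand-in for bw_frc_to_fixed(num, den): num/den as a scale */
static double frc(long num, long den) { return (double)num / (double)den; }

int main(void)
{
        double ideal_port_bw = 640.0;                 /* hypothetical, MB/s */
        printf("cap at 100%% default: %.1f MB/s\n",
               ideal_port_bw * frc(100, 100));        /* unchanged */
        printf("cap tuned to 80%%:    %.1f MB/s\n",
               ideal_port_bw * frc(80, 100));
        return 0;
}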
@@ -2146,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2146 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2181 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2147 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2182 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2148 2183
2184 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2185 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2186 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2149 dceip.large_cursor = false; 2187 dceip.large_cursor = false;
2150 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2188 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2151 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2189 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2259,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2259 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2297 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2260 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2298 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2261 2299
2300 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2301 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2302 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2262 dceip.large_cursor = false; 2303 dceip.large_cursor = false;
2263 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2304 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2264 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2305 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2369,6 +2410,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2369 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2410 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2370 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2411 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2371 2412
2413 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2414 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2415 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2372 dceip.large_cursor = false; 2416 dceip.large_cursor = false;
2373 dceip.dmif_request_buffer_size = bw_int_to_fixed(768); 2417 dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
2374 dceip.dmif_pipe_en_fbc_chunk_tracker = false; 2418 dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2479,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
2479 vbios.blackout_duration = bw_int_to_fixed(0); /* us */ 2523 vbios.blackout_duration = bw_int_to_fixed(0); /* us */
2480 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); 2524 vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
2481 2525
2526 dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
2527 dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
2528 dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
2482 dceip.large_cursor = false; 2529 dceip.large_cursor = false;
2483 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304); 2530 dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
2484 dceip.dmif_pipe_en_fbc_chunk_tracker = true; 2531 dceip.dmif_pipe_en_fbc_chunk_tracker = true;
@@ -2597,6 +2644,7 @@ static void populate_initial_data(
2597 data->graphics_tiling_mode = bw_def_tiled; 2644 data->graphics_tiling_mode = bw_def_tiled;
2598 data->underlay_micro_tile_mode = bw_def_display_micro_tiling; 2645 data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
2599 data->graphics_micro_tile_mode = bw_def_display_micro_tiling; 2646 data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
2647 data->increase_voltage_to_support_mclk_switch = true;
2600 2648
2601 /* Pipes with underlay first */ 2649 /* Pipes with underlay first */
2602 for (i = 0; i < pipe_count; i++) { 2650 for (i = 0; i < pipe_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 8020bc7742c1..4bb43a371292 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -983,8 +983,6 @@ bool dcn_validate_bandwidth(
983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); 983 context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
984 } 984 }
985 985
986 context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
987 context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
988 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); 986 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
989 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000); 987 context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
990 988
@@ -998,7 +996,26 @@ bool dcn_validate_bandwidth(
998 dc->debug.min_disp_clk_khz; 996 dc->debug.min_disp_clk_khz;
999 } 997 }
1000 998
1001 context->bw.dcn.calc_clk.max_dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio; 999 context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
1000
1001 switch (v->voltage_level) {
1002 case 0:
1003 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1004 (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
1005 break;
1006 case 1:
1007 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1008 (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
1009 break;
1010 case 2:
1011 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1012 (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
1013 break;
1014 default:
1015 context->bw.dcn.calc_clk.max_supported_dppclk_khz =
1016 (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
1017 break;
1018 }
1002 1019
1003 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { 1020 for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
1004 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1021 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
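dcn_validate_bandwidth now publishes both the computed dppclk (dispclk over the dispclk/dppclk ratio) and the ceiling the chosen voltage level allows, looked up from the SoC's max_dppclk_vmin0p65 through max_dppclk_vmax0p9 caps, with the default case absorbing any higher level. A sketch of the lookup using hypothetical cap values in MHz:

#include <stdio.h>

/* invented SoC caps, mirroring the vmin/vmid/vnom/vmax fields */
static const double max_dppclk_mhz[4] = { 400.0, 500.0, 560.0, 600.0 };

static int max_supported_dppclk_khz(int voltage_level)
{
        int idx = voltage_level < 3 ? voltage_level : 3;  /* default case */
        return (int)(max_dppclk_mhz[idx] * 1000.0);
}

int main(void)
{
        int dispclk_khz = 600000, dispclk_dppclk_ratio = 2;

        printf("dppclk %d kHz, ceiling at level 0: %d kHz\n",
               dispclk_khz / dispclk_dppclk_ratio,
               max_supported_dppclk_khz(0));
        return 0;
}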
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8394d69b963f..63a3d468939a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -42,6 +42,7 @@
42#include "dmcu.h" 42#include "dmcu.h"
43#include "dpp.h" 43#include "dpp.h"
44#include "timing_generator.h" 44#include "timing_generator.h"
45#include "abm.h"
45#include "virtual/virtual_link_encoder.h" 46#include "virtual/virtual_link_encoder.h"
46 47
47#include "link_hwss.h" 48#include "link_hwss.h"
@@ -802,6 +803,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
802 if (!dcb->funcs->is_accelerated_mode(dcb)) 803 if (!dcb->funcs->is_accelerated_mode(dcb))
803 dc->hwss.enable_accelerated_mode(dc, context); 804 dc->hwss.enable_accelerated_mode(dc, context);
804 805
806 dc->hwss.set_bandwidth(dc, context, false);
807
805 /* re-program planes for existing stream, in case we need to 808 /* re-program planes for existing stream, in case we need to
806 * free up plane resource for later use 809 * free up plane resource for later use
807 */ 810 */
@@ -870,6 +873,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
870 873
871 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 874 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
872 875
876 /* pplib is notified if disp_num changed */
877 dc->hwss.set_bandwidth(dc, context, true);
878
873 dc_release_state(dc->current_state); 879 dc_release_state(dc->current_state);
874 880
875 dc->current_state = context; 881 dc->current_state = context;
@@ -1104,9 +1110,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
1104 if (u->plane_info->input_tf != u->surface->input_tf) 1110 if (u->plane_info->input_tf != u->surface->input_tf)
1105 update_flags->bits.input_tf_change = 1; 1111 update_flags->bits.input_tf_change = 1;
1106 1112
1107 if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
1108 update_flags->bits.output_tf_change = 1;
1109
1110 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) 1113 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
1111 update_flags->bits.horizontal_mirror_change = 1; 1114 update_flags->bits.horizontal_mirror_change = 1;
1112 1115
@@ -1361,6 +1364,17 @@ static void commit_planes_for_stream(struct dc *dc,
1361 1364
1362 dc->hwss.apply_ctx_for_surface( 1365 dc->hwss.apply_ctx_for_surface(
1363 dc, pipe_ctx->stream, stream_status->plane_count, context); 1366 dc, pipe_ctx->stream, stream_status->plane_count, context);
1367
1368 if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
1369 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
1370 // if the otg is_blanked hook is defined, check it before programming
1371 if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1372 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1373 pipe_ctx->stream_res.abm, stream->abm_level);
1374 } else
1375 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1376 pipe_ctx->stream_res.abm, stream->abm_level);
1377 }
1364 } 1378 }
1365 } 1379 }
1366 1380
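The block added to commit_planes_for_stream programs the ABM level only after checking the optional OTG is_blanked hook, and programs unconditionally when the hook is absent. A standalone model with simplified stand-in types, not the driver's structs:

#include <stdio.h>
#include <stdbool.h>

struct tg { bool (*is_blanked)(const struct tg *tg); };
struct abm { void (*set_abm_level)(struct abm *abm, unsigned int level); };

static void commit_abm(struct tg *tg, struct abm *abm, unsigned int level)
{
        if (!abm || !abm->set_abm_level)
                return;
        /* if the query hook exists, skip programming while blanked */
        if (tg->is_blanked && tg->is_blanked(tg))
                return;
        abm->set_abm_level(abm, level);
}

static bool blanked(const struct tg *tg) { (void)tg; return false; }
static void set_level(struct abm *abm, unsigned int l)
{
        (void)abm;
        printf("abm level %u\n", l);
}

int main(void)
{
        struct tg tg = { blanked };
        struct abm abm = { set_level };

        commit_abm(&tg, &abm, 2);   /* unblanked, so the level is applied */
        return 0;
}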
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index c15565092ca8..5a552cb3f8a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,26 +36,22 @@
36#include "hw_sequencer.h" 36#include "hw_sequencer.h"
37 37
38#include "resource.h" 38#include "resource.h"
39#define DC_LOGGER \
40 logger
39 41
40#define SURFACE_TRACE(...) do {\ 42#define SURFACE_TRACE(...) do {\
41 if (dc->debug.surface_trace) \ 43 if (dc->debug.surface_trace) \
42 dm_logger_write(logger, \ 44 DC_LOG_IF_TRACE(__VA_ARGS__); \
43 LOG_IF_TRACE, \
44 ##__VA_ARGS__); \
45} while (0) 45} while (0)
46 46
47#define TIMING_TRACE(...) do {\ 47#define TIMING_TRACE(...) do {\
48 if (dc->debug.timing_trace) \ 48 if (dc->debug.timing_trace) \
49 dm_logger_write(logger, \ 49 DC_LOG_SYNC(__VA_ARGS__); \
50 LOG_SYNC, \
51 ##__VA_ARGS__); \
52} while (0) 50} while (0)
53 51
54#define CLOCK_TRACE(...) do {\ 52#define CLOCK_TRACE(...) do {\
55 if (dc->debug.clock_trace) \ 53 if (dc->debug.clock_trace) \
56 dm_logger_write(logger, \ 54 DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
57 LOG_BANDWIDTH_CALCS, \
58 ##__VA_ARGS__); \
59} while (0) 55} while (0)
60 56
61void pre_surface_trace( 57void pre_surface_trace(
@@ -362,25 +358,19 @@ void context_clock_trace(
362 struct dal_logger *logger = core_dc->ctx->logger; 358 struct dal_logger *logger = core_dc->ctx->logger;
363 359
364 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 360 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
365 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n" 361 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
366 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
367 context->bw.dcn.calc_clk.dispclk_khz, 362 context->bw.dcn.calc_clk.dispclk_khz,
368 context->bw.dcn.calc_clk.max_dppclk_khz, 363 context->bw.dcn.calc_clk.dppclk_khz,
369 context->bw.dcn.calc_clk.dcfclk_khz, 364 context->bw.dcn.calc_clk.dcfclk_khz,
370 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 365 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
371 context->bw.dcn.calc_clk.fclk_khz, 366 context->bw.dcn.calc_clk.fclk_khz,
372 context->bw.dcn.calc_clk.socclk_khz, 367 context->bw.dcn.calc_clk.socclk_khz);
373 context->bw.dcn.calc_clk.dram_ccm_us,
374 context->bw.dcn.calc_clk.min_active_dram_ccm_us);
375 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 368 CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
376 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n" 369 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
377 "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
378 context->bw.dcn.calc_clk.dispclk_khz, 370 context->bw.dcn.calc_clk.dispclk_khz,
379 context->bw.dcn.calc_clk.max_dppclk_khz, 371 context->bw.dcn.calc_clk.dppclk_khz,
380 context->bw.dcn.calc_clk.dcfclk_khz, 372 context->bw.dcn.calc_clk.dcfclk_khz,
381 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, 373 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
382 context->bw.dcn.calc_clk.fclk_khz, 374 context->bw.dcn.calc_clk.fclk_khz);
383 context->bw.dcn.calc_clk.dram_ccm_us,
384 context->bw.dcn.calc_clk.min_active_dram_ccm_us);
385#endif 375#endif
386} 376}
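The dc_debug.c rewrite above replaces open-coded dm_logger_write(logger, LOG_*, ...) calls with DC_LOG_* wrappers that resolve a local logger through the DC_LOGGER define, keeping the do { } while (0) shell so the trace macros stay safe as single statements after an if. A generic reconstruction of the pattern with a toy logger, not the DC logging API:

#include <stdio.h>
#include <stdarg.h>

struct logger { int enabled; };

static void log_write(struct logger *l, const char *fmt, ...)
{
        va_list ap;

        if (!l || !l->enabled)
                return;
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

/* the wrapper expands against whatever `logger` DC_LOGGER names locally */
#define DC_LOGGER logger
#define DC_LOG_SYNC(...) log_write(DC_LOGGER, __VA_ARGS__)

#define TIMING_TRACE(...) do {            \
        if (debug_timing_trace)           \
                DC_LOG_SYNC(__VA_ARGS__); \
} while (0)

int main(void)
{
        struct logger trace = { 1 }, *logger = &trace;
        int debug_timing_trace = 1;

        TIMING_TRACE("pixel_clk=%d kHz\n", 148500);
        return 0;
}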
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f8c09273e0f1..eeb04471b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1960,6 +1960,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1960 (abm->funcs->set_backlight_level == NULL)) 1960 (abm->funcs->set_backlight_level == NULL))
1961 return false; 1961 return false;
1962 1962
1963 if (stream) {
1964 if (stream->bl_pwm_level == 0)
1965 frame_ramp = 0;
1966
1967 ((struct dc_stream_state *)stream)->bl_pwm_level = level;
1968 }
1969
1963 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 1970 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
1964 1971
1965 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level); 1972 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
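The dc_link_set_backlight_level addition caches the requested level in stream->bl_pwm_level and forces frame_ramp to 0 whenever the previous level was 0, so a panel lighting up from fully off snaps to the target instead of ramping through visible intermediate levels. A small model:

#include <stdio.h>

struct stream { unsigned int bl_pwm_level; };

static unsigned int pick_frame_ramp(struct stream *stream,
                                    unsigned int level,
                                    unsigned int frame_ramp)
{
        if (stream) {
                if (stream->bl_pwm_level == 0)
                        frame_ramp = 0;       /* no smooth ramp from off */
                stream->bl_pwm_level = level; /* remember for next call */
        }
        return frame_ramp;
}

int main(void)
{
        struct stream s = { 0 };

        printf("ramp %u frames\n", pick_frame_ramp(&s, 255, 60)); /* 0: was off */
        printf("ramp %u frames\n", pick_frame_ramp(&s, 128, 60)); /* 60: was lit */
        return 0;
}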
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b9fc6d842931..ba3487e97361 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1124,6 +1124,7 @@ bool dc_add_plane_to_context(
1124 ASSERT(tail_pipe); 1124 ASSERT(tail_pipe);
1125 1125
1126 free_pipe->stream_res.tg = tail_pipe->stream_res.tg; 1126 free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
1127 free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
1127 free_pipe->stream_res.opp = tail_pipe->stream_res.opp; 1128 free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
1128 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; 1129 free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
1129 free_pipe->stream_res.audio = tail_pipe->stream_res.audio; 1130 free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
@@ -1736,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
1736 pipe_ctx->stream_res.audio, true); 1737 pipe_ctx->stream_res.audio, true);
1737 } 1738 }
1738 1739
1740 /* Add ABM to the resource if on EDP */
1741 if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
1742 pipe_ctx->stream_res.abm = pool->abm;
1743
1739 for (i = 0; i < context->stream_count; i++) 1744 for (i = 0; i < context->stream_count; i++)
1740 if (context->streams[i] == stream) { 1745 if (context->streams[i] == stream) {
1741 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; 1746 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
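resource_map_pool_resources now hands the pool's ABM instance only to pipes whose stream drives an embedded signal, matching ABM's eDP backlight role. A sketch of the filter with a hypothetical signal enum; dc_is_embedded_signal's exact coverage is not shown in this diff:

#include <stdio.h>
#include <stdbool.h>

enum signal_type { SIGNAL_HDMI, SIGNAL_DP, SIGNAL_EDP };

/* stand-in for dc_is_embedded_signal(); assumed to match eDP here */
static bool is_embedded(enum signal_type s) { return s == SIGNAL_EDP; }

int main(void)
{
        enum signal_type sig = SIGNAL_EDP;
        const char *abm = is_embedded(sig) ? "pool->abm" : "NULL";

        printf("stream_res.abm = %s\n", abm);
        return 0;
}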
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index cd5819789d76..ce0747ed0f00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -198,8 +198,7 @@ bool dc_stream_set_cursor_attributes(
198 for (i = 0; i < MAX_PIPES; i++) { 198 for (i = 0; i < MAX_PIPES; i++) {
199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
200 200
201 if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && 201 if (pipe_ctx->stream != stream)
202 !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
203 continue; 202 continue;
204 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) 203 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
205 continue; 204 continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2cd97342bf0f..fa4b3c8b3bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.37" 41#define DC_VER "3.1.38"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -186,13 +186,12 @@ enum wm_report_mode {
186 186
187struct dc_clocks { 187struct dc_clocks {
188 int dispclk_khz; 188 int dispclk_khz;
189 int max_dppclk_khz; 189 int max_supported_dppclk_khz;
190 int dppclk_khz;
190 int dcfclk_khz; 191 int dcfclk_khz;
191 int socclk_khz; 192 int socclk_khz;
192 int dcfclk_deep_sleep_khz; 193 int dcfclk_deep_sleep_khz;
193 int fclk_khz; 194 int fclk_khz;
194 int dram_ccm_us;
195 int min_active_dram_ccm_us;
196}; 195};
197 196
198struct dc_debug { 197struct dc_debug {
@@ -447,6 +446,7 @@ union surface_update_flags {
447 446
448struct dc_plane_state { 447struct dc_plane_state {
449 struct dc_plane_address address; 448 struct dc_plane_address address;
449 struct dc_plane_flip_time time;
450 struct scaling_taps scaling_quality; 450 struct scaling_taps scaling_quality;
451 struct rect src_rect; 451 struct rect src_rect;
452 struct rect dst_rect; 452 struct rect dst_rect;
@@ -557,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
557 */ 557 */
558struct dc_flip_addrs { 558struct dc_flip_addrs {
559 struct dc_plane_address address; 559 struct dc_plane_address address;
560 unsigned int flip_timestamp_in_us;
560 bool flip_immediate; 561 bool flip_immediate;
561 /* TODO: add flip duration for FreeSync */ 562 /* TODO: add flip duration for FreeSync */
562}; 563};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e91ac6811990..b83a7dc2f5a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -692,8 +692,18 @@ struct crtc_trigger_info {
692 enum trigger_delay delay; 692 enum trigger_delay delay;
693}; 693};
694 694
695struct dc_crtc_timing { 695enum vrr_state {
696 VRR_STATE_OFF = 0,
697 VRR_STATE_VARIABLE,
698 VRR_STATE_FIXED,
699};
696 700
701struct dc_crtc_timing_adjust {
702 uint32_t v_total_min;
703 uint32_t v_total_max;
704};
705
706struct dc_crtc_timing {
697 uint32_t h_total; 707 uint32_t h_total;
698 uint32_t h_border_left; 708 uint32_t h_border_left;
699 uint32_t h_addressable; 709 uint32_t h_addressable;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f44cd4d87b79..d017df56b2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -48,6 +48,8 @@ struct dc_stream_status {
48struct dc_stream_state { 48struct dc_stream_state {
49 struct dc_sink *sink; 49 struct dc_sink *sink;
50 struct dc_crtc_timing timing; 50 struct dc_crtc_timing timing;
51 struct dc_crtc_timing_adjust timing_adjust;
52 struct vrr_params vrr_params;
51 53
52 struct rect src; /* composition area */ 54 struct rect src; /* composition area */
53 struct rect dst; /* stream addressable area */ 55 struct rect dst; /* stream addressable area */
@@ -74,6 +76,10 @@ struct dc_stream_state {
74 unsigned char psr_version; 76 unsigned char psr_version;
75 /* TODO: CEA VIC */ 77 /* TODO: CEA VIC */
76 78
79 /* DMCU info */
80 unsigned int abm_level;
81 unsigned int bl_pwm_level;
82
77 /* from core_stream struct */ 83 /* from core_stream struct */
78 struct dc_context *ctx; 84 struct dc_context *ctx;
79 85
@@ -106,6 +112,7 @@ struct dc_stream_update {
106 struct dc_transfer_func *out_transfer_func; 112 struct dc_transfer_func *out_transfer_func;
107 struct dc_hdr_static_metadata *hdr_static_metadata; 113 struct dc_hdr_static_metadata *hdr_static_metadata;
108 enum color_transfer_func color_output_tf; 114 enum color_transfer_func color_output_tf;
115 unsigned int *abm_level;
109}; 116};
110 117
111bool dc_is_stream_unchanged( 118bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 8811b6f86bff..9441305d3ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -521,6 +521,24 @@ struct audio_info {
521 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; 521 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
522}; 522};
523 523
524struct vrr_params {
525 enum vrr_state state;
526 uint32_t window_min;
527 uint32_t window_max;
528 uint32_t inserted_frame_duration_in_us;
529 uint32_t frames_to_insert;
530 uint32_t frame_counter;
531};
532
533#define DC_PLANE_UPDATE_TIMES_MAX 10
534
535struct dc_plane_flip_time {
536 unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
537 unsigned int index;
538 unsigned int prev_update_time_in_us;
539};
540
541// Will combine with vrr_params at some point.
524struct freesync_context { 542struct freesync_context {
525 bool supported; 543 bool supported;
526 bool enabled; 544 bool enabled;
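The new dc_plane_flip_time struct holds the last DC_PLANE_UPDATE_TIMES_MAX inter-flip intervals plus the previous timestamp, sized to pair with the flip_timestamp_in_us field added to dc_flip_addrs above. A minimal ring-buffer update, assuming the index wraps around; the consumer side is not shown in this diff:

#include <stdio.h>

#define DC_PLANE_UPDATE_TIMES_MAX 10

struct dc_plane_flip_time {
        unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
        unsigned int index;
        unsigned int prev_update_time_in_us;
};

/* record one flip: store the elapsed interval, advance the ring index */
static void record_flip(struct dc_plane_flip_time *t, unsigned int now_us)
{
        t->time_elapsed_in_us[t->index] = now_us - t->prev_update_time_in_us;
        t->prev_update_time_in_us = now_us;
        t->index = (t->index + 1) % DC_PLANE_UPDATE_TIMES_MAX;
}

int main(void)
{
        struct dc_plane_flip_time t = { {0}, 0, 0 };

        record_flip(&t, 16666);
        record_flip(&t, 33333);
        printf("last flip interval: %u us\n", t.time_elapsed_in_us[1]);
        return 0;
}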
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 1d4546f23135..c24c0e5ea44e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -46,6 +46,23 @@
46 SR(SMU_INTERRUPT_CONTROL), \ 46 SR(SMU_INTERRUPT_CONTROL), \
47 SR(DC_DMCU_SCRATCH) 47 SR(DC_DMCU_SCRATCH)
48 48
49#define DMCU_DCE80_REG_LIST() \
50 SR(DMCU_CTRL), \
51 SR(DMCU_STATUS), \
52 SR(DMCU_RAM_ACCESS_CTRL), \
53 SR(DMCU_IRAM_WR_CTRL), \
54 SR(DMCU_IRAM_WR_DATA), \
55 SR(MASTER_COMM_DATA_REG1), \
56 SR(MASTER_COMM_DATA_REG2), \
57 SR(MASTER_COMM_DATA_REG3), \
58 SR(MASTER_COMM_CMD_REG), \
59 SR(MASTER_COMM_CNTL_REG), \
60 SR(DMCU_IRAM_RD_CTRL), \
61 SR(DMCU_IRAM_RD_DATA), \
62 SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
63 SR(SMU_INTERRUPT_CONTROL), \
64 SR(DC_DMCU_SCRATCH)
65
49#define DMCU_DCE110_COMMON_REG_LIST() \ 66#define DMCU_DCE110_COMMON_REG_LIST() \
50 DMCU_COMMON_REG_LIST_DCE_BASE(), \ 67 DMCU_COMMON_REG_LIST_DCE_BASE(), \
51 SR(DCI_MEM_PWR_STATUS) 68 SR(DCI_MEM_PWR_STATUS)
@@ -83,6 +100,24 @@
83 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \ 100 STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
84 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh) 101 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
85 102
103#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
104 DMCU_SF(DMCU_CTRL, \
105 DMCU_ENABLE, mask_sh), \
106 DMCU_SF(DMCU_STATUS, \
107 UC_IN_STOP_MODE, mask_sh), \
108 DMCU_SF(DMCU_STATUS, \
109 UC_IN_RESET, mask_sh), \
110 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
111 IRAM_HOST_ACCESS_EN, mask_sh), \
112 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
113 IRAM_WR_ADDR_AUTO_INC, mask_sh), \
114 DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
115 IRAM_RD_ADDR_AUTO_INC, mask_sh), \
116 DMCU_SF(MASTER_COMM_CMD_REG, \
117 MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
118 DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
119 DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
120
86#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \ 121#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
87 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \ 122 DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
88 DMCU_SF(DCI_MEM_PWR_STATUS, \ 123 DMCU_SF(DCI_MEM_PWR_STATUS, \
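The DMCU_DCE80_REG_LIST and DMCU_MASK_SH_LIST_DCE80 additions follow the driver's list-macro convention: one expansion enumerates the registers to capture, the other the (register, field) mask/shift pairs. A toy X-macro version of the same idea with invented offsets, not the real SR()/DMCU_SF() plumbing:

#include <stdio.h>
#include <stdint.h>

/* registers listed once, expanded into struct fields and an offset dump */
#define DMCU_REG_LIST(X)             \
        X(DMCU_CTRL, 0x100)          \
        X(DMCU_STATUS, 0x104)        \
        X(MASTER_COMM_CMD_REG, 0x110)

struct dmcu_registers {
#define AS_FIELD(name, offset) uint32_t name;
        DMCU_REG_LIST(AS_FIELD)
#undef AS_FIELD
};

int main(void)
{
#define PRINT_OFFSET(name, offset) printf(#name " @ 0x%03x\n", offset);
        DMCU_REG_LIST(PRINT_OFFSET)
#undef PRINT_OFFSET
        printf("struct size: %zu bytes\n", sizeof(struct dmcu_registers));
        return 0;
}

Listing each register exactly once keeps the struct layout, the capture list, and any debug dump from drifting apart, which is the point of the SR() convention these hunks extend.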
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 4b8e7ce2de8c..487724345d9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) 56 if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
57 return; 57 return;
58 58
59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx], 59 val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph, 60 BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
61 BLND_SCL_V_UPDATE_LOCK, &scl, 61 BLND_SCL_V_UPDATE_LOCK, &scl,
62 BLND_BLND_V_UPDATE_LOCK, &blnd, 62 BLND_BLND_V_UPDATE_LOCK, &blnd,
@@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
67 blnd = lock_val; 67 blnd = lock_val;
68 update_lock_mode = lock_val; 68 update_lock_mode = lock_val;
69 69
70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 70 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph, 71 BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
72 BLND_SCL_V_UPDATE_LOCK, scl); 72 BLND_SCL_V_UPDATE_LOCK, scl);
73 73
74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0) 74 if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val, 75 REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
76 BLND_BLND_V_UPDATE_LOCK, blnd, 76 BLND_BLND_V_UPDATE_LOCK, blnd,
77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode); 77 BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
78 78
79 if (hws->wa.blnd_crtc_trigger) { 79 if (hws->wa.blnd_crtc_trigger) {
80 if (!lock) { 80 if (!lock) {
81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]); 81 uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value); 82 REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
83 } 83 }
84 } 84 }
85} 85}
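The substitution of pipe->pipe_idx with pipe->stream_res.tg->inst matters once a software pipe is not guaranteed to drive the identically numbered timing generator; the BLND and CRTC register banks belong to the TG instance, not the pipe slot. A toy illustration, with types invented for the example:

#include <stdio.h>

struct tg { int inst; };
struct pipe { int pipe_idx; struct tg *tg; };

int main(void)
{
	struct tg tgs[2] = { { .inst = 1 }, { .inst = 0 } };
	/* software pipe 0 happens to drive hardware TG instance 1 */
	struct pipe p = { .pipe_idx = 0, .tg = &tgs[0] };

	/* indexing BLND_V_UPDATE_LOCK[] by pipe_idx would lock block 0;
	 * the block actually driving the screen is tg->inst */
	printf("pipe_idx=%d, BLND block to lock=%d\n", p.pipe_idx, p.tg->inst);
	return 0;
}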
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 3336428b1fed..057b8afd74bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -190,6 +190,7 @@
190 SR(D2VGA_CONTROL), \ 190 SR(D2VGA_CONTROL), \
191 SR(D3VGA_CONTROL), \ 191 SR(D3VGA_CONTROL), \
192 SR(D4VGA_CONTROL), \ 192 SR(D4VGA_CONTROL), \
193 SR(VGA_TEST_CONTROL), \
193 SR(DC_IP_REQUEST_CNTL), \ 194 SR(DC_IP_REQUEST_CNTL), \
194 BL_REG_LIST() 195 BL_REG_LIST()
195 196
@@ -261,6 +262,7 @@ struct dce_hwseq_registers {
261 uint32_t D2VGA_CONTROL; 262 uint32_t D2VGA_CONTROL;
262 uint32_t D3VGA_CONTROL; 263 uint32_t D3VGA_CONTROL;
263 uint32_t D4VGA_CONTROL; 264 uint32_t D4VGA_CONTROL;
265 uint32_t VGA_TEST_CONTROL;
264 /* MMHUB registers. read only. temporary hack */ 266 /* MMHUB registers. read only. temporary hack */
265 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 267 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
266 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 268 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -327,6 +329,8 @@ struct dce_hwseq_registers {
327 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\ 329 HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
328 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\ 330 SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
329 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\ 331 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
332 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
333 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
330 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\ 334 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
331 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) 335 HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
332 336
@@ -403,7 +407,15 @@ struct dce_hwseq_registers {
403 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 407 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
404 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 408 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
405 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 409 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
410 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
411 HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
412 HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
413 HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
414 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
415 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
406 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 416 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
417 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
418 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
407 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 419 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
408 420
409#define HWSEQ_REG_FIELD_LIST(type) \ 421#define HWSEQ_REG_FIELD_LIST(type) \
@@ -436,7 +448,9 @@ struct dce_hwseq_registers {
436 type ENABLE_L1_TLB;\ 448 type ENABLE_L1_TLB;\
437 type SYSTEM_ACCESS_MODE;\ 449 type SYSTEM_ACCESS_MODE;\
438 type LVTMA_BLON;\ 450 type LVTMA_BLON;\
439 type LVTMA_PWRSEQ_TARGET_STATE_R; 451 type LVTMA_PWRSEQ_TARGET_STATE_R;\
452 type LVTMA_DIGON;\
453 type LVTMA_DIGON_OVRD;
440 454
441#define HWSEQ_DCN_REG_FIELD_LIST(type) \ 455#define HWSEQ_DCN_REG_FIELD_LIST(type) \
442 type HUBP_VTG_SEL; \ 456 type HUBP_VTG_SEL; \
@@ -483,7 +497,13 @@ struct dce_hwseq_registers {
483 type DCFCLK_GATE_DIS; \ 497 type DCFCLK_GATE_DIS; \
484 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 498 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
485 type DENTIST_DPPCLK_WDIVIDER; \ 499 type DENTIST_DPPCLK_WDIVIDER; \
486 type DENTIST_DISPCLK_WDIVIDER; 500 type DENTIST_DISPCLK_WDIVIDER; \
501 type VGA_TEST_ENABLE; \
502 type VGA_TEST_RENDER_START; \
503 type D1VGA_MODE_ENABLE; \
504 type D2VGA_MODE_ENABLE; \
505 type D3VGA_MODE_ENABLE; \
506 type D4VGA_MODE_ENABLE;
487 507
488struct dce_hwseq_shift { 508struct dce_hwseq_shift {
489 HWSEQ_REG_FIELD_LIST(uint8_t) 509 HWSEQ_REG_FIELD_LIST(uint8_t)
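The trailing HWSEQ_REG_FIELD_LIST edits extend an X-macro field list that is instantiated twice, once with uint8_t for dce_hwseq_shift and once with uint32_t for dce_hwseq_mask, as the two struct definitions above show. A compilable sketch of that pattern using a few of the fields added in this hunk (the shift/mask values are invented):

#include <stdint.h>
#include <stdio.h>

/* one field list, stamped out with two storage types */
#define DEMO_REG_FIELD_LIST(type) \
	type LVTMA_BLON; \
	type LVTMA_DIGON; \
	type LVTMA_DIGON_OVRD;

struct demo_shift { DEMO_REG_FIELD_LIST(uint8_t) };
struct demo_mask  { DEMO_REG_FIELD_LIST(uint32_t) };

int main(void)
{
	struct demo_shift s = { .LVTMA_DIGON = 1 };
	struct demo_mask  m = { .LVTMA_DIGON = 0x2 };
	uint32_t reg = 0x6; /* pretend LVTMA_PWRSEQ_CNTL readback */

	printf("LVTMA_DIGON = %u\n",
	       (unsigned)((reg & m.LVTMA_DIGON) >> s.LVTMA_DIGON));
	return 0;
}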
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 81776e4797ed..8167cad7bcf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -767,8 +767,7 @@ void dce110_link_encoder_construct(
767 bp_cap_info.DP_HBR3_EN; 767 bp_cap_info.DP_HBR3_EN;
768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; 768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
769 } else { 769 } else {
770 dm_logger_write(enc110->base.ctx->logger, LOG_WARNING, 770 DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
771 "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
772 __func__, 771 __func__,
773 result); 772 result);
774 } 773 }
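This hunk converts an explicit dm_logger_write() call into the DC_LOG_WARNING() shorthand. Judging from the dml_logger.h hunk later in this diff, the DC_LOG_* macros pick up a per-file DC_LOGGER definition; a rough stand-alone model of that convention follows (the demo logger type and tag are assumptions, not DC's real definitions):

#include <stdarg.h>
#include <stdio.h>

struct dal_logger { const char *tag; };

static void dm_logger_write(struct dal_logger *l, const char *fmt, ...)
{
	va_list ap;

	printf("[%s] ", l->tag);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* each .c file names its logger once... */
static struct dal_logger demo_logger = { .tag = "WARNING" };
#define DC_LOGGER (&demo_logger)

/* ...so every call site shrinks to a printf-style one-liner */
#define DC_LOG_WARNING(fmt, ...) dm_logger_write(DC_LOGGER, fmt, ##__VA_ARGS__)

int main(void)
{
	DC_LOG_WARNING("%s: Failed to get encoder_cap_info, error %d!\n",
		       __func__, -22);
	return 0;
}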
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3bdbed80f7f8..3092f76bdb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -51,6 +51,9 @@
51#include "dce/dce_10_0_d.h" 51#include "dce/dce_10_0_d.h"
52#include "dce/dce_10_0_sh_mask.h" 52#include "dce/dce_10_0_sh_mask.h"
53 53
54#include "dce/dce_dmcu.h"
55#include "dce/dce_abm.h"
56
54#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
55#include "gmc/gmc_8_2_d.h" 58#include "gmc/gmc_8_2_d.h"
56#include "gmc/gmc_8_2_sh_mask.h" 59#include "gmc/gmc_8_2_sh_mask.h"
@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
320 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) 323 CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
321}; 324};
322 325
326static const struct dce_dmcu_registers dmcu_regs = {
327 DMCU_DCE110_COMMON_REG_LIST()
328};
329
330static const struct dce_dmcu_shift dmcu_shift = {
331 DMCU_MASK_SH_LIST_DCE110(__SHIFT)
332};
333
334static const struct dce_dmcu_mask dmcu_mask = {
335 DMCU_MASK_SH_LIST_DCE110(_MASK)
336};
337
338static const struct dce_abm_registers abm_regs = {
339 ABM_DCE110_COMMON_REG_LIST()
340};
341
342static const struct dce_abm_shift abm_shift = {
343 ABM_MASK_SH_LIST_DCE110(__SHIFT)
344};
323 345
346static const struct dce_abm_mask abm_mask = {
347 ABM_MASK_SH_LIST_DCE110(_MASK)
348};
324 349
325#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 350#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
326 351
@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
622 if (pool->base.display_clock != NULL) 647 if (pool->base.display_clock != NULL)
623 dce_disp_clk_destroy(&pool->base.display_clock); 648 dce_disp_clk_destroy(&pool->base.display_clock);
624 649
650 if (pool->base.abm != NULL)
651 dce_abm_destroy(&pool->base.abm);
652
653 if (pool->base.dmcu != NULL)
654 dce_dmcu_destroy(&pool->base.dmcu);
655
625 if (pool->base.irqs != NULL) 656 if (pool->base.irqs != NULL)
626 dal_irq_service_destroy(&pool->base.irqs); 657 dal_irq_service_destroy(&pool->base.irqs);
627} 658}
@@ -829,6 +860,25 @@ static bool construct(
829 goto res_create_fail; 860 goto res_create_fail;
830 } 861 }
831 862
863 pool->base.dmcu = dce_dmcu_create(ctx,
864 &dmcu_regs,
865 &dmcu_shift,
866 &dmcu_mask);
867 if (pool->base.dmcu == NULL) {
868 dm_error("DC: failed to create dmcu!\n");
869 BREAK_TO_DEBUGGER();
870 goto res_create_fail;
871 }
872
873 pool->base.abm = dce_abm_create(ctx,
874 &abm_regs,
875 &abm_shift,
876 &abm_mask);
877 if (pool->base.abm == NULL) {
878 dm_error("DC: failed to create abm!\n");
879 BREAK_TO_DEBUGGER();
880 goto res_create_fail;
881 }
832 882
833 /* get static clock information for PPLIB or firmware, save 883 /* get static clock information for PPLIB or firmware, save
834 * max_clock_state 884 * max_clock_state
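The DMCU/ABM hookups above follow the resource-pool lifecycle used throughout these files: create in construct() with a goto-based unwind on failure, destroy in destruct() through a pointer-to-pointer so the pool slot is also cleared. A minimal sketch of the pairing, with illustrative names in place of the dce_* constructors:

#include <stdlib.h>

struct thing { int id; };
struct pool { struct thing *dmcu; struct thing *abm; };

static struct thing *thing_create(int id)
{
	struct thing *t = malloc(sizeof(*t));

	if (t)
		t->id = id;
	return t;
}

/* pointer-to-pointer so the pool slot is NULLed, as dce_dmcu_destroy() does */
static void thing_destroy(struct thing **t)
{
	free(*t);
	*t = NULL;
}

static void destruct(struct pool *p)
{
	if (p->abm != NULL)
		thing_destroy(&p->abm);
	if (p->dmcu != NULL)
		thing_destroy(&p->dmcu);
}

static int construct(struct pool *p)
{
	p->dmcu = thing_create(1);
	if (p->dmcu == NULL)
		goto res_create_fail;
	p->abm = thing_create(2);
	if (p->abm == NULL)
		goto res_create_fail;
	return 0;

res_create_fail:
	destruct(p); /* unwinds whatever was created so far */
	return -1;
}

int main(void)
{
	struct pool pool = { 0 };
	int ret = construct(&pool);

	destruct(&pool); /* safe either way: slots are NULL after cleanup */
	return ret ? 1 : 0;
}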
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c2041a63cccd..30dd62f0f5fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -737,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
737 737
738static bool is_panel_powered_on(struct dce_hwseq *hws) 738static bool is_panel_powered_on(struct dce_hwseq *hws)
739{ 739{
740 uint32_t value; 740 uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
741
742
743 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
744
745 REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
741 746
742 REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value); 747 return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
743 return value == 1;
744} 748}
745 749
746static enum bp_result link_transmitter_control( 750static enum bp_result link_transmitter_control(
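The reworked is_panel_powered_on() treats the panel as powered either when the LVTMA power sequencer reports target state 1 or when DIGON is asserted together with its override bit, covering panels forced on outside the sequencer. A small model of the new predicate with the register fields passed in as plain values:

#include <stdbool.h>
#include <stdio.h>

static bool is_panel_powered_on(unsigned int pwr_seq_state,
				unsigned int dig_on,
				unsigned int dig_on_ovrd)
{
	return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}

int main(void)
{
	printf("%d\n", is_panel_powered_on(1, 0, 0)); /* 1: sequencer at target */
	printf("%d\n", is_panel_powered_on(0, 1, 1)); /* 1: DIGON forced via override */
	printf("%d\n", is_panel_powered_on(0, 1, 0)); /* 0: DIGON set, no override */
	return 0;
}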
@@ -1002,8 +1006,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
1002 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1006 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1003 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params); 1007 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
1004 1008
1005 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) 1009 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1006 link->dc->hwss.edp_backlight_control(link, true); 1010 link->dc->hwss.edp_backlight_control(link, true);
1011 stream->bl_pwm_level = 0;
1012 }
1007} 1013}
1008void dce110_blank_stream(struct pipe_ctx *pipe_ctx) 1014void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
1009{ 1015{
@@ -1128,7 +1134,7 @@ static void build_audio_output(
1128static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx, 1134static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
1129 struct tg_color *color) 1135 struct tg_color *color)
1130{ 1136{
1131 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4; 1137 uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
1132 1138
1133 switch (pipe_ctx->plane_res.scl_data.format) { 1139 switch (pipe_ctx->plane_res.scl_data.format) {
1134 case PIXEL_FORMAT_ARGB8888: 1140 case PIXEL_FORMAT_ARGB8888:
@@ -2106,9 +2112,6 @@ enum dc_status dce110_apply_ctx_to_hw(
2106 return status; 2112 return status;
2107 } 2113 }
2108 2114
2109 /* pplib is notified if disp_num changed */
2110 dc->hwss.set_bandwidth(dc, context, true);
2111
2112 /* to save power */ 2115 /* to save power */
2113 apply_min_clocks(dc, context, &clocks_state, false); 2116 apply_min_clocks(dc, context, &clocks_state, false);
2114 2117
@@ -2936,15 +2939,18 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2936{ 2939{
2937 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; 2940 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2938 2941
2939 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes) 2942 if (pipe_ctx->plane_res.ipp &&
2943 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
2940 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes( 2944 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
2941 pipe_ctx->plane_res.ipp, attributes); 2945 pipe_ctx->plane_res.ipp, attributes);
2942 2946
2943 if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes) 2947 if (pipe_ctx->plane_res.mi &&
2948 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
2944 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes( 2949 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
2945 pipe_ctx->plane_res.mi, attributes); 2950 pipe_ctx->plane_res.mi, attributes);
2946 2951
2947 if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes) 2952 if (pipe_ctx->plane_res.xfm &&
2953 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
2948 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes( 2954 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
2949 pipe_ctx->plane_res.xfm, attributes); 2955 pipe_ctx->plane_res.xfm, attributes);
2950} 2956}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index a36c14d3d9a8..5d854a37a978 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -53,6 +53,8 @@
53 53
54#include "reg_helper.h" 54#include "reg_helper.h"
55 55
56#include "dce/dce_dmcu.h"
57#include "dce/dce_abm.h"
56/* TODO remove this include */ 58/* TODO remove this include */
57 59
58#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 60#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
364 .num_pll = 2, 366 .num_pll = 2,
365}; 367};
366 368
369static const struct dce_dmcu_registers dmcu_regs = {
370 DMCU_DCE80_REG_LIST()
371};
372
373static const struct dce_dmcu_shift dmcu_shift = {
374 DMCU_MASK_SH_LIST_DCE80(__SHIFT)
375};
376
377static const struct dce_dmcu_mask dmcu_mask = {
378 DMCU_MASK_SH_LIST_DCE80(_MASK)
379};
380static const struct dce_abm_registers abm_regs = {
381 ABM_DCE110_COMMON_REG_LIST()
382};
383
384static const struct dce_abm_shift abm_shift = {
385 ABM_MASK_SH_LIST_DCE110(__SHIFT)
386};
387
388static const struct dce_abm_mask abm_mask = {
389 ABM_MASK_SH_LIST_DCE110(_MASK)
390};
391
367#define CTX ctx 392#define CTX ctx
368#define REG(reg) mm ## reg 393#define REG(reg) mm ## reg
369 394
@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
643 } 668 }
644 } 669 }
645 670
671 if (pool->base.abm != NULL)
672 dce_abm_destroy(&pool->base.abm);
673
674 if (pool->base.dmcu != NULL)
675 dce_dmcu_destroy(&pool->base.dmcu);
676
646 if (pool->base.dp_clock_source != NULL) 677 if (pool->base.dp_clock_source != NULL)
647 dce80_clock_source_destroy(&pool->base.dp_clock_source); 678 dce80_clock_source_destroy(&pool->base.dp_clock_source);
648 679
@@ -850,7 +881,25 @@ static bool dce80_construct(
850 goto res_create_fail; 881 goto res_create_fail;
851 } 882 }
852 883
884 pool->base.dmcu = dce_dmcu_create(ctx,
885 &dmcu_regs,
886 &dmcu_shift,
887 &dmcu_mask);
888 if (pool->base.dmcu == NULL) {
889 dm_error("DC: failed to create dmcu!\n");
890 BREAK_TO_DEBUGGER();
891 goto res_create_fail;
892 }
853 893
894 pool->base.abm = dce_abm_create(ctx,
895 &abm_regs,
896 &abm_shift,
897 &abm_mask);
898 if (pool->base.abm == NULL) {
899 dm_error("DC: failed to create abm!\n");
900 BREAK_TO_DEBUGGER();
901 goto res_create_fail;
902 }
854 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 903 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
855 pool->base.display_clock->max_clks_state = 904 pool->base.display_clock->max_clks_state =
856 static_clk_info.max_clocks_state; 905 static_clk_info.max_clocks_state;
@@ -1016,6 +1065,25 @@ static bool dce81_construct(
1016 goto res_create_fail; 1065 goto res_create_fail;
1017 } 1066 }
1018 1067
1068 pool->base.dmcu = dce_dmcu_create(ctx,
1069 &dmcu_regs,
1070 &dmcu_shift,
1071 &dmcu_mask);
1072 if (pool->base.dmcu == NULL) {
1073 dm_error("DC: failed to create dmcu!\n");
1074 BREAK_TO_DEBUGGER();
1075 goto res_create_fail;
1076 }
1077
1078 pool->base.abm = dce_abm_create(ctx,
1079 &abm_regs,
1080 &abm_shift,
1081 &abm_mask);
1082 if (pool->base.abm == NULL) {
1083 dm_error("DC: failed to create abm!\n");
1084 BREAK_TO_DEBUGGER();
1085 goto res_create_fail;
1086 }
1019 1087
1020 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1088 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1021 pool->base.display_clock->max_clks_state = 1089 pool->base.display_clock->max_clks_state =
@@ -1178,6 +1246,25 @@ static bool dce83_construct(
1178 goto res_create_fail; 1246 goto res_create_fail;
1179 } 1247 }
1180 1248
1249 pool->base.dmcu = dce_dmcu_create(ctx,
1250 &dmcu_regs,
1251 &dmcu_shift,
1252 &dmcu_mask);
1253 if (pool->base.dmcu == NULL) {
1254 dm_error("DC: failed to create dmcu!\n");
1255 BREAK_TO_DEBUGGER();
1256 goto res_create_fail;
1257 }
1258
1259 pool->base.abm = dce_abm_create(ctx,
1260 &abm_regs,
1261 &abm_shift,
1262 &abm_mask);
1263 if (pool->base.abm == NULL) {
1264 dm_error("DC: failed to create abm!\n");
1265 BREAK_TO_DEBUGGER();
1266 goto res_create_fail;
1267 }
1181 1268
1182 if (dm_pp_get_static_clocks(ctx, &static_clk_info)) 1269 if (dm_pp_get_static_clocks(ctx, &static_clk_info))
1183 pool->base.display_clock->max_clks_state = 1270 pool->base.display_clock->max_clks_state =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index f0b798930b51..e305c28c98de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -464,6 +464,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
464 .set_cursor_attributes = dpp1_set_cursor_attributes, 464 .set_cursor_attributes = dpp1_set_cursor_attributes,
465 .set_cursor_position = dpp1_set_cursor_position, 465 .set_cursor_position = dpp1_set_cursor_position,
466 .dpp_dppclk_control = dpp1_dppclk_control, 466 .dpp_dppclk_control = dpp1_dppclk_control,
467 .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
467}; 468};
468 469
469static struct dpp_caps dcn10_dpp_cap = { 470static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 07003d9c6bba..17b062a8f88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -113,7 +113,8 @@
113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ 113 SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ 114 SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
115 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ 115 SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
116 SRI(DPP_CONTROL, DPP_TOP, id) 116 SRI(DPP_CONTROL, DPP_TOP, id), \
117 SRI(CM_HDR_MULT_COEF, CM, id)
117 118
118 119
119 120
@@ -308,7 +309,8 @@
308 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ 309 TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
309 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ 310 TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
310 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ 311 TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
311 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh) 312 TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
313 TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
312 314
313#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ 315#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
314 TF_REG_LIST_SH_MASK_DCN(mask_sh),\ 316 TF_REG_LIST_SH_MASK_DCN(mask_sh),\
@@ -1012,7 +1014,8 @@
1012 type CUR0_COLOR0; \ 1014 type CUR0_COLOR0; \
1013 type CUR0_COLOR1; \ 1015 type CUR0_COLOR1; \
1014 type DPPCLK_RATE_CONTROL; \ 1016 type DPPCLK_RATE_CONTROL; \
1015 type DPP_CLOCK_ENABLE; 1017 type DPP_CLOCK_ENABLE; \
1018 type CM_HDR_MULT_COEF;
1016 1019
1017struct dcn_dpp_shift { 1020struct dcn_dpp_shift {
1018 TF_REG_FIELD_LIST(uint8_t) 1021 TF_REG_FIELD_LIST(uint8_t)
@@ -1258,7 +1261,8 @@ struct dcn_dpp_mask {
1258 uint32_t CURSOR0_CONTROL; \ 1261 uint32_t CURSOR0_CONTROL; \
1259 uint32_t CURSOR0_COLOR0; \ 1262 uint32_t CURSOR0_COLOR0; \
1260 uint32_t CURSOR0_COLOR1; \ 1263 uint32_t CURSOR0_COLOR1; \
1261 uint32_t DPP_CONTROL; 1264 uint32_t DPP_CONTROL; \
1265 uint32_t CM_HDR_MULT_COEF;
1262 1266
1263struct dcn_dpp_registers { 1267struct dcn_dpp_registers {
1264 DPP_COMMON_REG_VARIABLE_LIST 1268 DPP_COMMON_REG_VARIABLE_LIST
@@ -1414,6 +1418,10 @@ void dpp1_dppclk_control(
1414 bool dppclk_div, 1418 bool dppclk_div,
1415 bool enable); 1419 bool enable);
1416 1420
1421void dpp1_set_hdr_multiplier(
1422 struct dpp *dpp_base,
1423 uint32_t multiplier);
1424
1417void dpp1_construct(struct dcn10_dpp *dpp1, 1425void dpp1_construct(struct dcn10_dpp *dpp1,
1418 struct dc_context *ctx, 1426 struct dc_context *ctx,
1419 uint32_t inst, 1427 uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index bd3fcdfb79c5..fb32975e4b67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -804,3 +804,12 @@ void dpp1_program_input_lut(
804 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); 804 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
805 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); 805 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
806} 806}
807
808void dpp1_set_hdr_multiplier(
809 struct dpp *dpp_base,
810 uint32_t multiplier)
811{
812 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
813
814 REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
815}
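dpp1_set_hdr_multiplier() lands the value with REG_UPDATE(), DC's read-modify-write helper for a single register field. A simplified stand-alone model of those semantics; the real macro resolves the register offset and the shift/mask tables generated above, and the 19-bit mask here is an assumption based on the coefficient format:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_reg; /* stands in for CM_HDR_MULT_COEF */

static void reg_update(uint32_t *reg, uint32_t mask, uint32_t shift,
		       uint32_t field_value)
{
	uint32_t v = *reg;                  /* read        */

	v &= ~mask;                         /* clear field */
	v |= (field_value << shift) & mask; /* merge value */
	*reg = v;                           /* write back  */
}

int main(void)
{
	/* assumed 19-bit coefficient field at bit 0 */
	reg_update(&demo_reg, 0x0007ffff, 0, 0x1f000);
	printf("CM_HDR_MULT_COEF = %#07x\n", (unsigned)demo_reg);
	return 0;
}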
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1907ade1574a..8b0f6b8a5627 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -220,10 +220,34 @@ static void enable_power_gating_plane(
220static void disable_vga( 220static void disable_vga(
221 struct dce_hwseq *hws) 221 struct dce_hwseq *hws)
222{ 222{
223 unsigned int in_vga1_mode = 0;
224 unsigned int in_vga2_mode = 0;
225 unsigned int in_vga3_mode = 0;
226 unsigned int in_vga4_mode = 0;
227
228 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
229 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
230 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
231 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
232
233 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
234 in_vga3_mode == 0 && in_vga4_mode == 0)
235 return;
236
223 REG_WRITE(D1VGA_CONTROL, 0); 237 REG_WRITE(D1VGA_CONTROL, 0);
224 REG_WRITE(D2VGA_CONTROL, 0); 238 REG_WRITE(D2VGA_CONTROL, 0);
225 REG_WRITE(D3VGA_CONTROL, 0); 239 REG_WRITE(D3VGA_CONTROL, 0);
226 REG_WRITE(D4VGA_CONTROL, 0); 240 REG_WRITE(D4VGA_CONTROL, 0);
241
242 /* HW Engineer's Notes:
243 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
244 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
245 *
246 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
247 * VGA_TEST_ENABLE, to leave it in the same state as before.
248 */
249 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
250 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
227} 251}
228 252
229static void dpp_pg_control( 253static void dpp_pg_control(
@@ -1685,16 +1709,22 @@ static void update_dchubp_dpp(
1685 union plane_size size = plane_state->plane_size; 1709 union plane_size size = plane_state->plane_size;
1686 1710
1687 /* depends on DML calculation, DPP clock value may change dynamically */ 1711 /* depends on DML calculation, DPP clock value may change dynamically */
 1712 /* If the requested max dpp clk is lower than the current dispclk, there
 1713 * is no need to divide by 2
 1714 */
1688 if (plane_state->update_flags.bits.full_update) { 1715 if (plane_state->update_flags.bits.full_update) {
1716 bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
1717 context->bw.dcn.cur_clk.dispclk_khz / 2;
1718
1689 dpp->funcs->dpp_dppclk_control( 1719 dpp->funcs->dpp_dppclk_control(
1690 dpp, 1720 dpp,
1691 context->bw.dcn.calc_clk.max_dppclk_khz < 1721 should_divided_by_2,
1692 context->bw.dcn.calc_clk.dispclk_khz,
1693 true); 1722 true);
1694 1723
1695 dc->current_state->bw.dcn.cur_clk.max_dppclk_khz = 1724 dc->current_state->bw.dcn.cur_clk.dppclk_khz =
1696 context->bw.dcn.calc_clk.max_dppclk_khz; 1725 should_divided_by_2 ?
1697 context->bw.dcn.cur_clk.max_dppclk_khz = context->bw.dcn.calc_clk.max_dppclk_khz; 1726 context->bw.dcn.cur_clk.dispclk_khz / 2 :
1727 context->bw.dcn.cur_clk.dispclk_khz;
1698 } 1728 }
1699 1729
1700 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 1730 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1780,14 +1810,62 @@ static void update_dchubp_dpp(
1780 hubp->funcs->set_blank(hubp, false); 1810 hubp->funcs->set_blank(hubp, false);
1781} 1811}
1782 1812
1813static void dcn10_otg_blank(
1814 struct dc *dc,
1815 struct stream_resource stream_res,
1816 struct dc_stream_state *stream,
1817 bool blank)
1818{
1819 enum dc_color_space color_space;
1820 struct tg_color black_color = {0};
1821
1822 /* program otg blank color */
1823 color_space = stream->output_color_space;
1824 color_space_to_black_color(dc, color_space, &black_color);
1825
1826 if (stream_res.tg->funcs->set_blank_color)
1827 stream_res.tg->funcs->set_blank_color(
1828 stream_res.tg,
1829 &black_color);
1830
1831 if (!blank) {
1832 if (stream_res.tg->funcs->set_blank)
1833 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1834 if (stream_res.abm)
1835 stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
1836 } else if (blank) {
1837 if (stream_res.abm)
1838 stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
1839 if (stream_res.tg->funcs->set_blank)
1840 stream_res.tg->funcs->set_blank(stream_res.tg, blank);
1841 }
1842}
1843
1844static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
1845{
1846 struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
1847 pipe_ctx->plane_state->sdr_white_level, 80);
1848 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
1849 struct custom_float_format fmt;
1850
1851 fmt.exponenta_bits = 6;
1852 fmt.mantissa_bits = 12;
1853 fmt.sign = true;
1854
1855 if (pipe_ctx->plane_state->sdr_white_level > 80)
1856 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
1857
1858 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
1859 pipe_ctx->plane_res.dpp, hw_mult);
1860}
1783 1861
1784static void program_all_pipe_in_tree( 1862static void program_all_pipe_in_tree(
1785 struct dc *dc, 1863 struct dc *dc,
1786 struct pipe_ctx *pipe_ctx, 1864 struct pipe_ctx *pipe_ctx,
1787 struct dc_state *context) 1865 struct dc_state *context)
1788{ 1866{
1789
1790 if (pipe_ctx->top_pipe == NULL) { 1867 if (pipe_ctx->top_pipe == NULL) {
1868 bool blank = !is_pipe_tree_visible(pipe_ctx);
1791 1869
1792 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset; 1870 pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
1793 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start; 1871 pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
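set_hdr_multiplier() above encodes sdr_white_level/80 into the signed 6-bit-exponent/12-bit-mantissa format expected by CM_HDR_MULT_COEF; with an exponent bias of 31, the 1.0 default of 0x1f000 decodes as exponent field 31 (2^0) and mantissa 0. A worked, compilable sketch of that encoding, simplified to positive inputs with no rounding-overflow handling (link with -lm); it is an illustration, not the driver's convert_to_custom_float_format():

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t to_custom_float(double v) /* v > 0 assumed */
{
	const int exp_bits = 6, man_bits = 12;
	const int bias = (1 << (exp_bits - 1)) - 1; /* 31 */
	int e = (int)floor(log2(v));
	uint32_t mant = (uint32_t)((v / pow(2.0, e) - 1.0) *
				   (1 << man_bits) + 0.5);

	return ((uint32_t)(e + bias) << man_bits) | mant; /* sign bit 0 */
}

int main(void)
{
	/* sdr_white_level 80 -> multiplier 1.0 -> the 0x1f000 default */
	printf("level  80 -> %#07x\n", (unsigned)to_custom_float(80.0 / 80.0));
	/* sdr_white_level 160 -> multiplier 2.0 -> 0x20000 */
	printf("level 160 -> %#07x\n", (unsigned)to_custom_float(160.0 / 80.0));
	return 0;
}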
@@ -1798,10 +1876,8 @@ static void program_all_pipe_in_tree(
1798 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1876 pipe_ctx->stream_res.tg->funcs->program_global_sync(
1799 pipe_ctx->stream_res.tg); 1877 pipe_ctx->stream_res.tg);
1800 1878
1801 if (pipe_ctx->stream_res.tg->funcs->set_blank) 1879 dcn10_otg_blank(dc, pipe_ctx->stream_res,
1802 pipe_ctx->stream_res.tg->funcs->set_blank( 1880 pipe_ctx->stream, blank);
1803 pipe_ctx->stream_res.tg,
1804 !is_pipe_tree_visible(pipe_ctx));
1805 } 1881 }
1806 1882
1807 if (pipe_ctx->plane_state != NULL) { 1883 if (pipe_ctx->plane_state != NULL) {
@@ -1810,6 +1886,8 @@ static void program_all_pipe_in_tree(
1810 1886
1811 update_dchubp_dpp(dc, pipe_ctx, context); 1887 update_dchubp_dpp(dc, pipe_ctx, context);
1812 1888
1889 set_hdr_multiplier(pipe_ctx);
1890
1813 if (pipe_ctx->plane_state->update_flags.bits.full_update || 1891 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
1814 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 1892 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
1815 pipe_ctx->plane_state->update_flags.bits.gamma_change) 1893 pipe_ctx->plane_state->update_flags.bits.gamma_change)
@@ -1836,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
1836{ 1914{
1837 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; 1915 struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
1838 1916
1839 pp_display_cfg->all_displays_in_sync = false;/*todo*/
1840 pp_display_cfg->nb_pstate_switch_disable = false;
1841 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1917 pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1842 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz; 1918 pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
1843 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1919 pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1844 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz; 1920 pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
1845 pp_display_cfg->avail_mclk_switch_time_us =
1846 context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
1847 pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
1848 context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
1849 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz; 1921 pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
1850 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz; 1922 pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
1851 dce110_fill_display_configs(context, pp_display_cfg); 1923 dce110_fill_display_configs(context, pp_display_cfg);
@@ -1908,29 +1980,23 @@ static void dcn10_apply_ctx_for_surface(
1908{ 1980{
1909 int i; 1981 int i;
1910 struct timing_generator *tg; 1982 struct timing_generator *tg;
1911 struct output_pixel_processor *opp;
1912 bool removed_pipe[4] = { false }; 1983 bool removed_pipe[4] = { false };
1913 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000; 1984 unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
1914 bool program_water_mark = false; 1985 bool program_water_mark = false;
1915 struct dc_context *ctx = dc->ctx; 1986 struct dc_context *ctx = dc->ctx;
1916
1917 struct pipe_ctx *top_pipe_to_program = 1987 struct pipe_ctx *top_pipe_to_program =
1918 find_top_pipe_for_stream(dc, context, stream); 1988 find_top_pipe_for_stream(dc, context, stream);
1919 1989
1920 if (!top_pipe_to_program) 1990 if (!top_pipe_to_program)
1921 return; 1991 return;
1922 1992
1923 opp = top_pipe_to_program->stream_res.opp;
1924
1925 tg = top_pipe_to_program->stream_res.tg; 1993 tg = top_pipe_to_program->stream_res.tg;
1926 1994
1927 dcn10_pipe_control_lock(dc, top_pipe_to_program, true); 1995 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
1928 1996
1929 if (num_planes == 0) { 1997 if (num_planes == 0) {
1930
1931 /* OTG blank before remove all front end */ 1998 /* OTG blank before remove all front end */
1932 if (tg->funcs->set_blank) 1999 dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
1933 tg->funcs->set_blank(tg, true);
1934 } 2000 }
1935 2001
1936 /* Disconnect unused mpcc */ 2002 /* Disconnect unused mpcc */
@@ -2056,6 +2122,101 @@ static void dcn10_apply_ctx_for_surface(
2056*/ 2122*/
2057} 2123}
2058 2124
2125static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
2126{
2127 return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
2128}
2129
2130static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
2131{
2132 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2133 context->bw.dcn.calc_clk.dppclk_khz;
2134 bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
2135 context->bw.dcn.cur_clk.dispclk_khz;
2136 int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2137 bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
2138 context->bw.dcn.cur_clk.dppclk_khz;
2139
 2140 /* increasing clock: current div is 0, requested div is 1 */
 2141 if (dispclk_increase) {
 2142 /* already divided by 2, no need to reach target clk in 2 steps */
2143 if (cur_dpp_div)
2144 return context->bw.dcn.calc_clk.dispclk_khz;
2145
 2146 /* requested disp clk is lower than the maximum supported dpp clk,
 2147 * no need to reach target clk with two steps.
 2148 */
2149 if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
2150 return context->bw.dcn.calc_clk.dispclk_khz;
2151
 2152 /* target dpp clk does not request divide-by-2, still within threshold */
2153 if (!request_dpp_div)
2154 return context->bw.dcn.calc_clk.dispclk_khz;
2155
2156 } else {
 2157 /* decreasing clock: current dppclk is divided by 2,
 2158 * requested dppclk is not divided by 2.
 2159 */
 2160
 2161 /* current dpp clk not divided by 2, no need to ramp */
2162 if (!cur_dpp_div)
2163 return context->bw.dcn.calc_clk.dispclk_khz;
2164
2165 /* current disp clk is lower than current maximum dpp clk,
2166 * no need to ramp
2167 */
2168 if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
2169 return context->bw.dcn.calc_clk.dispclk_khz;
2170
2171 /* request dpp clk need to be divided by 2 */
2172 if (request_dpp_div)
2173 return context->bw.dcn.calc_clk.dispclk_khz;
2174 }
2175
2176 return disp_clk_threshold;
2177}
2178
2179static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
2180{
2181 int i;
2182 bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
2183 context->bw.dcn.calc_clk.dppclk_khz;
2184
2185 int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
2186
2187 /* set disp clk to dpp clk threshold */
2188 dc->res_pool->display_clock->funcs->set_clock(
2189 dc->res_pool->display_clock,
2190 dispclk_to_dpp_threshold);
2191
2192 /* update request dpp clk division option */
2193 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2194 struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2195
2196 if (!pipe_ctx->plane_state)
2197 continue;
2198
2199 pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
2200 pipe_ctx->plane_res.dpp,
2201 request_dpp_div,
2202 true);
2203 }
2204
 2205 /* If the target clk is not the same as the dppclk threshold, set to the target clock */
2206 if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
2207 dc->res_pool->display_clock->funcs->set_clock(
2208 dc->res_pool->display_clock,
2209 context->bw.dcn.calc_clk.dispclk_khz);
2210 }
2211
2212 context->bw.dcn.cur_clk.dispclk_khz =
2213 context->bw.dcn.calc_clk.dispclk_khz;
2214 context->bw.dcn.cur_clk.dppclk_khz =
2215 context->bw.dcn.calc_clk.dppclk_khz;
2216 context->bw.dcn.cur_clk.max_supported_dppclk_khz =
2217 context->bw.dcn.calc_clk.max_supported_dppclk_khz;
2218}
2219
2059static void dcn10_set_bandwidth( 2220static void dcn10_set_bandwidth(
2060 struct dc *dc, 2221 struct dc *dc,
2061 struct dc_state *context, 2222 struct dc_state *context,
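determine_dppclk_threshold() and ramp_up_dispclk_with_dpp() exist because DPPCLK is derived from DISPCLK through a divider: when raising DISPCLK past what the DPP can take undivided, the driver first steps DISPCLK to the maximum supported DPPCLK, flips the dividers, and only then jumps to the final clock. A toy walk-through of the increase branch with invented numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* invented clocks, all in MHz */
	int cur_dispclk = 400, cur_dppclk = 400;    /* divider currently 1 */
	int calc_dispclk = 1200, calc_dppclk = 600; /* new state wants /2  */
	int max_supported_dppclk = 800;

	bool request_div2 = calc_dispclk > calc_dppclk;
	bool cur_div2 = cur_dispclk > cur_dppclk;

	/* mirrors the increase branch of determine_dppclk_threshold() */
	int threshold = calc_dispclk;
	if (calc_dispclk > cur_dispclk && !cur_div2 &&
	    calc_dispclk > max_supported_dppclk && request_div2)
		threshold = max_supported_dppclk;

	printf("step 1: dispclk -> %d MHz\n", threshold);
	printf("step 2: dpp divider -> %d\n", request_div2 ? 2 : 1);
	if (threshold != calc_dispclk)
		printf("step 3: dispclk -> %d MHz\n", calc_dispclk);
	return 0;
}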
@@ -2073,32 +2234,32 @@ static void dcn10_set_bandwidth(
2073 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) 2234 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
2074 return; 2235 return;
2075 2236
2076 if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz 2237 if (should_set_clock(
2077 > dc->current_state->bw.dcn.cur_clk.dispclk_khz) { 2238 decrease_allowed,
2078 dc->res_pool->display_clock->funcs->set_clock( 2239 context->bw.dcn.calc_clk.dcfclk_khz,
2079 dc->res_pool->display_clock, 2240 dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
2080 context->bw.dcn.calc_clk.dispclk_khz);
2081 context->bw.dcn.cur_clk.dispclk_khz =
2082 context->bw.dcn.calc_clk.dispclk_khz;
2083 }
2084 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
2085 > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
2086 context->bw.dcn.cur_clk.dcfclk_khz = 2241 context->bw.dcn.cur_clk.dcfclk_khz =
2087 context->bw.dcn.calc_clk.dcfclk_khz; 2242 context->bw.dcn.calc_clk.dcfclk_khz;
2088 smu_req.hard_min_dcefclk_khz = 2243 smu_req.hard_min_dcefclk_khz =
2089 context->bw.dcn.calc_clk.dcfclk_khz; 2244 context->bw.dcn.calc_clk.dcfclk_khz;
2090 } 2245 }
2091 if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz 2246
2092 > dc->current_state->bw.dcn.cur_clk.fclk_khz) { 2247 if (should_set_clock(
2248 decrease_allowed,
2249 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
2250 dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
2251 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2252 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2253 }
2254
2255 if (should_set_clock(
2256 decrease_allowed,
2257 context->bw.dcn.calc_clk.fclk_khz,
2258 dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
2093 context->bw.dcn.cur_clk.fclk_khz = 2259 context->bw.dcn.cur_clk.fclk_khz =
2094 context->bw.dcn.calc_clk.fclk_khz; 2260 context->bw.dcn.calc_clk.fclk_khz;
2095 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz; 2261 smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
2096 } 2262 }
2097 if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
2098 > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
2099 context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
2100 context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
2101 }
2102 2263
2103 smu_req.display_count = context->stream_count; 2264 smu_req.display_count = context->stream_count;
2104 2265
@@ -2107,17 +2268,17 @@ static void dcn10_set_bandwidth(
2107 2268
2108 *smu_req_cur = smu_req; 2269 *smu_req_cur = smu_req;
2109 2270
 2110 /* Decrease in freq is increase in period so opposite comparison for dram_ccm */ 2271 /* make sure dcf clk is raised before dpp clk so
 2111 if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us 2272 * that we have enough voltage to run dpp clk
 2112 < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) { 2273 */
2113 context->bw.dcn.cur_clk.dram_ccm_us = 2274 if (should_set_clock(
2114 context->bw.dcn.calc_clk.dram_ccm_us; 2275 decrease_allowed,
2115 } 2276 context->bw.dcn.calc_clk.dispclk_khz,
2116 if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us 2277 dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
2117 < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) { 2278
2118 context->bw.dcn.cur_clk.min_active_dram_ccm_us = 2279 ramp_up_dispclk_with_dpp(dc, context);
2119 context->bw.dcn.calc_clk.min_active_dram_ccm_us;
2120 } 2280 }
2281
2121 dcn10_pplib_apply_display_requirements(dc, context); 2282 dcn10_pplib_apply_display_requirements(dc, context);
2122 2283
2123 if (dc->debug.sanity_checks) { 2284 if (dc->debug.sanity_checks) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index c4a564cb56b9..02bd664aed3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -440,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
440 .timing_trace = false, 440 .timing_trace = false,
441 .clock_trace = true, 441 .clock_trace = true,
442 442
 443 .min_disp_clk_khz = 300000, 443 /* raven smu doesn't allow 0 disp clk,
 444 * smu min disp clk limit is 50MHz;
 445 * keep min disp clk at 100MHz to avoid smu hang
446 */
447 .min_disp_clk_khz = 100000,
444 448
445 .disable_pplib_clock_request = true, 449 .disable_pplib_clock_request = true,
446 .disable_pplib_wm_range = false, 450 .disable_pplib_wm_range = false,
@@ -963,6 +967,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
963 967
964 idle_pipe->stream = head_pipe->stream; 968 idle_pipe->stream = head_pipe->stream;
965 idle_pipe->stream_res.tg = head_pipe->stream_res.tg; 969 idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
970 idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
966 idle_pipe->stream_res.opp = head_pipe->stream_res.opp; 971 idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
967 972
968 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; 973 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 189052e911fc..48400d642610 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include "display_rq_dlg_helpers.h" 26#include "display_rq_dlg_helpers.h"
27#include "dml_logger.h"
27 28
28void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param) 29void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
29{ 30{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
index b2847bc469fe..f78cbae9db88 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -31,8 +31,6 @@
31#include "display_mode_structs.h" 31#include "display_mode_structs.h"
32#include "display_mode_enums.h" 32#include "display_mode_enums.h"
33 33
34#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
35#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
36 34
37double dml_round(double a); 35double dml_round(double a);
38 36
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index e68086b8a22f..f9cf08357989 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -28,6 +28,7 @@
28 28
29#include "dml_common_defs.h" 29#include "dml_common_defs.h"
30#include "../calcs/dcn_calc_math.h" 30#include "../calcs/dcn_calc_math.h"
31#include "dml_logger.h"
31 32
32static inline double dml_min(double a, double b) 33static inline double dml_min(double a, double b)
33{ 34{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
index c0c4bfdcdb14..465859b77248 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,11 +23,16 @@
23 * 23 *
24 */ 24 */
25 25
26#ifndef __AMDGPU_POWERPLAY_H__
27#define __AMDGPU_POWERPLAY_H__
28 26
29#include "amd_shared.h" 27#ifndef __DML_LOGGER_H_
28#define __DML_LOGGER_H_
29
30#define DC_LOGGER \
31 mode_lib->logger
32
33#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
34#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
35
36#endif
30 37
31extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
32 38
33#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b8f05384a897..8c51ad70cace 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -194,6 +194,8 @@ struct stream_resource {
194 194
195 struct pixel_clk_params pix_clk_params; 195 struct pixel_clk_params pix_clk_params;
196 struct encoder_info_frame encoder_info_frame; 196 struct encoder_info_frame encoder_info_frame;
197
198 struct abm *abm;
197}; 199};
198 200
199struct plane_resource { 201struct plane_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index ae2399f16d1c..a9bfe9ff8ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -130,6 +130,9 @@ enum bw_defines {
130 130
131struct bw_calcs_dceip { 131struct bw_calcs_dceip {
132 enum bw_calcs_version version; 132 enum bw_calcs_version version;
133 uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
134 uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
135 uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
133 bool large_cursor; 136 bool large_cursor;
134 uint32_t cursor_max_outstanding_group_num; 137 uint32_t cursor_max_outstanding_group_num;
135 bool dmif_pipe_en_fbc_chunk_tracker; 138 bool dmif_pipe_en_fbc_chunk_tracker;
@@ -230,6 +233,7 @@ struct bw_calcs_vbios {
230 233
231struct bw_calcs_data { 234struct bw_calcs_data {
232 /* data for all displays */ 235 /* data for all displays */
236 bool display_synchronization_enabled;
233 uint32_t number_of_displays; 237 uint32_t number_of_displays;
234 enum bw_defines underlay_surface_type; 238 enum bw_defines underlay_surface_type;
235 enum bw_defines panning_and_bezel_adjustment; 239 enum bw_defines panning_and_bezel_adjustment;
@@ -241,6 +245,7 @@ struct bw_calcs_data {
241 bool d1_display_write_back_dwb_enable; 245 bool d1_display_write_back_dwb_enable;
242 enum bw_defines d1_underlay_mode; 246 enum bw_defines d1_underlay_mode;
243 247
248 bool increase_voltage_to_support_mclk_switch;
244 bool cpup_state_change_enable; 249 bool cpup_state_change_enable;
245 bool cpuc_state_change_enable; 250 bool cpuc_state_change_enable;
246 bool nbp_state_change_enable; 251 bool nbp_state_change_enable;
@@ -449,6 +454,7 @@ struct bw_calcs_data {
449 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8]; 454 struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
450 struct bw_fixed min_dram_speed_change_margin[3][8]; 455 struct bw_fixed min_dram_speed_change_margin[3][8];
451 struct bw_fixed dispclk_required_for_dram_speed_change[3][8]; 456 struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
457 struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
452 struct bw_fixed blackout_duration_margin[3][8]; 458 struct bw_fixed blackout_duration_margin[3][8];
453 struct bw_fixed dispclk_required_for_blackout_duration[3][8]; 459 struct bw_fixed dispclk_required_for_blackout_duration[3][8];
454 struct bw_fixed dispclk_required_for_blackout_recovery[3][8]; 460 struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index c5aae2daf442..99995608b620 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -132,6 +132,9 @@ struct dpp_funcs {
132 const struct dc_cursor_mi_param *param, 132 const struct dc_cursor_mi_param *param,
133 uint32_t width 133 uint32_t width
134 ); 134 );
135 void (*dpp_set_hdr_multiplier)(
136 struct dpp *dpp_base,
137 uint32_t multiplier);
135 138
136 void (*dpp_dppclk_control)( 139 void (*dpp_dppclk_control)(
137 struct dpp *dpp_base, 140 struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index b727f5eeb3a9..427796bdc14a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -98,6 +98,7 @@ enum dc_log_type {
98 LOG_EVENT_UNDERFLOW, 98 LOG_EVENT_UNDERFLOW,
99 LOG_IF_TRACE, 99 LOG_IF_TRACE,
100 LOG_PERF_TRACE, 100 LOG_PERF_TRACE,
101 LOG_PROFILING,
101 102
102 LOG_SECTION_TOTAL_COUNT 103 LOG_SECTION_TOTAL_COUNT
103}; 104};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 57d5c2575de1..e7e374f56864 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1267,7 +1267,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
1267 bool ret = false; 1267 bool ret = false;
1268 struct pwl_float_data_ex *rgb_regamma = NULL; 1268 struct pwl_float_data_ex *rgb_regamma = NULL;
1269 1269
1270 if (trans == TRANSFER_FUNCTION_UNITY) { 1270 if (trans == TRANSFER_FUNCTION_UNITY ||
1271 trans == TRANSFER_FUNCTION_LINEAR) {
1271 points->end_exponent = 0; 1272 points->end_exponent = 0;
1272 points->x_point_at_y1_red = 1; 1273 points->x_point_at_y1_red = 1;
1273 points->x_point_at_y1_green = 1; 1274 points->x_point_at_y1_green = 1;
@@ -1337,7 +1338,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
1337 bool ret = false; 1338 bool ret = false;
1338 struct pwl_float_data_ex *rgb_degamma = NULL; 1339 struct pwl_float_data_ex *rgb_degamma = NULL;
1339 1340
1340 if (trans == TRANSFER_FUNCTION_UNITY) { 1341 if (trans == TRANSFER_FUNCTION_UNITY ||
1342 trans == TRANSFER_FUNCTION_LINEAR) {
1341 1343
1342 for (i = 0; i <= MAX_HW_POINTS ; i++) { 1344 for (i = 0; i <= MAX_HW_POINTS ; i++) {
1343 points->red[i] = coordinates_x[i].x; 1345 points->red[i] = coordinates_x[i].x;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b4723af368a5..27d4003aa2c7 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -33,7 +33,7 @@
33/* Refresh rate ramp at a fixed rate of 65 Hz/second */ 33/* Refresh rate ramp at a fixed rate of 65 Hz/second */
34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) 34#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
35/* Number of elements in the render times cache array */ 35/* Number of elements in the render times cache array */
36#define RENDER_TIMES_MAX_COUNT 20 36#define RENDER_TIMES_MAX_COUNT 10
37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ 37/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
38#define BTR_EXIT_MARGIN 2000 38#define BTR_EXIT_MARGIN 2000
39/* Number of consecutive frames to check before entering/exiting fixed refresh*/ 39/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -46,13 +46,15 @@
46 46
47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal" 47#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
48 48
49#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"
50
49struct gradual_static_ramp { 51struct gradual_static_ramp {
50 bool ramp_is_active; 52 bool ramp_is_active;
51 bool ramp_direction_is_up; 53 bool ramp_direction_is_up;
52 unsigned int ramp_current_frame_duration_in_ns; 54 unsigned int ramp_current_frame_duration_in_ns;
53}; 55};
54 56
55struct time_cache { 57struct freesync_time {
56 /* video (48Hz feature) related */ 58 /* video (48Hz feature) related */
57 unsigned int update_duration_in_ns; 59 unsigned int update_duration_in_ns;
58 60
@@ -64,6 +66,9 @@ struct time_cache {
64 66
65 unsigned int render_times_index; 67 unsigned int render_times_index;
66 unsigned int render_times[RENDER_TIMES_MAX_COUNT]; 68 unsigned int render_times[RENDER_TIMES_MAX_COUNT];
69
70 unsigned int min_window;
71 unsigned int max_window;
67}; 72};
68 73
69struct below_the_range { 74struct below_the_range {
@@ -98,11 +103,14 @@ struct freesync_state {
98 bool static_screen; 103 bool static_screen;
99 bool video; 104 bool video;
100 105
106 unsigned int vmin;
107 unsigned int vmax;
108
109 struct freesync_time time;
110
101 unsigned int nominal_refresh_rate_in_micro_hz; 111 unsigned int nominal_refresh_rate_in_micro_hz;
102 bool windowed_fullscreen; 112 bool windowed_fullscreen;
103 113
104 struct time_cache time;
105
106 struct gradual_static_ramp static_ramp; 114 struct gradual_static_ramp static_ramp;
107 struct below_the_range btr; 115 struct below_the_range btr;
108 struct fixed_refresh fixed_refresh; 116 struct fixed_refresh fixed_refresh;
@@ -119,14 +127,16 @@ struct freesync_entity {
119struct freesync_registry_options { 127struct freesync_registry_options {
120 bool drr_external_supported; 128 bool drr_external_supported;
121 bool drr_internal_supported; 129 bool drr_internal_supported;
130 bool lcd_freesync_default_set;
131 int lcd_freesync_default_value;
122}; 132};
123 133
124struct core_freesync { 134struct core_freesync {
125 struct mod_freesync public; 135 struct mod_freesync public;
126 struct dc *dc; 136 struct dc *dc;
137 struct freesync_registry_options opts;
127 struct freesync_entity *map; 138 struct freesync_entity *map;
128 int num_entities; 139 int num_entities;
129 struct freesync_registry_options opts;
130}; 140};
131 141
132#define MOD_FREESYNC_TO_CORE(mod_freesync)\ 142#define MOD_FREESYNC_TO_CORE(mod_freesync)\
@@ -146,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
146 goto fail_alloc_context; 156 goto fail_alloc_context;
147 157
148 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS, 158 core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
149 GFP_KERNEL); 159 GFP_KERNEL);
150 160
151 if (core_freesync->map == NULL) 161 if (core_freesync->map == NULL)
152 goto fail_alloc_map; 162 goto fail_alloc_map;
@@ -183,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
183 (data & 1) ? false : true; 193 (data & 1) ? false : true;
184 } 194 }
185 195
196 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
197 FREESYNC_DEFAULT_REGKEY,
198 &data, sizeof(data), &flag)) {
199 core_freesync->opts.lcd_freesync_default_set = true;
200 core_freesync->opts.lcd_freesync_default_value = data;
201 } else {
202 core_freesync->opts.lcd_freesync_default_set = false;
203 core_freesync->opts.lcd_freesync_default_value = 0;
204 }
205
186 return &core_freesync->public; 206 return &core_freesync->public;
187 207
188fail_construct: 208fail_construct:
@@ -288,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
288 core_freesync->map[core_freesync->num_entities].user_enable. 308 core_freesync->map[core_freesync->num_entities].user_enable.
289 enable_for_video = 309 enable_for_video =
290 (persistent_freesync_enable & 4) ? true : false; 310 (persistent_freesync_enable & 4) ? true : false;
 311 /* If this is a FreeSync display and LCDFreeSyncDefault is set, use it as the default user-enable value and write it back to userenable */
312 } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
313 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
314 (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
315 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
316 (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
317 core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
318 (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
319 dm_write_persistent_data(dc->ctx, stream->sink,
320 FREESYNC_REGISTRY_NAME,
321 "userenable", &core_freesync->opts.lcd_freesync_default_value,
322 sizeof(int), &flag);
291 } else { 323 } else {
292 core_freesync->map[core_freesync->num_entities].user_enable. 324 core_freesync->map[core_freesync->num_entities].user_enable.
293 enable_for_gaming = false; 325 enable_for_gaming = false;
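The LCDFreeSyncDefault registry value decoded above is a plain bitmask mirroring the persistent userenable layout: bit 0 gaming, bit 1 static screen, bit 2 video. A two-line demonstration of the decoding:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int lcd_freesync_default_value = 7; /* e.g. enable all three */
	bool gaming        = lcd_freesync_default_value & 1;
	bool static_screen = lcd_freesync_default_value & 2;
	bool video         = lcd_freesync_default_value & 4;

	printf("gaming=%d static=%d video=%d\n", gaming, static_screen, video);
	return 0;
}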
@@ -330,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
330 return true; 362 return true;
331} 363}
332 364
365static void adjust_vmin_vmax(struct core_freesync *core_freesync,
366 struct dc_stream_state **streams,
367 int num_streams,
368 int map_index,
369 unsigned int v_total_min,
370 unsigned int v_total_max)
371{
 372	if (streams == NULL || num_streams != 1)
373 return;
374
375 core_freesync->map[map_index].state.vmin = v_total_min;
376 core_freesync->map[map_index].state.vmax = v_total_max;
377
378 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
379 num_streams, v_total_min,
380 v_total_max);
381}
382
383
333static void update_stream_freesync_context(struct core_freesync *core_freesync, 384static void update_stream_freesync_context(struct core_freesync *core_freesync,
334 struct dc_stream_state *stream) 385 struct dc_stream_state *stream)
335{ 386{
@@ -588,9 +639,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
588 update_stream_freesync_context(core_freesync, 639 update_stream_freesync_context(core_freesync,
589 streams[stream_idx]); 640 streams[stream_idx]);
590 641
591 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 642 adjust_vmin_vmax(core_freesync, streams,
592 num_streams, v_total_min, 643 num_streams, map_index,
593 v_total_max); 644 v_total_min,
645 v_total_max);
594 646
595 return true; 647 return true;
596 648
@@ -613,9 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
613 core_freesync, 665 core_freesync,
614 streams[stream_idx]); 666 streams[stream_idx]);
615 667
616 dc_stream_adjust_vmin_vmax( 668 adjust_vmin_vmax(
617 core_freesync->dc, streams, 669 core_freesync, streams,
618 num_streams, v_total_nominal, 670 num_streams, map_index,
671 v_total_nominal,
619 v_total_nominal); 672 v_total_nominal);
620 } 673 }
621 return true; 674 return true;
@@ -632,9 +685,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
632 core_freesync, 685 core_freesync,
633 streams[stream_idx]); 686 streams[stream_idx]);
634 687
635 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 688 adjust_vmin_vmax(core_freesync, streams,
636 num_streams, v_total_nominal, 689 num_streams, map_index,
637 v_total_nominal); 690 v_total_nominal,
691 v_total_nominal);
638 692
639 /* Reset the cached variables */ 693 /* Reset the cached variables */
640 reset_freesync_state_variables(state); 694 reset_freesync_state_variables(state);
@@ -650,9 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
650 * not support freesync because a former stream has 704 * not support freesync because a former stream has
 651	 * been programmed	 705	 * been programmed
652 */ 706 */
653 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 707 adjust_vmin_vmax(core_freesync, streams,
654 num_streams, v_total_nominal, 708 num_streams, map_index,
655 v_total_nominal); 709 v_total_nominal,
710 v_total_nominal);
656 /* Reset the cached variables */ 711 /* Reset the cached variables */
657 reset_freesync_state_variables(state); 712 reset_freesync_state_variables(state);
658 } 713 }
@@ -769,8 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
769 vmin = inserted_frame_v_total; 824 vmin = inserted_frame_v_total;
770 825
771 /* Program V_TOTAL */ 826 /* Program V_TOTAL */
772 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 827 adjust_vmin_vmax(core_freesync, streams,
773 num_streams, vmin, vmax); 828 num_streams, index,
829 vmin, vmax);
774 } 830 }
775 831
776 if (state->btr.frame_counter > 0) 832 if (state->btr.frame_counter > 0)
@@ -804,9 +860,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
804 update_stream_freesync_context(core_freesync, streams[0]); 860 update_stream_freesync_context(core_freesync, streams[0]);
805 861
806 /* Program static screen ramp values */ 862 /* Program static screen ramp values */
807 dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, 863 adjust_vmin_vmax(core_freesync, streams,
808 num_streams, v_total, 864 num_streams, index,
809 v_total); 865 v_total,
866 v_total);
810 867
811 triggers.overlay_update = true; 868 triggers.overlay_update = true;
812 triggers.surface_update = true; 869 triggers.surface_update = true;
@@ -1063,9 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
1063 max_refresh); 1120 max_refresh);
1064 1121
1065 /* Program vtotal min/max */ 1122 /* Program vtotal min/max */
1066 dc_stream_adjust_vmin_vmax(core_freesync->dc, &streams, 1, 1123 adjust_vmin_vmax(core_freesync, &streams, 1, index,
1067 state->freesync_range.vmin, 1124 state->freesync_range.vmin,
1068 state->freesync_range.vmax); 1125 state->freesync_range.vmax);
1069 } 1126 }
1070 1127
1071 if (min_refresh != 0 && 1128 if (min_refresh != 0 &&
@@ -1399,11 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
1399 } else { 1456 } else {
1400 1457
1401 vmin = state->freesync_range.vmin; 1458 vmin = state->freesync_range.vmin;
1402
1403 vmax = vmin; 1459 vmax = vmin;
 1404		 1460		adjust_vmin_vmax(core_freesync, &stream, 1,
 1405		dc_stream_adjust_vmin_vmax(core_freesync->dc, &stream,	 1461				map_index, vmin, vmax);
 1406				1, vmin, vmax);
1407 } 1462 }
1408} 1463}
1409 1464
@@ -1457,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
1457 1512
1458 } 1513 }
1459} 1514}
1515
1516void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
1517 struct dc_stream_state **streams, int num_streams,
1518 unsigned int *v_total_min, unsigned int *v_total_max,
1519 unsigned int *event_triggers,
1520 unsigned int *window_min, unsigned int *window_max,
1521 unsigned int *lfc_mid_point_in_us,
1522 unsigned int *inserted_frames,
1523 unsigned int *inserted_duration_in_us)
1524{
1525 unsigned int stream_index, map_index;
1526 struct core_freesync *core_freesync = NULL;
1527
1528 if (mod_freesync == NULL)
1529 return;
1530
1531 core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
1532
1533 for (stream_index = 0; stream_index < num_streams; stream_index++) {
1534
1535 map_index = map_index_from_stream(core_freesync,
1536 streams[stream_index]);
1537
1538 if (core_freesync->map[map_index].caps->supported) {
1539 struct freesync_state state =
1540 core_freesync->map[map_index].state;
1541 *v_total_min = state.vmin;
1542 *v_total_max = state.vmax;
1543 *event_triggers = 0;
1544 *window_min = state.time.min_window;
1545 *window_max = state.time.max_window;
1546 *lfc_mid_point_in_us = state.btr.mid_point_in_us;
1547 *inserted_frames = state.btr.frames_to_insert;
1548 *inserted_duration_in_us =
1549 state.btr.inserted_frame_duration_in_us;
1550 }
1551
1552 }
1553}
1554
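Aside: mod_freesync_get_settings() exists so the DM layer can snapshot the per-stream FreeSync state (vmin/vmax, BTR window, inserted frames) and feed it to the stats module introduced further down in this merge. A minimal sketch of such a caller, assuming a single stream; the wrapper name and the mod_stats handle are illustrative, not part of this diff:

	#include "mod_freesync.h"
	#include "mod_stats.h"

	static void dm_sample_freesync_stats(struct mod_freesync *mod_freesync,
					     struct mod_stats *mod_stats,
					     struct dc_stream_state *stream)
	{
		unsigned int v_total_min = 0, v_total_max = 0, event_triggers = 0;
		unsigned int window_min = 0, window_max = 0, lfc_mid_point_in_us = 0;
		unsigned int inserted_frames = 0, inserted_duration_in_us = 0;

		/* snapshot the current FreeSync state for this one stream... */
		mod_freesync_get_settings(mod_freesync, &stream, 1,
					  &v_total_min, &v_total_max,
					  &event_triggers,
					  &window_min, &window_max,
					  &lfc_mid_point_in_us,
					  &inserted_frames,
					  &inserted_duration_in_us);

		/* ...and forward it into the profiling ring kept by mod_stats */
		mod_stats_update_freesync(mod_stats, v_total_min, v_total_max,
					  event_triggers, window_min, window_max,
					  lfc_mid_point_in_us, inserted_frames,
					  inserted_duration_in_us);
	}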
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 84b53425f2c8..f083e1619dbe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
164 struct dc_stream_state **streams, int num_streams, 164 struct dc_stream_state **streams, int num_streams,
165 unsigned int curr_time_stamp); 165 unsigned int curr_time_stamp);
166 166
167void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
168 struct dc_stream_state **streams, int num_streams,
169 unsigned int *v_total_min, unsigned int *v_total_max,
170 unsigned int *event_triggers,
171 unsigned int *window_min, unsigned int *window_max,
172 unsigned int *lfc_mid_point_in_us,
173 unsigned int *inserted_frames,
174 unsigned int *inserted_duration_in_us);
175
167#endif 176#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 0c1593e53654..3230e2adb870 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,29 +19,47 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: AMD
23 *
22 */ 24 */
23#ifndef PP_ASICBLOCKS_H
24#define PP_ASICBLOCKS_H
25 25
26#ifndef MODULES_INC_MOD_STATS_H_
27#define MODULES_INC_MOD_STATS_H_
26 28
27enum PHM_AsicBlock { 29#include "dm_services.h"
28 PHM_AsicBlock_GFX,
29 PHM_AsicBlock_UVD_MVC,
30 PHM_AsicBlock_UVD,
31 PHM_AsicBlock_UVD_HD,
32 PHM_AsicBlock_UVD_SD,
33 PHM_AsicBlock_Count
34};
35 30
36enum PHM_ClockGateSetting { 31struct mod_stats {
37 PHM_ClockGateSetting_StaticOn, 32 int dummy;
38 PHM_ClockGateSetting_StaticOff,
39 PHM_ClockGateSetting_Dynamic
40}; 33};
41 34
42struct phm_asic_blocks { 35struct mod_stats_caps {
43 bool gfx : 1; 36 bool dummy;
44 bool uvd : 1;
45}; 37};
46 38
47#endif 39struct mod_stats *mod_stats_create(struct dc *dc);
40
41void mod_stats_destroy(struct mod_stats *mod_stats);
42
43bool mod_stats_init(struct mod_stats *mod_stats);
44
45void mod_stats_dump(struct mod_stats *mod_stats);
46
47void mod_stats_reset_data(struct mod_stats *mod_stats);
48
49void mod_stats_update_flip(struct mod_stats *mod_stats,
50 unsigned long timestamp_in_ns);
51
52void mod_stats_update_vupdate(struct mod_stats *mod_stats,
53 unsigned long timestamp_in_ns);
54
55void mod_stats_update_freesync(struct mod_stats *mod_stats,
56 unsigned int v_total_min,
57 unsigned int v_total_max,
58 unsigned int event_triggers,
59 unsigned int window_min,
60 unsigned int window_max,
61 unsigned int lfc_mid_point_in_us,
62 unsigned int inserted_frames,
63 unsigned int inserted_frame_duration_in_us);
64
65#endif /* MODULES_INC_MOD_STATS_H_ */
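Aside: taken together, this header describes a create/init/update/dump lifecycle. A minimal sketch of the expected call order, assuming the caller already owns a struct dc; the timestamps are illustrative:

	#include "mod_stats.h"

	static void stats_lifecycle_example(struct dc *dc)
	{
		struct mod_stats *stats = mod_stats_create(dc);

		if (stats == NULL)
			return;

		mod_stats_init(stats);	/* currently a stub in stats.c below */

		mod_stats_update_vupdate(stats, 16666666UL);	/* vblank at ~16.6 ms */
		mod_stats_update_flip(stats, 16700000UL);	/* flip shortly after */

		mod_stats_dump(stats);		/* emits the table via the DAL logger */
		mod_stats_reset_data(stats);
		mod_stats_destroy(stats);
	}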
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
new file mode 100644
index 000000000000..041f87b73d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include "mod_stats.h"
27#include "dm_services.h"
28#include "dc.h"
29#include "core_types.h"
30
31#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
32#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
33#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
34
35#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
36#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
37#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
38
39#define MOD_STATS_NUM_VSYNCS 5
40
41struct stats_time_cache {
42 unsigned long flip_timestamp_in_ns;
43 unsigned long vupdate_timestamp_in_ns;
44
45 unsigned int render_time_in_us;
46 unsigned int avg_render_time_in_us_last_ten;
47 unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
48 unsigned int num_vsync_between_flips;
49
50 unsigned int flip_to_vsync_time_in_us;
51 unsigned int vsync_to_flip_time_in_us;
52
53 unsigned int min_window;
54 unsigned int max_window;
55 unsigned int v_total_min;
56 unsigned int v_total_max;
57 unsigned int event_triggers;
58
59 unsigned int lfc_mid_point_in_us;
60 unsigned int num_frames_inserted;
61 unsigned int inserted_duration_in_us;
62
63 unsigned int flags;
64};
65
66struct core_stats {
67 struct mod_stats public;
68 struct dc *dc;
69
70 struct stats_time_cache *time;
71 unsigned int index;
72
73 bool enabled;
74 unsigned int entries;
75};
76
77#define MOD_STATS_TO_CORE(mod_stats)\
78 container_of(mod_stats, struct core_stats, public)
79
80bool mod_stats_init(struct mod_stats *mod_stats)
81{
82 bool result = false;
83 struct core_stats *core_stats = NULL;
84 struct dc *dc = NULL;
85
86 if (mod_stats == NULL)
87 return false;
88
89 core_stats = MOD_STATS_TO_CORE(mod_stats);
90 dc = core_stats->dc;
91
92 return result;
93}
94
95struct mod_stats *mod_stats_create(struct dc *dc)
96{
97 struct core_stats *core_stats = NULL;
98 struct persistent_data_flag flag;
99 unsigned int reg_data;
100 int i = 0;
101
102 core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
103
104 if (core_stats == NULL)
105 goto fail_alloc_context;
106
107 if (dc == NULL)
108 goto fail_construct;
109
110 core_stats->dc = dc;
111
112 core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
113 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
114 DAL_STATS_ENABLE_REGKEY,
115 &reg_data, sizeof(unsigned int), &flag))
116 core_stats->enabled = reg_data;
117
118 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
119 if (dm_read_persistent_data(dc->ctx, NULL, NULL,
120 DAL_STATS_ENTRIES_REGKEY,
121 &reg_data, sizeof(unsigned int), &flag)) {
122 if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
123 core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
124 else
125 core_stats->entries = reg_data;
126 }
127
128 core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
129 GFP_KERNEL);
130
131 if (core_stats->time == NULL)
132 goto fail_construct;
133
134 /* Purposely leave index 0 unused so we don't need special logic to
135 * handle calculation cases that depend on previous flip data.
136 */
137 core_stats->index = 1;
138
139 return &core_stats->public;
140
141fail_construct:
142 kfree(core_stats);
143
144fail_alloc_context:
145 return NULL;
146}
147
148void mod_stats_destroy(struct mod_stats *mod_stats)
149{
150 if (mod_stats != NULL) {
151 struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
152
153 if (core_stats->time != NULL)
154 kfree(core_stats->time);
155
156 kfree(core_stats);
157 }
158}
159
160void mod_stats_dump(struct mod_stats *mod_stats)
161{
162 struct dc *dc = NULL;
163 struct dal_logger *logger = NULL;
164 struct core_stats *core_stats = NULL;
165 struct stats_time_cache *time = NULL;
 166	unsigned int i = 0;
167
168 if (mod_stats == NULL)
169 return;
170
171 core_stats = MOD_STATS_TO_CORE(mod_stats);
172 dc = core_stats->dc;
173 logger = dc->ctx->logger;
174 time = core_stats->time;
175
176 //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
177
178 //if (!pLog->IsDummyEntry())
179 {
180 dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
181 dm_logger_write(logger, LOG_PROFILING, "\n");
182 dm_logger_write(logger, LOG_PROFILING, "\n");
183
184 dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
185 dm_logger_write(logger, LOG_PROFILING,
186 "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
187
 188		for (i = 0; i < core_stats->index && i < core_stats->entries; i++) {
189 dm_logger_write(logger, LOG_PROFILING,
190 "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
191 time[i].render_time_in_us,
192 time[i].avg_render_time_in_us_last_ten,
193 time[i].min_window,
194 time[i].lfc_mid_point_in_us,
195 time[i].max_window,
196 time[i].vsync_to_flip_time_in_us,
197 time[i].flip_to_vsync_time_in_us,
198 time[i].num_vsync_between_flips,
199 time[i].num_frames_inserted,
200 time[i].inserted_duration_in_us,
201 time[i].v_total_min,
202 time[i].v_total_max,
203 time[i].event_triggers,
204 time[i].v_sync_time_in_us[0],
205 time[i].v_sync_time_in_us[1],
206 time[i].v_sync_time_in_us[2],
207 time[i].v_sync_time_in_us[3],
208 time[i].v_sync_time_in_us[4],
209 time[i].flags);
210 }
211 }
212 //GetLog()->Close(pLog);
213 //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
214}
215
216void mod_stats_reset_data(struct mod_stats *mod_stats)
217{
218 struct core_stats *core_stats = NULL;
219 struct stats_time_cache *time = NULL;
220 unsigned int index = 0;
221
222 if (mod_stats == NULL)
223 return;
224
225 core_stats = MOD_STATS_TO_CORE(mod_stats);
226
227 memset(core_stats->time, 0,
228 sizeof(struct stats_time_cache) * core_stats->entries);
229
230 core_stats->index = 0;
231}
232
233void mod_stats_update_flip(struct mod_stats *mod_stats,
234 unsigned long timestamp_in_ns)
235{
236 struct core_stats *core_stats = NULL;
237 struct stats_time_cache *time = NULL;
 238	unsigned int index = 0, i = 0;
239
240 if (mod_stats == NULL)
241 return;
242
243 core_stats = MOD_STATS_TO_CORE(mod_stats);
244
245 if (core_stats->index >= core_stats->entries)
246 return;
247
248 time = core_stats->time;
249 index = core_stats->index;
250
251 time[index].flip_timestamp_in_ns = timestamp_in_ns;
252 time[index].render_time_in_us =
253 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
254
255 if (index >= 10) {
 256		for (i = 0; i < 10; i++)
257 time[index].avg_render_time_in_us_last_ten +=
258 time[index - i].render_time_in_us;
259 time[index].avg_render_time_in_us_last_ten /= 10;
260 }
261
262 if (time[index].num_vsync_between_flips > 0)
263 time[index].vsync_to_flip_time_in_us =
264 timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
265 else
266 time[index].vsync_to_flip_time_in_us =
267 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
268
269 core_stats->index++;
270}
271
272void mod_stats_update_vupdate(struct mod_stats *mod_stats,
273 unsigned long timestamp_in_ns)
274{
275 struct core_stats *core_stats = NULL;
276 struct stats_time_cache *time = NULL;
277 unsigned int index = 0;
278
279 if (mod_stats == NULL)
280 return;
281
282 core_stats = MOD_STATS_TO_CORE(mod_stats);
283
284 if (core_stats->index >= core_stats->entries)
285 return;
286
287 time = core_stats->time;
288 index = core_stats->index;
289
290 time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
291 if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
292 time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
293 timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
294 time[index].flip_to_vsync_time_in_us =
295 timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
296
297 time[index].num_vsync_between_flips++;
298}
299
300void mod_stats_update_freesync(struct mod_stats *mod_stats,
301 unsigned int v_total_min,
302 unsigned int v_total_max,
303 unsigned int event_triggers,
304 unsigned int window_min,
305 unsigned int window_max,
306 unsigned int lfc_mid_point_in_us,
307 unsigned int inserted_frames,
308 unsigned int inserted_duration_in_us)
309{
310 struct core_stats *core_stats = NULL;
311 struct stats_time_cache *time = NULL;
312 unsigned int index = 0;
313
314 if (mod_stats == NULL)
315 return;
316
317 core_stats = MOD_STATS_TO_CORE(mod_stats);
318
319 if (core_stats->index >= core_stats->entries)
320 return;
321
322 time = core_stats->time;
323 index = core_stats->index;
324
325 time[index].v_total_min = v_total_min;
326 time[index].v_total_max = v_total_max;
327 time[index].event_triggers = event_triggers;
328 time[index].min_window = window_min;
329 time[index].max_window = window_max;
330 time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
331 time[index].num_frames_inserted = inserted_frames;
332 time[index].inserted_duration_in_us = inserted_duration_in_us;
333}
334
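Aside: the per-flip bookkeeping above leans on index 0 staying zeroed: entry i's render time is the delta against entry i-1, so the first real sample (index 1) subtracts the zero sentinel instead of needing a special case. A small runnable sketch of that arithmetic, with two hypothetical flip timestamps:

	#include <stdio.h>

	int main(void)
	{
		/* slot 0 is the zero sentinel left unused by mod_stats_create() */
		unsigned long flip_ts[3] = { 0, 10000000UL, 26700000UL }; /* ns */

		unsigned int delta1 = flip_ts[1] - flip_ts[0]; /* 10,000,000 ns vs. sentinel */
		unsigned int delta2 = flip_ts[2] - flip_ts[1]; /* 16,700,000 ns, ~60 Hz frame */

		/* Note: as written in mod_stats_update_flip() above, this delta
		 * lands in render_time_in_us still in nanoseconds; a /1000
		 * conversion would be needed for the field name to hold. */
		printf("%u %u\n", delta1, delta2);
		return 0;
	}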
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 15bd0f9acf73..5c840c022b52 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -24,8 +24,7 @@
24#ifndef __KGD_PP_INTERFACE_H__ 24#ifndef __KGD_PP_INTERFACE_H__
25#define __KGD_PP_INTERFACE_H__ 25#define __KGD_PP_INTERFACE_H__
26 26
27extern const struct amd_ip_funcs pp_ip_funcs; 27extern const struct amdgpu_ip_block_version pp_smu_ip_block;
28extern const struct amd_pm_funcs pp_dpm_funcs;
29 28
30struct amd_vce_state { 29struct amd_vce_state {
31 /* vce clocks */ 30 /* vce clocks */
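Aside: with pp_smu_ip_block exported in place of the bare pp_ip_funcs/pp_dpm_funcs pair, powerplay registers through the common IP-block machinery like any other block. A sketch of the registration call, assuming it sits in an asic's set_ip_blocks path; the wrapper name is hypothetical:

	static int example_register_powerplay(struct amdgpu_device *adev)
	{
		/* powerplay now plugs in as a standard SMC IP block */
		return amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
	}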
diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
new file mode 100644
index 000000000000..a12d4f27cfa4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef __SOC15_IH_CLIENTID_H__
25#define __SOC15_IH_CLIENTID_H__
26
27 /*
28 * vega10+ IH clients
29 */
30enum soc15_ih_clientid {
31 SOC15_IH_CLIENTID_IH = 0x00,
32 SOC15_IH_CLIENTID_ACP = 0x01,
33 SOC15_IH_CLIENTID_ATHUB = 0x02,
34 SOC15_IH_CLIENTID_BIF = 0x03,
35 SOC15_IH_CLIENTID_DCE = 0x04,
36 SOC15_IH_CLIENTID_ISP = 0x05,
37 SOC15_IH_CLIENTID_PCIE0 = 0x06,
38 SOC15_IH_CLIENTID_RLC = 0x07,
39 SOC15_IH_CLIENTID_SDMA0 = 0x08,
40 SOC15_IH_CLIENTID_SDMA1 = 0x09,
41 SOC15_IH_CLIENTID_SE0SH = 0x0a,
42 SOC15_IH_CLIENTID_SE1SH = 0x0b,
43 SOC15_IH_CLIENTID_SE2SH = 0x0c,
44 SOC15_IH_CLIENTID_SE3SH = 0x0d,
45 SOC15_IH_CLIENTID_SYSHUB = 0x0e,
46 SOC15_IH_CLIENTID_THM = 0x0f,
47 SOC15_IH_CLIENTID_UVD = 0x10,
48 SOC15_IH_CLIENTID_VCE0 = 0x11,
49 SOC15_IH_CLIENTID_VMC = 0x12,
50 SOC15_IH_CLIENTID_XDMA = 0x13,
51 SOC15_IH_CLIENTID_GRBM_CP = 0x14,
52 SOC15_IH_CLIENTID_ATS = 0x15,
53 SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
54 SOC15_IH_CLIENTID_DF = 0x17,
55 SOC15_IH_CLIENTID_VCE1 = 0x18,
56 SOC15_IH_CLIENTID_PWR = 0x19,
57 SOC15_IH_CLIENTID_UTCL2 = 0x1b,
58 SOC15_IH_CLIENTID_EA = 0x1c,
59 SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
60 SOC15_IH_CLIENTID_MP0 = 0x1e,
61 SOC15_IH_CLIENTID_MP1 = 0x1f,
62
63 SOC15_IH_CLIENTID_MAX,
64
65 SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
66};
67
68#endif
69
70
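Aside: these client IDs key interrupt registration on vega10+ parts, where a source is identified by a (client, source) pair rather than a source ID alone. A hedged sketch of a registration call; the src_id of 0 and the irq source are placeholders, not values from this diff:

	#include "soc15_ih_clientid.h"

	static int example_add_dce_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source)
	{
		/* src_id 0 is a placeholder; real callers pass a per-IP SRCID define */
		return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 0, source);
	}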
diff --git a/drivers/gpu/drm/amd/include/vega10_ip_offset.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 4c78dba5cf25..976dd2d565ba 100644
--- a/drivers/gpu/drm/amd/include/vega10_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -24,191 +24,191 @@
24#define MAX_INSTANCE 5 24#define MAX_INSTANCE 5
25#define MAX_SEGMENT 5 25#define MAX_SEGMENT 5
26 26
27struct IP_BASE_INSTANCE 27struct IP_BASE_INSTANCE
28{ 28{
29 unsigned int segment[MAX_SEGMENT]; 29 unsigned int segment[MAX_SEGMENT];
30}; 30};
31 31
32struct IP_BASE 32struct IP_BASE
33{ 33{
34 struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; 34 struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
35}; 35};
36 36
37 37
38static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } }, 38static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
39 { { 0, 0, 0, 0, 0 } }, 39 { { 0, 0, 0, 0, 0 } },
40 { { 0, 0, 0, 0, 0 } }, 40 { { 0, 0, 0, 0, 0 } },
41 { { 0, 0, 0, 0, 0 } }, 41 { { 0, 0, 0, 0, 0 } },
42 { { 0, 0, 0, 0, 0 } } } }; 42 { { 0, 0, 0, 0, 0 } } } };
43static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } }, 43static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
44 { { 0, 0, 0, 0, 0 } }, 44 { { 0, 0, 0, 0, 0 } },
45 { { 0, 0, 0, 0, 0 } }, 45 { { 0, 0, 0, 0, 0 } },
46 { { 0, 0, 0, 0, 0 } }, 46 { { 0, 0, 0, 0, 0 } },
47 { { 0, 0, 0, 0, 0 } } } }; 47 { { 0, 0, 0, 0, 0 } } } };
48static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, 48static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
49 { { 0, 0, 0, 0, 0 } }, 49 { { 0, 0, 0, 0, 0 } },
50 { { 0, 0, 0, 0, 0 } }, 50 { { 0, 0, 0, 0, 0 } },
51 { { 0, 0, 0, 0, 0 } }, 51 { { 0, 0, 0, 0, 0 } },
52 { { 0, 0, 0, 0, 0 } } } }; 52 { { 0, 0, 0, 0, 0 } } } };
53static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, 53static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
54 { { 0, 0, 0, 0, 0 } }, 54 { { 0, 0, 0, 0, 0 } },
55 { { 0, 0, 0, 0, 0 } }, 55 { { 0, 0, 0, 0, 0 } },
56 { { 0, 0, 0, 0, 0 } }, 56 { { 0, 0, 0, 0, 0 } },
57 { { 0, 0, 0, 0, 0 } } } }; 57 { { 0, 0, 0, 0, 0 } } } };
58static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 58static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
59 { { 0, 0, 0, 0, 0 } }, 59 { { 0, 0, 0, 0, 0 } },
60 { { 0, 0, 0, 0, 0 } }, 60 { { 0, 0, 0, 0, 0 } },
61 { { 0, 0, 0, 0, 0 } }, 61 { { 0, 0, 0, 0, 0 } },
62 { { 0, 0, 0, 0, 0 } } } }; 62 { { 0, 0, 0, 0, 0 } } } };
63static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 63static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
64 { { 0, 0, 0, 0, 0 } }, 64 { { 0, 0, 0, 0, 0 } },
65 { { 0, 0, 0, 0, 0 } }, 65 { { 0, 0, 0, 0, 0 } },
66 { { 0, 0, 0, 0, 0 } }, 66 { { 0, 0, 0, 0, 0 } },
67 { { 0, 0, 0, 0, 0 } } } }; 67 { { 0, 0, 0, 0, 0 } } } };
68static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } }, 68static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
69 { { 0, 0, 0, 0, 0 } }, 69 { { 0, 0, 0, 0, 0 } },
70 { { 0, 0, 0, 0, 0 } }, 70 { { 0, 0, 0, 0, 0 } },
71 { { 0, 0, 0, 0, 0 } }, 71 { { 0, 0, 0, 0, 0 } },
72 { { 0, 0, 0, 0, 0 } } } }; 72 { { 0, 0, 0, 0, 0 } } } };
73static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } }, 73static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
74 { { 0, 0, 0, 0, 0 } }, 74 { { 0, 0, 0, 0, 0 } },
75 { { 0, 0, 0, 0, 0 } }, 75 { { 0, 0, 0, 0, 0 } },
76 { { 0, 0, 0, 0, 0 } }, 76 { { 0, 0, 0, 0, 0 } },
77 { { 0, 0, 0, 0, 0 } } } }; 77 { { 0, 0, 0, 0, 0 } } } };
78static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } }, 78static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
79 { { 0, 0, 0, 0, 0 } }, 79 { { 0, 0, 0, 0, 0 } },
80 { { 0, 0, 0, 0, 0 } }, 80 { { 0, 0, 0, 0, 0 } },
81 { { 0, 0, 0, 0, 0 } }, 81 { { 0, 0, 0, 0, 0 } },
82 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment 82 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
83static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } }, 83static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
84 { { 0, 0, 0, 0, 0 } }, 84 { { 0, 0, 0, 0, 0 } },
85 { { 0, 0, 0, 0, 0 } }, 85 { { 0, 0, 0, 0, 0 } },
86 { { 0, 0, 0, 0, 0 } }, 86 { { 0, 0, 0, 0, 0 } },
87 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment 87 { { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
88static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } }, 88static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
89 { { 0, 0, 0, 0, 0 } }, 89 { { 0, 0, 0, 0, 0 } },
90 { { 0, 0, 0, 0, 0 } }, 90 { { 0, 0, 0, 0, 0 } },
91 { { 0, 0, 0, 0, 0 } }, 91 { { 0, 0, 0, 0, 0 } },
92 { { 0, 0, 0, 0, 0 } } } }; // not exist 92 { { 0, 0, 0, 0, 0 } } } }; // not exist
93static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } }, 93static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
94 { { 0, 0, 0, 0, 0 } }, 94 { { 0, 0, 0, 0, 0 } },
95 { { 0, 0, 0, 0, 0 } }, 95 { { 0, 0, 0, 0, 0 } },
96 { { 0, 0, 0, 0, 0 } }, 96 { { 0, 0, 0, 0, 0 } },
97 { { 0, 0, 0, 0, 0 } } } }; // not exist 97 { { 0, 0, 0, 0, 0 } } } }; // not exist
98static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } }, 98static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
99 { { 0, 0, 0, 0, 0 } }, 99 { { 0, 0, 0, 0, 0 } },
100 { { 0, 0, 0, 0, 0 } }, 100 { { 0, 0, 0, 0, 0 } },
101 { { 0, 0, 0, 0, 0 } }, 101 { { 0, 0, 0, 0, 0 } },
102 { { 0, 0, 0, 0, 0 } } } }; // not exist 102 { { 0, 0, 0, 0, 0 } } } }; // not exist
103static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } }, 103static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
104 { { 0, 0, 0, 0, 0 } }, 104 { { 0, 0, 0, 0, 0 } },
105 { { 0, 0, 0, 0, 0 } }, 105 { { 0, 0, 0, 0, 0 } },
106 { { 0, 0, 0, 0, 0 } }, 106 { { 0, 0, 0, 0, 0 } },
107 { { 0, 0, 0, 0, 0 } } } }; // not exist 107 { { 0, 0, 0, 0, 0 } } } }; // not exist
108static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } }, 108static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
109 { { 0, 0, 0, 0, 0 } }, 109 { { 0, 0, 0, 0, 0 } },
110 { { 0, 0, 0, 0, 0 } }, 110 { { 0, 0, 0, 0, 0 } },
111 { { 0, 0, 0, 0, 0 } }, 111 { { 0, 0, 0, 0, 0 } },
112 { { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers 112 { { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers
113static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } }, 113static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
114 { { 0, 0, 0, 0, 0 } }, 114 { { 0, 0, 0, 0, 0 } },
115 { { 0, 0, 0, 0, 0 } }, 115 { { 0, 0, 0, 0, 0 } },
116 { { 0, 0, 0, 0, 0 } }, 116 { { 0, 0, 0, 0, 0 } },
117 { { 0, 0, 0, 0, 0 } } } }; // not exist 117 { { 0, 0, 0, 0, 0 } } } }; // not exist
118static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } }, 118static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
119 { { 0, 0, 0, 0, 0 } }, 119 { { 0, 0, 0, 0, 0 } },
120 { { 0, 0, 0, 0, 0 } }, 120 { { 0, 0, 0, 0, 0 } },
121 { { 0, 0, 0, 0, 0 } }, 121 { { 0, 0, 0, 0, 0 } },
122 { { 0, 0, 0, 0, 0 } } } }; // not exist 122 { { 0, 0, 0, 0, 0 } } } }; // not exist
123static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } }, 123static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
124 { { 0, 0, 0, 0, 0 } }, 124 { { 0, 0, 0, 0, 0 } },
125 { { 0, 0, 0, 0, 0 } }, 125 { { 0, 0, 0, 0, 0 } },
126 { { 0, 0, 0, 0, 0 } }, 126 { { 0, 0, 0, 0, 0 } },
127 { { 0, 0, 0, 0, 0 } } } }; 127 { { 0, 0, 0, 0, 0 } } } };
128static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } }, 128static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
129 { { 0, 0, 0, 0, 0 } }, 129 { { 0, 0, 0, 0, 0 } },
130 { { 0, 0, 0, 0, 0 } }, 130 { { 0, 0, 0, 0, 0 } },
131 { { 0, 0, 0, 0, 0 } }, 131 { { 0, 0, 0, 0, 0 } },
132 { { 0, 0, 0, 0, 0 } } } }; 132 { { 0, 0, 0, 0, 0 } } } };
133static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } }, 133static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
134 { { 0, 0, 0, 0, 0 } }, 134 { { 0, 0, 0, 0, 0 } },
135 { { 0, 0, 0, 0, 0 } }, 135 { { 0, 0, 0, 0, 0 } },
136 { { 0, 0, 0, 0, 0 } }, 136 { { 0, 0, 0, 0, 0 } },
137 { { 0, 0, 0, 0, 0 } } } }; 137 { { 0, 0, 0, 0, 0 } } } };
138static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } }, 138static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
139 { { 0, 0, 0, 0, 0 } }, 139 { { 0, 0, 0, 0, 0 } },
140 { { 0, 0, 0, 0, 0 } }, 140 { { 0, 0, 0, 0, 0 } },
141 { { 0, 0, 0, 0, 0 } }, 141 { { 0, 0, 0, 0, 0 } },
142 { { 0, 0, 0, 0, 0 } } } }; 142 { { 0, 0, 0, 0, 0 } } } };
143static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } }, 143static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
144 { { 0, 0, 0, 0, 0 } }, 144 { { 0, 0, 0, 0, 0 } },
145 { { 0, 0, 0, 0, 0 } }, 145 { { 0, 0, 0, 0, 0 } },
146 { { 0, 0, 0, 0, 0 } }, 146 { { 0, 0, 0, 0, 0 } },
147 { { 0, 0, 0, 0, 0 } } } }; 147 { { 0, 0, 0, 0, 0 } } } };
148static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } }, 148static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
149 { { 0, 0, 0, 0, 0 } }, 149 { { 0, 0, 0, 0, 0 } },
150 { { 0, 0, 0, 0, 0 } }, 150 { { 0, 0, 0, 0, 0 } },
151 { { 0, 0, 0, 0, 0 } }, 151 { { 0, 0, 0, 0, 0 } },
152 { { 0, 0, 0, 0, 0 } } } }; 152 { { 0, 0, 0, 0, 0 } } } };
153static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } }, 153static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
154 { { 0, 0, 0, 0, 0 } }, 154 { { 0, 0, 0, 0, 0 } },
155 { { 0, 0, 0, 0, 0 } }, 155 { { 0, 0, 0, 0, 0 } },
156 { { 0, 0, 0, 0, 0 } }, 156 { { 0, 0, 0, 0, 0 } },
157 { { 0, 0, 0, 0, 0 } } } }; 157 { { 0, 0, 0, 0, 0 } } } };
158static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } }, 158static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
159 { { 0, 0, 0, 0, 0 } }, 159 { { 0, 0, 0, 0, 0 } },
160 { { 0, 0, 0, 0, 0 } }, 160 { { 0, 0, 0, 0, 0 } },
161 { { 0, 0, 0, 0, 0 } }, 161 { { 0, 0, 0, 0, 0 } },
162 { { 0, 0, 0, 0, 0 } } } }; 162 { { 0, 0, 0, 0, 0 } } } };
163static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } }, 163static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
164 { { 0, 0, 0, 0, 0 } }, 164 { { 0, 0, 0, 0, 0 } },
165 { { 0, 0, 0, 0, 0 } }, 165 { { 0, 0, 0, 0, 0 } },
166 { { 0, 0, 0, 0, 0 } }, 166 { { 0, 0, 0, 0, 0 } },
167 { { 0, 0, 0, 0, 0 } } } }; 167 { { 0, 0, 0, 0, 0 } } } };
168static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } }, 168static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
169 { { 0, 0, 0, 0, 0 } }, 169 { { 0, 0, 0, 0, 0 } },
170 { { 0, 0, 0, 0, 0 } }, 170 { { 0, 0, 0, 0, 0 } },
171 { { 0, 0, 0, 0, 0 } }, 171 { { 0, 0, 0, 0, 0 } },
172 { { 0, 0, 0, 0, 0 } } } }; 172 { { 0, 0, 0, 0, 0 } } } };
173static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } }, 173static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
174 { { 0, 0, 0, 0, 0 } }, 174 { { 0, 0, 0, 0, 0 } },
175 { { 0, 0, 0, 0, 0 } }, 175 { { 0, 0, 0, 0, 0 } },
176 { { 0, 0, 0, 0, 0 } }, 176 { { 0, 0, 0, 0, 0 } },
177 { { 0, 0, 0, 0, 0 } } } }; 177 { { 0, 0, 0, 0, 0 } } } };
178static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } }, 178static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
179 { { 0, 0, 0, 0, 0 } }, 179 { { 0, 0, 0, 0, 0 } },
180 { { 0, 0, 0, 0, 0 } }, 180 { { 0, 0, 0, 0, 0 } },
181 { { 0, 0, 0, 0, 0 } }, 181 { { 0, 0, 0, 0, 0 } },
182 { { 0, 0, 0, 0, 0 } } } }; 182 { { 0, 0, 0, 0, 0 } } } };
183static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } }, 183static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
184 { { 0, 0, 0, 0, 0 } }, 184 { { 0, 0, 0, 0, 0 } },
185 { { 0, 0, 0, 0, 0 } }, 185 { { 0, 0, 0, 0, 0 } },
186 { { 0, 0, 0, 0, 0 } }, 186 { { 0, 0, 0, 0, 0 } },
187 { { 0, 0, 0, 0, 0 } } } }; 187 { { 0, 0, 0, 0, 0 } } } };
188static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } }, 188static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
189 { { 0, 0, 0, 0, 0 } }, 189 { { 0, 0, 0, 0, 0 } },
190 { { 0, 0, 0, 0, 0 } }, 190 { { 0, 0, 0, 0, 0 } },
191 { { 0, 0, 0, 0, 0 } }, 191 { { 0, 0, 0, 0, 0 } },
192 { { 0, 0, 0, 0, 0 } } } }; 192 { { 0, 0, 0, 0, 0 } } } };
193static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } }, 193static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
194 { { 0, 0, 0, 0, 0 } }, 194 { { 0, 0, 0, 0, 0 } },
195 { { 0, 0, 0, 0, 0 } }, 195 { { 0, 0, 0, 0, 0 } },
196 { { 0, 0, 0, 0, 0 } }, 196 { { 0, 0, 0, 0, 0 } },
197 { { 0, 0, 0, 0, 0 } } } }; 197 { { 0, 0, 0, 0, 0 } } } };
198static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } }, 198static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
199 { { 0, 0, 0, 0, 0 } }, 199 { { 0, 0, 0, 0, 0 } },
200 { { 0, 0, 0, 0, 0 } }, 200 { { 0, 0, 0, 0, 0 } },
201 { { 0, 0, 0, 0, 0 } }, 201 { { 0, 0, 0, 0, 0 } },
202 { { 0, 0, 0, 0, 0 } } } }; 202 { { 0, 0, 0, 0, 0 } } } };
203static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } }, 203static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } },
204 { { 0x00016E00, 0, 0, 0, 0 } }, 204 { { 0x00016E00, 0, 0, 0, 0 } },
205 { { 0x00017000, 0, 0, 0, 0 } }, 205 { { 0x00017000, 0, 0, 0, 0 } },
206 { { 0x00017200, 0, 0, 0, 0 } }, 206 { { 0x00017200, 0, 0, 0, 0 } },
207 { { 0x00017E00, 0, 0, 0, 0 } } } }; 207 { { 0x00017E00, 0, 0, 0, 0 } } } };
208static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } }, 208static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
209 { { 0, 0, 0, 0, 0 } }, 209 { { 0, 0, 0, 0, 0 } },
210 { { 0, 0, 0, 0, 0 } }, 210 { { 0, 0, 0, 0, 0 } },
211 { { 0, 0, 0, 0, 0 } }, 211 { { 0, 0, 0, 0, 0 } },
212 { { 0, 0, 0, 0, 0 } } } }; 212 { { 0, 0, 0, 0, 0 } } } };
213 213
214 214
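Aside: each *_BASE table holds up to MAX_INSTANCE instances of up to MAX_SEGMENT aperture offsets, and a register address is the chosen segment base plus the register's offset within that segment. A minimal sketch of the lookup; the macro name is illustrative, though the driver's own SOC15 accessors follow the same pattern:

	/* resolve an MMIO dword offset from the tables above */
	#define EXAMPLE_REG_OFFSET(ip, inst, seg, reg)	\
		((ip##_BASE).instance[inst].segment[seg] + (reg))

	/* e.g. a register at dword 0x10 in GC instance 0, segment 1:
	 * 0x0000A000 + 0x10 = 0x0000A010 */
	static const unsigned int example_addr = EXAMPLE_REG_OFFSET(GC, 0, 1, 0x10);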
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b989bf3542d6..3da3dccd13e2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -27,7 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include "amd_shared.h" 28#include "amd_shared.h"
29#include "amd_powerplay.h" 29#include "amd_powerplay.h"
30#include "pp_instance.h"
31#include "power_state.h" 30#include "power_state.h"
32#include "amdgpu.h" 31#include "amdgpu.h"
33#include "hwmgr.h" 32#include "hwmgr.h"
@@ -37,18 +36,14 @@
37static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 36static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
38 enum amd_pm_state_type *user_state); 37 enum amd_pm_state_type *user_state);
39 38
40static inline int pp_check(struct pp_instance *handle) 39static const struct amd_pm_funcs pp_dpm_funcs;
41{
42 if (handle == NULL)
43 return -EINVAL;
44 40
45 if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) 41static inline int pp_check(struct pp_hwmgr *hwmgr)
42{
43 if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
46 return -EINVAL; 44 return -EINVAL;
47 45
48 if (handle->pm_en == 0) 46 if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
49 return PP_DPM_DISABLED;
50
51 if (handle->hwmgr->hwmgr_func == NULL)
52 return PP_DPM_DISABLED; 47 return PP_DPM_DISABLED;
53 48
54 return 0; 49 return 0;
@@ -56,54 +51,52 @@ static inline int pp_check(struct pp_instance *handle)
56 51
57static int amd_powerplay_create(struct amdgpu_device *adev) 52static int amd_powerplay_create(struct amdgpu_device *adev)
58{ 53{
59 struct pp_instance *instance; 54 struct pp_hwmgr *hwmgr;
60 55
61 if (adev == NULL) 56 if (adev == NULL)
62 return -EINVAL; 57 return -EINVAL;
63 58
64 instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); 59 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
65 if (instance == NULL) 60 if (hwmgr == NULL)
66 return -ENOMEM; 61 return -ENOMEM;
67 62
68 instance->parent = adev; 63 hwmgr->adev = adev;
69 instance->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; 64 hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
70 instance->device = adev->powerplay.cgs_device; 65 hwmgr->device = amdgpu_cgs_create_device(adev);
71 mutex_init(&instance->pp_lock); 66 mutex_init(&hwmgr->smu_lock);
72 adev->powerplay.pp_handle = instance; 67 hwmgr->chip_family = adev->family;
73 68 hwmgr->chip_id = adev->asic_type;
69 hwmgr->feature_mask = amdgpu_pp_feature_mask;
70 adev->powerplay.pp_handle = hwmgr;
71 adev->powerplay.pp_funcs = &pp_dpm_funcs;
74 return 0; 72 return 0;
75} 73}
76 74
77 75
78static int amd_powerplay_destroy(void *handle) 76static int amd_powerplay_destroy(struct amdgpu_device *adev)
79{ 77{
80 struct pp_instance *instance = (struct pp_instance *)handle; 78 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
81 79
82 kfree(instance->hwmgr->hardcode_pp_table); 80 kfree(hwmgr->hardcode_pp_table);
83 instance->hwmgr->hardcode_pp_table = NULL; 81 hwmgr->hardcode_pp_table = NULL;
84 82
85 kfree(instance->hwmgr); 83 kfree(hwmgr);
86 instance->hwmgr = NULL; 84 hwmgr = NULL;
87 85
88 kfree(instance);
89 instance = NULL;
90 return 0; 86 return 0;
91} 87}
92 88
93static int pp_early_init(void *handle) 89static int pp_early_init(void *handle)
94{ 90{
95 int ret; 91 int ret;
96 struct pp_instance *pp_handle = NULL; 92 struct amdgpu_device *adev = handle;
97 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
98 93
99 ret = amd_powerplay_create(adev); 94 ret = amd_powerplay_create(adev);
100 95
101 if (ret != 0) 96 if (ret != 0)
102 return ret; 97 return ret;
103 98
104 pp_handle = adev->powerplay.pp_handle; 99 ret = hwmgr_early_init(adev->powerplay.pp_handle);
105
106 ret = hwmgr_early_init(pp_handle);
107 if (ret) 100 if (ret)
108 return -EINVAL; 101 return -EINVAL;
109 102
@@ -112,15 +105,13 @@ static int pp_early_init(void *handle)
112 105
113static int pp_sw_init(void *handle) 106static int pp_sw_init(void *handle)
114{ 107{
115 struct pp_hwmgr *hwmgr; 108 struct amdgpu_device *adev = handle;
109 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
116 int ret = 0; 110 int ret = 0;
117 struct pp_instance *pp_handle = (struct pp_instance *)handle;
118 111
119 ret = pp_check(pp_handle); 112 ret = pp_check(hwmgr);
120 113
121 if (ret >= 0) { 114 if (ret >= 0) {
122 hwmgr = pp_handle->hwmgr;
123
124 if (hwmgr->smumgr_funcs->smu_init == NULL) 115 if (hwmgr->smumgr_funcs->smu_init == NULL)
125 return -EINVAL; 116 return -EINVAL;
126 117
@@ -128,55 +119,57 @@ static int pp_sw_init(void *handle)
128 119
129 pr_debug("amdgpu: powerplay sw initialized\n"); 120 pr_debug("amdgpu: powerplay sw initialized\n");
130 } 121 }
122
131 return ret; 123 return ret;
132} 124}
133 125
134static int pp_sw_fini(void *handle) 126static int pp_sw_fini(void *handle)
135{ 127{
136 struct pp_hwmgr *hwmgr; 128 struct amdgpu_device *adev = handle;
129 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
137 int ret = 0; 130 int ret = 0;
138 struct pp_instance *pp_handle = (struct pp_instance *)handle;
139 131
140 ret = pp_check(pp_handle); 132 ret = pp_check(hwmgr);
141 if (ret >= 0) { 133 if (ret >= 0) {
142 hwmgr = pp_handle->hwmgr; 134 if (hwmgr->smumgr_funcs->smu_fini != NULL)
135 hwmgr->smumgr_funcs->smu_fini(hwmgr);
136 }
143 137
144 if (hwmgr->smumgr_funcs->smu_fini == NULL) 138 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
145 return -EINVAL; 139 amdgpu_ucode_fini_bo(adev);
146 140
147 ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 141 return 0;
148 }
149 return ret;
150} 142}
151 143
152static int pp_hw_init(void *handle) 144static int pp_hw_init(void *handle)
153{ 145{
154 int ret = 0; 146 int ret = 0;
155 struct pp_instance *pp_handle = (struct pp_instance *)handle; 147 struct amdgpu_device *adev = handle;
156 struct pp_hwmgr *hwmgr; 148 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
157 149
158 ret = pp_check(pp_handle); 150 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
151 amdgpu_ucode_init_bo(adev);
159 152
160 if (ret >= 0) { 153 ret = pp_check(hwmgr);
161 hwmgr = pp_handle->hwmgr;
162 154
155 if (ret >= 0) {
163 if (hwmgr->smumgr_funcs->start_smu == NULL) 156 if (hwmgr->smumgr_funcs->start_smu == NULL)
164 return -EINVAL; 157 return -EINVAL;
165 158
166 if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 159 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
167 pr_err("smc start failed\n"); 160 pr_err("smc start failed\n");
168 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 161 hwmgr->smumgr_funcs->smu_fini(hwmgr);
169 return -EINVAL; 162 return -EINVAL;
170 } 163 }
171 if (ret == PP_DPM_DISABLED) 164 if (ret == PP_DPM_DISABLED)
172 goto exit; 165 goto exit;
173 ret = hwmgr_hw_init(pp_handle); 166 ret = hwmgr_hw_init(hwmgr);
174 if (ret) 167 if (ret)
175 goto exit; 168 goto exit;
176 } 169 }
177 return ret; 170 return ret;
178exit: 171exit:
179 pp_handle->pm_en = 0; 172 hwmgr->pm_en = 0;
180 cgs_notify_dpm_enabled(hwmgr->device, false); 173 cgs_notify_dpm_enabled(hwmgr->device, false);
181 return 0; 174 return 0;
182 175
@@ -184,24 +177,27 @@ exit:
184 177
185static int pp_hw_fini(void *handle) 178static int pp_hw_fini(void *handle)
186{ 179{
187 struct pp_instance *pp_handle = (struct pp_instance *)handle; 180 struct amdgpu_device *adev = handle;
181 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
188 int ret = 0; 182 int ret = 0;
189 183
190 ret = pp_check(pp_handle); 184 ret = pp_check(hwmgr);
191 if (ret == 0) 185 if (ret == 0)
192 hwmgr_hw_fini(pp_handle); 186 hwmgr_hw_fini(hwmgr);
193 187
194 return 0; 188 return 0;
195} 189}
196 190
197static int pp_late_init(void *handle) 191static int pp_late_init(void *handle)
198{ 192{
199 struct pp_instance *pp_handle = (struct pp_instance *)handle; 193 struct amdgpu_device *adev = handle;
194 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
200 int ret = 0; 195 int ret = 0;
201 196
202 ret = pp_check(pp_handle); 197 ret = pp_check(hwmgr);
198
203 if (ret == 0) 199 if (ret == 0)
204 pp_dpm_dispatch_tasks(pp_handle, 200 pp_dpm_dispatch_tasks(hwmgr,
205 AMD_PP_TASK_COMPLETE_INIT, NULL); 201 AMD_PP_TASK_COMPLETE_INIT, NULL);
206 202
207 return 0; 203 return 0;
@@ -209,7 +205,9 @@ static int pp_late_init(void *handle)
209 205
210static void pp_late_fini(void *handle) 206static void pp_late_fini(void *handle)
211{ 207{
212 amd_powerplay_destroy(handle); 208 struct amdgpu_device *adev = handle;
209
210 amd_powerplay_destroy(adev);
213} 211}
214 212
215 213
@@ -231,17 +229,15 @@ static int pp_sw_reset(void *handle)
231static int pp_set_powergating_state(void *handle, 229static int pp_set_powergating_state(void *handle,
232 enum amd_powergating_state state) 230 enum amd_powergating_state state)
233{ 231{
234 struct pp_hwmgr *hwmgr; 232 struct amdgpu_device *adev = handle;
235 struct pp_instance *pp_handle = (struct pp_instance *)handle; 233 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
236 int ret = 0; 234 int ret = 0;
237 235
238 ret = pp_check(pp_handle); 236 ret = pp_check(hwmgr);
239 237
240 if (ret) 238 if (ret)
241 return ret; 239 return ret;
242 240
243 hwmgr = pp_handle->hwmgr;
244
245 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { 241 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
246 pr_info("%s was not implemented.\n", __func__); 242 pr_info("%s was not implemented.\n", __func__);
247 return 0; 243 return 0;
@@ -254,44 +250,43 @@ static int pp_set_powergating_state(void *handle,
254 250
255static int pp_suspend(void *handle) 251static int pp_suspend(void *handle)
256{ 252{
257 struct pp_instance *pp_handle = (struct pp_instance *)handle; 253 struct amdgpu_device *adev = handle;
254 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
258 int ret = 0; 255 int ret = 0;
259 256
260 ret = pp_check(pp_handle); 257 ret = pp_check(hwmgr);
261 if (ret == 0) 258 if (ret == 0)
262 hwmgr_hw_suspend(pp_handle); 259 hwmgr_hw_suspend(hwmgr);
263 return 0; 260 return 0;
264} 261}
265 262
266static int pp_resume(void *handle) 263static int pp_resume(void *handle)
267{ 264{
268 struct pp_hwmgr *hwmgr; 265 struct amdgpu_device *adev = handle;
266 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
269 int ret; 267 int ret;
270 struct pp_instance *pp_handle = (struct pp_instance *)handle;
271 268
272 ret = pp_check(pp_handle); 269 ret = pp_check(hwmgr);
273 270
274 if (ret < 0) 271 if (ret < 0)
275 return ret; 272 return ret;
276 273
277 hwmgr = pp_handle->hwmgr;
278
279 if (hwmgr->smumgr_funcs->start_smu == NULL) 274 if (hwmgr->smumgr_funcs->start_smu == NULL)
280 return -EINVAL; 275 return -EINVAL;
281 276
282 if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 277 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
283 pr_err("smc start failed\n"); 278 pr_err("smc start failed\n");
284 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 279 hwmgr->smumgr_funcs->smu_fini(hwmgr);
285 return -EINVAL; 280 return -EINVAL;
286 } 281 }
287 282
288 if (ret == PP_DPM_DISABLED) 283 if (ret == PP_DPM_DISABLED)
289 return 0; 284 return 0;
290 285
291 return hwmgr_hw_resume(pp_handle); 286 return hwmgr_hw_resume(hwmgr);
292} 287}
293 288
294const struct amd_ip_funcs pp_ip_funcs = { 289static const struct amd_ip_funcs pp_ip_funcs = {
295 .name = "powerplay", 290 .name = "powerplay",
296 .early_init = pp_early_init, 291 .early_init = pp_early_init,
297 .late_init = pp_late_init, 292 .late_init = pp_late_init,
@@ -309,6 +304,15 @@ const struct amd_ip_funcs pp_ip_funcs = {
309 .set_powergating_state = pp_set_powergating_state, 304 .set_powergating_state = pp_set_powergating_state,
310}; 305};
311 306
307const struct amdgpu_ip_block_version pp_smu_ip_block =
308{
309 .type = AMD_IP_BLOCK_TYPE_SMC,
310 .major = 1,
311 .minor = 0,
312 .rev = 0,
313 .funcs = &pp_ip_funcs,
314};
315
312static int pp_dpm_load_fw(void *handle) 316static int pp_dpm_load_fw(void *handle)
313{ 317{
314 return 0; 318 return 0;
@@ -321,17 +325,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
321 325
322static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) 326static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
323{ 327{
324 struct pp_hwmgr *hwmgr; 328 struct pp_hwmgr *hwmgr = handle;
325 struct pp_instance *pp_handle = (struct pp_instance *)handle;
326 int ret = 0; 329 int ret = 0;
327 330
328 ret = pp_check(pp_handle); 331 ret = pp_check(hwmgr);
329 332
330 if (ret) 333 if (ret)
331 return ret; 334 return ret;
332 335
333 hwmgr = pp_handle->hwmgr;
334
335 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { 336 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
336 pr_info("%s was not implemented.\n", __func__); 337 pr_info("%s was not implemented.\n", __func__);
337 return 0; 338 return 0;
@@ -379,25 +380,22 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
379static int pp_dpm_force_performance_level(void *handle, 380static int pp_dpm_force_performance_level(void *handle,
380 enum amd_dpm_forced_level level) 381 enum amd_dpm_forced_level level)
381{ 382{
382 struct pp_hwmgr *hwmgr; 383 struct pp_hwmgr *hwmgr = handle;
383 struct pp_instance *pp_handle = (struct pp_instance *)handle;
384 int ret = 0; 384 int ret = 0;
385 385
386 ret = pp_check(pp_handle); 386 ret = pp_check(hwmgr);
387 387
388 if (ret) 388 if (ret)
389 return ret; 389 return ret;
390 390
391 hwmgr = pp_handle->hwmgr;
392
393 if (level == hwmgr->dpm_level) 391 if (level == hwmgr->dpm_level)
394 return 0; 392 return 0;
395 393
396 mutex_lock(&pp_handle->pp_lock); 394 mutex_lock(&hwmgr->smu_lock);
397 pp_dpm_en_umd_pstate(hwmgr, &level); 395 pp_dpm_en_umd_pstate(hwmgr, &level);
398 hwmgr->request_dpm_level = level; 396 hwmgr->request_dpm_level = level;
399 hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 397 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
400 mutex_unlock(&pp_handle->pp_lock); 398 mutex_unlock(&hwmgr->smu_lock);
401 399
402 return 0; 400 return 0;
403} 401}
@@ -405,152 +403,135 @@ static int pp_dpm_force_performance_level(void *handle,
405static enum amd_dpm_forced_level pp_dpm_get_performance_level( 403static enum amd_dpm_forced_level pp_dpm_get_performance_level(
406 void *handle) 404 void *handle)
407{ 405{
408 struct pp_hwmgr *hwmgr; 406 struct pp_hwmgr *hwmgr = handle;
409 struct pp_instance *pp_handle = (struct pp_instance *)handle;
410 int ret = 0; 407 int ret = 0;
411 enum amd_dpm_forced_level level; 408 enum amd_dpm_forced_level level;
412 409
413 ret = pp_check(pp_handle); 410 ret = pp_check(hwmgr);
414 411
415 if (ret) 412 if (ret)
416 return ret; 413 return ret;
417 414
418 hwmgr = pp_handle->hwmgr; 415 mutex_lock(&hwmgr->smu_lock);
419 mutex_lock(&pp_handle->pp_lock);
420 level = hwmgr->dpm_level; 416 level = hwmgr->dpm_level;
421 mutex_unlock(&pp_handle->pp_lock); 417 mutex_unlock(&hwmgr->smu_lock);
422 return level; 418 return level;
423} 419}
424 420
425static uint32_t pp_dpm_get_sclk(void *handle, bool low) 421static uint32_t pp_dpm_get_sclk(void *handle, bool low)
426{ 422{
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 	uint32_t clk = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return clk;
 }
 
 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 	uint32_t clk = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return clk;
 }
 
 static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 }
 
 static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 }
 
 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 		enum amd_pm_state_type *user_state)
 {
 	int ret = 0;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr_handle_task(pp_handle, task_id, user_state);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
+	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
 }
 
 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 {
-	struct pp_hwmgr *hwmgr;
+	struct pp_hwmgr *hwmgr = handle;
 	struct pp_power_state *state;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
 	int ret = 0;
 	enum amd_pm_state_type pm_type;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->current_ps == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	state = hwmgr->current_ps;
 
@@ -571,147 +552,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 		pm_type = POWER_STATE_TYPE_DEFAULT;
 		break;
 	}
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return pm_type;
 }
 
 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 }
 
 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 	uint32_t mode = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return mode;
 }
 
 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_get_pp_num_states(void *handle,
 		struct pp_states_info *data)
 {
-	struct pp_hwmgr *hwmgr;
+	struct pp_hwmgr *hwmgr = handle;
 	int i;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
 	int ret = 0;
 
 	memset(data, 0, sizeof(*data));
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->ps == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	data->nums = hwmgr->num_ps;
 
@@ -735,73 +698,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
 			data->states[i] = POWER_STATE_TYPE_DEFAULT;
 		}
 	}
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return 0;
 }
 
 static int pp_dpm_get_pp_table(void *handle, char **table)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 	int size = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (!hwmgr->soft_pp_table)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	*table = (char *)hwmgr->soft_pp_table;
 	size = hwmgr->soft_pp_table_size;
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return size;
 }
 
 static int amd_powerplay_reset(void *handle)
 {
-	struct pp_instance *instance = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret;
 
-	ret = pp_check(instance);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
-	ret = pp_hw_fini(instance);
+	ret = pp_hw_fini(hwmgr);
 	if (ret)
 		return ret;
 
-	ret = hwmgr_hw_init(instance);
+	ret = hwmgr_hw_init(hwmgr);
 	if (ret)
 		return ret;
 
-	return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL);
+	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
 }
 
 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	if (!hwmgr->hardcode_pp_table) {
 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 						   hwmgr->soft_pp_table_size,
 						   GFP_KERNEL);
 		if (!hwmgr->hardcode_pp_table) {
-			mutex_unlock(&pp_handle->pp_lock);
+			mutex_unlock(&hwmgr->smu_lock);
 			return -ENOMEM;
 		}
 	}
@@ -809,7 +767,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	memcpy(hwmgr->hardcode_pp_table, buf, size);
 
 	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	ret = amd_powerplay_reset(handle);
 	if (ret)
@@ -827,163 +785,142 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 static int pp_dpm_force_clock_level(void *handle,
 		enum pp_clock_type type, uint32_t mask)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
 		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 	else
 		ret = -EINVAL;
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_print_clock_levels(void *handle,
 		enum pp_clock_type type, char *buf)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_get_sclk_od(void *handle)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_get_mclk_od(void *handle)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_dpm_read_sensor(void *handle, int idx,
 		void *value, int *size)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
 	if (value == NULL)
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	switch (idx) {
 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
 		*((uint32_t *)value) = hwmgr->pstate_sclk;
@@ -992,9 +929,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 		*((uint32_t *)value) = hwmgr->pstate_mclk;
 		return 0;
 	default:
-		mutex_lock(&pp_handle->pp_lock);
+		mutex_lock(&hwmgr->smu_lock);
 		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
-		mutex_unlock(&pp_handle->pp_lock);
+		mutex_unlock(&hwmgr->smu_lock);
 		return ret;
 	}
 }
@@ -1002,17 +939,14 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 static struct amd_vce_state*
 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return NULL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr && idx < hwmgr->num_vce_state_tables)
 		return &hwmgr->vce_states[idx];
 	return NULL;
@@ -1020,14 +954,11 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 
 static int pp_get_power_profile_mode(void *handle, char *buf)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 
-	if (!buf || pp_check(pp_handle))
+	if (!buf || pp_check(hwmgr))
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return snprintf(buf, PAGE_SIZE, "\n");
@@ -1038,36 +969,30 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 
 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = -EINVAL;
 
-	if (pp_check(pp_handle))
+	if (pp_check(hwmgr))
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
 		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 
-	if (pp_check(pp_handle))
+	if (pp_check(hwmgr))
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1079,16 +1004,13 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 static int pp_dpm_switch_power_profile(void *handle,
 		enum PP_SMC_POWER_PROFILE type, bool en)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	long workload;
 	uint32_t index;
 
-	if (pp_check(pp_handle))
+	if (pp_check(hwmgr))
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1097,7 +1019,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	if (!en) {
 		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
@@ -1113,7 +1035,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return 0;
 }
@@ -1125,46 +1047,40 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
 					uint32_t mc_addr_hi,
 					uint32_t size)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return -EINVAL;
 	}
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
 					virtual_addr_hi, mc_addr_low, mc_addr_hi,
 					size);
 
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
 }
 
 static int pp_set_power_limit(void *handle, uint32_t limit)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return -EINVAL;
@@ -1176,20 +1092,19 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	if (limit > hwmgr->default_power_limit)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
 	hwmgr->power_limit = limit;
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
@@ -1197,16 +1112,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 	if (limit == NULL)
 		return -EINVAL;
 
-	hwmgr = pp_handle->hwmgr;
-
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	if (default_limit)
 		*limit = hwmgr->default_power_limit;
 	else
 		*limit = hwmgr->power_limit;
 
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
 }
@@ -1214,42 +1127,37 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 static int pp_display_configuration_change(void *handle,
 	const struct amd_pp_display_configuration *display_config)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	phm_store_dal_configuration_data(hwmgr, display_config);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return 0;
 }
 
 static int pp_get_display_power_level(void *handle,
 		struct amd_pp_simple_clock_info *output)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (output == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_get_dal_power_level(hwmgr, output);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
@@ -1258,18 +1166,15 @@ static int pp_get_current_clocks(void *handle,
 {
 	struct amd_pp_simple_clock_info simple_clocks;
 	struct pp_clock_info hw_clocks;
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	phm_get_dal_power_level(hwmgr, &simple_clocks);
 
@@ -1283,7 +1188,7 @@ static int pp_get_current_clocks(void *handle,
 
 	if (ret) {
 		pr_info("Error in phm_get_clock_info \n");
-		mutex_unlock(&pp_handle->pp_lock);
+		mutex_unlock(&hwmgr->smu_lock);
 		return -EINVAL;
 	}
 
@@ -1303,29 +1208,26 @@ static int pp_get_current_clocks(void *handle,
 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
 		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 	}
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return 0;
 }
 
 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (clocks == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_get_clock_by_type(hwmgr, type, clocks);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
@@ -1333,21 +1235,19 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 		enum amd_pp_clock_type type,
 		struct pp_clock_levels_with_latency *clocks)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
 	if (!clocks)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
-	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
@@ -1355,47 +1255,41 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 		enum amd_pp_clock_type type,
 		struct pp_clock_levels_with_voltage *clocks)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
 	if (!clocks)
 		return -EINVAL;
 
-	hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
 
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_set_watermarks_for_clocks_ranges(void *handle,
 		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
 	if (!wm_with_clock_ranges)
 		return -EINVAL;
 
-	hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
 			wm_with_clock_ranges);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
 }
@@ -1403,22 +1297,19 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 static int pp_display_clock_voltage_request(void *handle,
 		struct pp_display_clock_request *clock)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 	if (ret)
 		return ret;
 
 	if (!clock)
 		return -EINVAL;
 
-	hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 	ret = phm_display_clock_voltage_request(hwmgr, clock);
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 
 	return ret;
 }
@@ -1426,42 +1317,36 @@ static int pp_display_clock_voltage_request(void *handle,
 static int pp_get_display_mode_validation_clocks(void *handle,
 		struct amd_pp_simple_clock_info *clocks)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (clocks == NULL)
 		return -EINVAL;
 
-	mutex_lock(&pp_handle->pp_lock);
+	mutex_lock(&hwmgr->smu_lock);
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
 		ret = phm_get_max_high_clocks(hwmgr, clocks);
 
-	mutex_unlock(&pp_handle->pp_lock);
+	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
 
 static int pp_set_mmhub_powergating_by_smu(void *handle)
 {
-	struct pp_hwmgr *hwmgr;
-	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(pp_handle);
+	ret = pp_check(hwmgr);
 
 	if (ret)
 		return ret;
 
-	hwmgr = pp_handle->hwmgr;
-
 	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
@@ -1470,7 +1355,7 @@ static int pp_set_mmhub_powergating_by_smu(void *handle)
 	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
 }
 
-const struct amd_pm_funcs pp_dpm_funcs = {
+static const struct amd_pm_funcs pp_dpm_funcs = {
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
 	.force_performance_level = pp_dpm_force_performance_level,
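Every wrapper converted in this file follows the same shape, so the whole diff can be read from one distilled before/after sketch. The wrapper names below are hypothetical and the bodies are reduced to the validation-and-locking skeleton that the real functions share:

/* Before: the opaque handle was a pp_instance owning the hwmgr and lock. */
static int pp_dpm_example_old(void *handle)
{
	struct pp_instance *pp_handle = (struct pp_instance *)handle;
	struct pp_hwmgr *hwmgr;
	int ret = pp_check(pp_handle);

	if (ret)
		return ret;
	hwmgr = pp_handle->hwmgr;
	mutex_lock(&pp_handle->pp_lock);
	/* ... dispatch through hwmgr->hwmgr_func ... */
	mutex_unlock(&pp_handle->pp_lock);
	return ret;
}

/* After: the handle is the hwmgr itself; the lock moves to hwmgr->smu_lock. */
static int pp_dpm_example_new(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = pp_check(hwmgr);

	if (ret)
		return ret;
	mutex_lock(&hwmgr->smu_lock);
	/* ... dispatch through hwmgr->hwmgr_func ... */
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

Dropping the intermediate pp_instance removes one pointer dereference and one NULL-check layer from every entry point, which is why so many `hwmgr = pp_handle->hwmgr;` lines simply disappear.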
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index e8c5a4f84324..f868b955da92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -24,14 +24,14 @@
 # It provides the hardware management services for the driver.
 
 HARDWARE_MGR = hwmgr.o processpptables.o \
-		hardwaremanager.o cz_hwmgr.o \
-		cz_clockpowergating.o pppcielanes.o\
+		hardwaremanager.o smu8_hwmgr.o \
+		pppcielanes.o\
 		process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
 		smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
 		smu7_clockpowergating.o \
 		vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
-		vega10_thermal.o rv_hwmgr.o pp_psm.o\
-		pp_overdriver.o
+		vega10_thermal.o smu10_hwmgr.o pp_psm.o\
+		pp_overdriver.o smu_helper.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
deleted file mode 100644
index 416abebb8b86..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "cz_clockpowergating.h"
-#include "cz_ppsmc.h"
-
-/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
-	0	GFX0L (3:0),	(27:24),
-	1	GFX0H (7:4),	(31:28),
-	2	GFX1L (3:0),	(19:16),
-	3	GFX1H (7:4),	(23:20),
-	4	DDIL (3:0),	(11: 8),
-	5	DDIH (7:4),	(15:12),
-	6	DDI2L (3:0),	( 3: 0),
-	7	DDI2H (7:4),	( 7: 4),
-*/
-#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
-#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
-
-
-int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
-	int ret = 0;
-
-	switch (block) {
-	case PHM_AsicBlock_UVD_MVC:
-	case PHM_AsicBlock_UVD:
-	case PHM_AsicBlock_UVD_HD:
-	case PHM_AsicBlock_UVD_SD:
-		if (gating == PHM_ClockGateSetting_StaticOff)
-			ret = cz_dpm_powerdown_uvd(hwmgr);
-		else
-			ret = cz_dpm_powerup_uvd(hwmgr);
-		break;
-	case PHM_AsicBlock_GFX:
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-
-bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
-{
-	return true;
-}
-
-
-int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return 0;
-}
-
-int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
-{
-	/* TODO */
-	return 0;
-}
-
-int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	uint32_t dpm_features = 0;
-
-	if (enable &&
-		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				  PHM_PlatformCaps_UVDDPM)) {
-		cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
-		dpm_features |= UVD_DPM_MASK;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
-	} else {
-		dpm_features |= UVD_DPM_MASK;
-		cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
-	}
-	return 0;
-}
-
-int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	uint32_t dpm_features = 0;
-
-	if (enable && phm_cap_enabled(
-			hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_VCEDPM)) {
-		cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
-		dpm_features |= VCE_DPM_MASK;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
-	} else {
-		dpm_features |= VCE_DPM_MASK;
-		cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			    PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
-	}
-
-	return 0;
-}
-
-
-void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	cz_hwmgr->uvd_power_gated = bgate;
-
-	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
-					AMD_IP_BLOCK_TYPE_UVD,
-					AMD_CG_STATE_GATE);
-		cz_dpm_update_uvd_dpm(hwmgr, true);
-		cz_dpm_powerdown_uvd(hwmgr);
-	} else {
-		cz_dpm_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
-					AMD_IP_BLOCK_TYPE_UVD,
-					AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_UNGATE);
-		cz_dpm_update_uvd_dpm(hwmgr, false);
-	}
-
-}
-
-void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	if (bgate) {
-		cgs_set_powergating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_CG_STATE_GATE);
-		cz_enable_disable_vce_dpm(hwmgr, false);
-		cz_dpm_powerdown_vce(hwmgr);
-		cz_hwmgr->vce_power_gated = true;
-	} else {
-		cz_dpm_powerup_vce(hwmgr);
-		cz_hwmgr->vce_power_gated = false;
-		cgs_set_clockgating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(
-					hwmgr->device,
-					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_PG_STATE_UNGATE);
-		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, true);
-	}
-}
-
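The bit-position macro in the file removed above is dense enough to deserve a worked evaluation; the values below follow the PhyID mapping table in the file's own comment (illustrative only, since the macro leaves the tree with this patch):

#define DDI_PHY_GEN_STATUS_VAL(phyID) \
	(1 << ((3 - ((phyID & 0x07) / 2)) * 8 + (phyID & 0x01) * 4))

/* phyID 0 (GFX0L): (3 - 0)*8 + 0*4 = 24 -> bit 24, the low bit of field (27:24) */
/* phyID 4 (DDIL):  (3 - 2)*8 + 0*4 =  8 -> bit  8, the low bit of field (11: 8) */
/* phyID 5 (DDIH):  (3 - 2)*8 + 1*4 = 12 -> bit 12, the low bit of field (15:12) */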
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
deleted file mode 100644
index 92f707bc46e7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _CZ_CLOCK_POWER_GATING_H_
-#define _CZ_CLOCK_POWER_GATING_H_
-
-#include "cz_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#endif /* _CZ_CLOCK_POWER_GATING_H_ */
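With this header gone, the powergating entry points are reachable only through the hwmgr function table, exactly as the pp_dpm_powergate_uvd() wrapper earlier in this patch already dispatches them. A minimal caller sketch, assuming the renamed smu8 backend installs the same hooks:

/* Gate UVD through the backend hook instead of the removed extern. */
if (hwmgr->hwmgr_func->powergate_uvd)
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, true);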
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index af1b22d964fd..229030027f3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,22 +30,24 @@
 #include <drm/amdgpu_drm.h>
 #include "power_state.h"
 #include "hwmgr.h"
-#include "pppcielanes.h"
-#include "ppatomctrl.h"
 #include "ppsmc.h"
 #include "amd_acpi.h"
 #include "pp_psm.h"
 
 extern const struct pp_smumgr_func ci_smu_funcs;
-extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func smu8_smu_funcs;
 extern const struct pp_smumgr_func iceland_smu_funcs;
 extern const struct pp_smumgr_func tonga_smu_funcs;
 extern const struct pp_smumgr_func fiji_smu_funcs;
 extern const struct pp_smumgr_func polaris10_smu_funcs;
 extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
+extern const struct pp_smumgr_func smu10_smu_funcs;
+
+extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
 static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
 static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
 static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
@@ -54,32 +56,6 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
 static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
 static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
 
-uint8_t convert_to_vid(uint16_t vddc)
-{
-	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-uint16_t convert_to_vddc(uint8_t vid)
-{
-	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
-}
-
-uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
-{
-	u32 mask = 0;
-	u32 shift = 0;
-
-	shift = (offset % 4) << 3;
-	if (size == sizeof(uint8_t))
-		mask = 0xFF << shift;
-	else if (size == sizeof(uint16_t))
-		mask = 0xFFFF << shift;
-
-	original_data &= ~mask;
-	original_data |= (field << shift);
-	return original_data;
-}
-
 static int phm_thermal_l2h_irq(void *private_data,
 		unsigned src_id, const uint32_t *iv_entry)
 {
@@ -140,23 +116,11 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
 	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
 }
 
-int hwmgr_early_init(struct pp_instance *handle)
+int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 {
-	struct pp_hwmgr *hwmgr;
-
-	if (handle == NULL)
+	if (hwmgr == NULL)
 		return -EINVAL;
 
-	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
-	if (hwmgr == NULL)
-		return -ENOMEM;
-
-	handle->hwmgr = hwmgr;
-	hwmgr->adev = handle->parent;
-	hwmgr->device = handle->device;
-	hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
-	hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
-	hwmgr->feature_mask = amdgpu_pp_feature_mask;
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	hwmgr->power_source = PP_PowerSource_AC;
 	hwmgr->pp_table_version = PP_TABLE_V1;
@@ -180,8 +144,8 @@ int hwmgr_early_init(struct pp_instance *handle)
 		break;
 	case AMDGPU_FAMILY_CZ:
 		hwmgr->od_enabled = false;
-		hwmgr->smumgr_funcs = &cz_smu_funcs;
-		cz_init_function_pointers(hwmgr);
+		hwmgr->smumgr_funcs = &smu8_smu_funcs;
+		smu8_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_VI:
 		switch (hwmgr->chip_id) {
@@ -230,8 +194,8 @@ int hwmgr_early_init(struct pp_instance *handle)
 		switch (hwmgr->chip_id) {
 		case CHIP_RAVEN:
 			hwmgr->od_enabled = false;
-			hwmgr->smumgr_funcs = &rv_smu_funcs;
-			rv_init_function_pointers(hwmgr);
+			hwmgr->smumgr_funcs = &smu10_smu_funcs;
+			smu10_init_function_pointers(hwmgr);
 			break;
 		default:
 			return -EINVAL;
@@ -244,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
 	return 0;
 }
 
-int hwmgr_hw_init(struct pp_instance *handle)
+int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
-	struct pp_hwmgr *hwmgr;
 	int ret = 0;
 
-	if (handle == NULL)
+	if (hwmgr == NULL)
 		return -EINVAL;
 
-	hwmgr = handle->hwmgr;
-
 	if (hwmgr->pptable_func == NULL ||
 	    hwmgr->pptable_func->pptable_init == NULL ||
 	    hwmgr->hwmgr_func->backend_init == NULL)
@@ -299,15 +260,11 @@ err:
299 return ret; 260 return ret;
300} 261}
301 262
302int hwmgr_hw_fini(struct pp_instance *handle) 263int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
303{ 264{
304 struct pp_hwmgr *hwmgr; 265 if (hwmgr == NULL)
305
306 if (handle == NULL || handle->hwmgr == NULL)
307 return -EINVAL; 266 return -EINVAL;
308 267
309 hwmgr = handle->hwmgr;
310
311 phm_stop_thermal_controller(hwmgr); 268 phm_stop_thermal_controller(hwmgr);
312 psm_set_boot_states(hwmgr); 269 psm_set_boot_states(hwmgr);
313 psm_adjust_power_state_dynamic(hwmgr, false, NULL); 270 psm_adjust_power_state_dynamic(hwmgr, false, NULL);
@@ -321,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
321 return psm_fini_power_state_table(hwmgr); 278 return psm_fini_power_state_table(hwmgr);
322} 279}
323 280
324int hwmgr_hw_suspend(struct pp_instance *handle) 281int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
325{ 282{
326 struct pp_hwmgr *hwmgr;
327 int ret = 0; 283 int ret = 0;
328 284
329 if (handle == NULL || handle->hwmgr == NULL) 285 if (hwmgr == NULL)
330 return -EINVAL; 286 return -EINVAL;
331 287
332 hwmgr = handle->hwmgr;
333 phm_disable_smc_firmware_ctf(hwmgr); 288 phm_disable_smc_firmware_ctf(hwmgr);
334 ret = psm_set_boot_states(hwmgr); 289 ret = psm_set_boot_states(hwmgr);
335 if (ret) 290 if (ret)
@@ -342,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
342 return ret; 297 return ret;
343} 298}
344 299
345int hwmgr_hw_resume(struct pp_instance *handle) 300int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
346{ 301{
347 struct pp_hwmgr *hwmgr;
348 int ret = 0; 302 int ret = 0;
349 303
350 if (handle == NULL || handle->hwmgr == NULL) 304 if (hwmgr == NULL)
351 return -EINVAL; 305 return -EINVAL;
352 306
353 hwmgr = handle->hwmgr;
354 ret = phm_setup_asic(hwmgr); 307 ret = phm_setup_asic(hwmgr);
355 if (ret) 308 if (ret)
356 return ret; 309 return ret;
@@ -385,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
385 } 338 }
386} 339}
387 340
388int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, 341int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
389 enum amd_pm_state_type *user_state) 342 enum amd_pm_state_type *user_state)
390{ 343{
391 int ret = 0; 344 int ret = 0;
392 struct pp_hwmgr *hwmgr;
393 345
394 if (handle == NULL || handle->hwmgr == NULL) 346 if (hwmgr == NULL)
395 return -EINVAL; 347 return -EINVAL;
396 348
397 hwmgr = handle->hwmgr;
398
399 switch (task_id) { 349 switch (task_id) {
400 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 350 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
401 ret = phm_set_cpu_power_state(hwmgr); 351 ret = phm_set_cpu_power_state(hwmgr);
@@ -432,468 +382,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
432 } 382 }
433 return ret; 383 return ret;
434} 384}
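Note: this and the surrounding hunks are the mechanical half of the "Remove wrapper layer" rework. Every entry point that used to unwrap a struct pp_instance handle now takes the struct pp_hwmgr directly, so the allocation and field setup the old hwmgr_early_init performed internally move to the caller, and each function keeps only a NULL check. A hedged sketch of the calling side after the change; adev, cgs_device and ret are assumed locals, and the field names come from the removed hwmgr_early_init lines above:

	/* sketch only: the caller now owns the allocation that the old
	 * hwmgr_early_init(handle) performed internally */
	struct pp_hwmgr *hwmgr = kzalloc(sizeof(*hwmgr), GFP_KERNEL);
	if (!hwmgr)
		return -ENOMEM;

	hwmgr->adev         = adev;		/* struct amdgpu_device * */
	hwmgr->device       = cgs_device;	/* assumed: the cgs handle */
	hwmgr->chip_family  = adev->family;
	hwmgr->chip_id      = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;

	ret = hwmgr_early_init(hwmgr);	/* now only validates and sets defaults */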
435/**
436 * Returns once the part of the register indicated by the mask has
437 * reached the given value.
438 */
439int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
440 uint32_t value, uint32_t mask)
441{
442 uint32_t i;
443 uint32_t cur_value;
444
445 if (hwmgr == NULL || hwmgr->device == NULL) {
446 pr_err("Invalid Hardware Manager!\n");
447 return -EINVAL;
448 }
449
450 for (i = 0; i < hwmgr->usec_timeout; i++) {
451 cur_value = cgs_read_register(hwmgr->device, index);
452 if ((cur_value & mask) == (value & mask))
453 break;
454 udelay(1);
455 }
456
457 /* timeout means the wait condition was never met */
458 if (i == hwmgr->usec_timeout)
459 return -ETIME;
460 return 0;
461}
462
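Note: phm_wait_on_register is a bounded busy-wait. It samples the register once per microsecond, up to hwmgr->usec_timeout iterations, and succeeds as soon as the masked bits of the readback equal the masked bits of the expected value, returning -ETIME on timeout. A hedged usage sketch; the register index and the BUSY bit are purely illustrative:

	/* sketch: wait for a hypothetical STATUS register (index 0x1234)
	 * to report its BUSY bit (bit 0) cleared */
	if (phm_wait_on_register(hwmgr, 0x1234, 0x0 /* expected */, 0x1 /* mask */))
		pr_err("engine did not go idle within %u us\n", hwmgr->usec_timeout);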
463
464/**
465 * Returns once the part of the register indicated by the mask has
466 * reached the given value.The indirect space is described by giving
467 * the memory-mapped index of the indirect index register.
468 */
469int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
470 uint32_t indirect_port,
471 uint32_t index,
472 uint32_t value,
473 uint32_t mask)
474{
475 if (hwmgr == NULL || hwmgr->device == NULL) {
476 pr_err("Invalid Hardware Manager!\n");
477 return -EINVAL;
478 }
479
480 cgs_write_register(hwmgr->device, indirect_port, index);
481 return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
482}
483
484int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
485 uint32_t index,
486 uint32_t value, uint32_t mask)
487{
488 uint32_t i;
489 uint32_t cur_value;
490
491 if (hwmgr == NULL || hwmgr->device == NULL)
492 return -EINVAL;
493
494 for (i = 0; i < hwmgr->usec_timeout; i++) {
495 cur_value = cgs_read_register(hwmgr->device,
496 index);
497 if ((cur_value & mask) != (value & mask))
498 break;
499 udelay(1);
500 }
501
502 /* timeout means the wait condition was never met */
503 if (i == hwmgr->usec_timeout)
504 return -ETIME;
505 return 0;
506}
507
508int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
509 uint32_t indirect_port,
510 uint32_t index,
511 uint32_t value,
512 uint32_t mask)
513{
514 if (hwmgr == NULL || hwmgr->device == NULL)
515 return -EINVAL;
516
517 cgs_write_register(hwmgr->device, indirect_port, index);
518 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
519 value, mask);
520}
521
522bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
523{
524 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
525}
526
527bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
528{
529 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
530}
531
532
533int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
534{
535 uint32_t i, j;
536 uint16_t vvalue;
537 bool found = false;
538 struct pp_atomctrl_voltage_table *table;
539
540 PP_ASSERT_WITH_CODE((NULL != vol_table),
541 "Voltage Table empty.", return -EINVAL);
542
543 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
544 GFP_KERNEL);
545
546 if (NULL == table)
547 return -ENOMEM;
548
549 table->mask_low = vol_table->mask_low;
550 table->phase_delay = vol_table->phase_delay;
551
552 for (i = 0; i < vol_table->count; i++) {
553 vvalue = vol_table->entries[i].value;
554 found = false;
555
556 for (j = 0; j < table->count; j++) {
557 if (vvalue == table->entries[j].value) {
558 found = true;
559 break;
560 }
561 }
562
563 if (!found) {
564 table->entries[table->count].value = vvalue;
565 table->entries[table->count].smio_low =
566 vol_table->entries[i].smio_low;
567 table->count++;
568 }
569 }
570
571 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
572 kfree(table);
573 table = NULL;
574 return 0;
575}
576
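Note: phm_trim_voltage_table deduplicates the table in place with an O(n^2) scan that keeps the first occurrence of each voltage value (and its smio_low) in first-seen order, while mask_low and phase_delay carry over unchanged; entries of 800, 900, 800, 1000 mV collapse to 800, 900, 1000. A standalone sketch of the same first-seen dedup:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t in[] = { 800, 900, 800, 1000 }, out[4];
		unsigned n = 0;

		for (unsigned i = 0; i < 4; i++) {
			unsigned j;
			for (j = 0; j < n; j++)
				if (out[j] == in[i])
					break;		/* duplicate: skip */
			if (j == n)
				out[n++] = in[i];	/* first occurrence: keep */
		}
		printf("%u unique levels\n", n);	/* prints 3 */
		return 0;
	}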
577int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
578 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
579{
580 uint32_t i;
581 int result;
582
583 PP_ASSERT_WITH_CODE((0 != dep_table->count),
584 "Voltage Dependency Table empty.", return -EINVAL);
585
586 PP_ASSERT_WITH_CODE((NULL != vol_table),
587 "vol_table empty.", return -EINVAL);
588
589 vol_table->mask_low = 0;
590 vol_table->phase_delay = 0;
591 vol_table->count = dep_table->count;
592
593 for (i = 0; i < dep_table->count; i++) {
594 vol_table->entries[i].value = dep_table->entries[i].mvdd;
595 vol_table->entries[i].smio_low = 0;
596 }
597
598 result = phm_trim_voltage_table(vol_table);
599 PP_ASSERT_WITH_CODE((0 == result),
600 "Failed to trim MVDD table.", return result);
601
602 return 0;
603}
604
605int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
606 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
607{
608 uint32_t i;
609 int result;
610
611 PP_ASSERT_WITH_CODE((0 != dep_table->count),
612 "Voltage Dependency Table empty.", return -EINVAL);
613
614 PP_ASSERT_WITH_CODE((NULL != vol_table),
615 "vol_table empty.", return -EINVAL);
616
617 vol_table->mask_low = 0;
618 vol_table->phase_delay = 0;
619 vol_table->count = dep_table->count;
620
621 for (i = 0; i < dep_table->count; i++) {
622 vol_table->entries[i].value = dep_table->entries[i].vddci;
623 vol_table->entries[i].smio_low = 0;
624 }
625
626 result = phm_trim_voltage_table(vol_table);
627 PP_ASSERT_WITH_CODE((0 == result),
628 "Failed to trim VDDCI table.", return result);
629
630 return 0;
631}
632
633int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
634 phm_ppt_v1_voltage_lookup_table *lookup_table)
635{
636 int i = 0;
637
638 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
639 "Voltage Lookup Table empty.", return -EINVAL);
640
641 PP_ASSERT_WITH_CODE((NULL != vol_table),
642 "vol_table empty.", return -EINVAL);
643
644 vol_table->mask_low = 0;
645 vol_table->phase_delay = 0;
646
647 vol_table->count = lookup_table->count;
648
649 for (i = 0; i < vol_table->count; i++) {
650 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
651 vol_table->entries[i].smio_low = 0;
652 }
653
654 return 0;
655}
656
657void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
658 struct pp_atomctrl_voltage_table *vol_table)
659{
660 unsigned int i, diff;
661
662 if (vol_table->count <= max_vol_steps)
663 return;
664
665 diff = vol_table->count - max_vol_steps;
666
667 for (i = 0; i < max_vol_steps; i++)
668 vol_table->entries[i] = vol_table->entries[i + diff];
669
670 vol_table->count = max_vol_steps;
671
672 return;
673}
674
675int phm_reset_single_dpm_table(void *table,
676 uint32_t count, int max)
677{
678 int i;
679
680 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
681
682 dpm_table->count = count > max ? max : count;
683
684 for (i = 0; i < dpm_table->count; i++)
685 dpm_table->dpm_level[i].enabled = false;
686
687 return 0;
688}
689
690void phm_setup_pcie_table_entry(
691 void *table,
692 uint32_t index, uint32_t pcie_gen,
693 uint32_t pcie_lanes)
694{
695 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
696 dpm_table->dpm_level[index].value = pcie_gen;
697 dpm_table->dpm_level[index].param1 = pcie_lanes;
698 dpm_table->dpm_level[index].enabled = 1;
699}
700
701int32_t phm_get_dpm_level_enable_mask_value(void *table)
702{
703 int32_t i;
704 int32_t mask = 0;
705 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
706
707 for (i = dpm_table->count; i > 0; i--) {
708 mask = mask << 1;
709 if (dpm_table->dpm_level[i - 1].enabled)
710 mask |= 0x1;
711 else
712 mask &= 0xFFFFFFFE;
713 }
714
715 return mask;
716}
717
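Note: phm_get_dpm_level_enable_mask_value builds the enable mask from the top level down, so bit i of the result corresponds to dpm_level[i]; the mask &= 0xFFFFFFFE in the else branch is a no-op, since the freshly shifted-in bit 0 is already clear. A standalone sketch: levels {on, off, on, on} yield 0xD (binary 1101):

	#include <stdio.h>

	int main(void)
	{
		int enabled[] = { 1, 0, 1, 1 };		/* levels 0..3 */
		unsigned mask = 0;
		int i;

		for (i = 4; i > 0; i--) {
			mask <<= 1;
			if (enabled[i - 1])
				mask |= 0x1;
		}
		printf("0x%X\n", mask);			/* prints 0xD */
		return 0;
	}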
718uint8_t phm_get_voltage_index(
719 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
720{
721 uint8_t count;
722 uint8_t i;
723
724 PP_ASSERT_WITH_CODE((NULL != lookup_table),
725 "Lookup Table empty.", return 0);
726 count = (uint8_t) (lookup_table->count);
727 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0);
728
729 for (i = 0; i < lookup_table->count; i++) {
730 /* find the first voltage equal to or greater than the requested one */
731 if (lookup_table->entries[i].us_vdd >= voltage)
732 return i;
733 }
734 /* voltage is bigger than max voltage in the table */
735 return i - 1;
736}
737
738uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
739 uint32_t voltage)
740{
741 uint8_t count;
742 uint8_t i = 0;
743
744 PP_ASSERT_WITH_CODE((NULL != voltage_table),
745 "Voltage Table empty.", return 0);
746 count = (uint8_t) (voltage_table->count);
747 PP_ASSERT_WITH_CODE((0 != count), "Voltage Table empty.", return 0);
748
749 for (i = 0; i < count; i++) {
750 /* find the first voltage equal to or greater than the requested one */
751 if (voltage_table->entries[i].value >= voltage)
752 return i;
753 }
754
755 /* voltage is bigger than max voltage in the table */
756 return i - 1;
757}
758
759uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
760{
761 uint32_t i;
762
763 for (i = 0; i < vddci_table->count; i++) {
764 if (vddci_table->entries[i].value >= vddci)
765 return vddci_table->entries[i].value;
766 }
767
768 pr_debug("vddci is larger than max value in vddci_table\n");
769 return vddci_table->entries[i-1].value;
770}
771
772int phm_find_boot_level(void *table,
773 uint32_t value, uint32_t *boot_level)
774{
775 int result = -EINVAL;
776 uint32_t i;
777 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
778
779 for (i = 0; i < dpm_table->count; i++) {
780 if (value == dpm_table->dpm_level[i].value) {
781 *boot_level = i;
782 result = 0;
783 }
784 }
785
786 return result;
787}
788
789int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
790 phm_ppt_v1_voltage_lookup_table *lookup_table,
791 uint16_t virtual_voltage_id, int32_t *sclk)
792{
793 uint8_t entry_id;
794 uint8_t voltage_id;
795 struct phm_ppt_v1_information *table_info =
796 (struct phm_ppt_v1_information *)(hwmgr->pptable);
797
798 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
799
800 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
801 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
802 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
803 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
804 break;
805 }
806
807 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
808 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
809 return -EINVAL;
810 }
811
812 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
813
814 return 0;
815}
816
817/**
818 * Initialize Dynamic State Adjustment Rule Settings
819 *
820 * @param hwmgr the address of the powerplay hardware manager.
821 */
822int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
823{
824 uint32_t table_size;
825 struct phm_clock_voltage_dependency_table *table_clk_vlt;
826 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
827
828 /* initialize vddc_dep_on_dal_pwrl table */
829 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
830 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
831
832 if (NULL == table_clk_vlt) {
833 pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
834 return -ENOMEM;
835 } else {
836 table_clk_vlt->count = 4;
837 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
838 table_clk_vlt->entries[0].v = 0;
839 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
840 table_clk_vlt->entries[1].v = 720;
841 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
842 table_clk_vlt->entries[2].v = 810;
843 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
844 table_clk_vlt->entries[3].v = 900;
845 if (pptable_info != NULL)
846 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
847 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
848 }
849
850 return 0;
851}
852
853uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
854{
855 uint32_t level = 0;
856
857 while (0 == (mask & (1 << level)))
858 level++;
859
860 return level;
861}
862
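Note: phm_get_lowest_enabled_level returns the position of the least-significant set bit, but the loop never terminates if mask is 0 (and shifting past bit 31 is undefined), so callers must guarantee at least one enabled level. A defensive standalone variant:

	#include <stdint.h>

	/* sketch: phm_get_lowest_enabled_level with a guard added */
	static uint32_t lowest_enabled_level(uint32_t mask)
	{
		uint32_t level = 0;

		if (mask == 0)
			return 0;	/* avoid spinning forever on an empty mask */

		while ((mask & (1u << level)) == 0)
			level++;
		return level;
	}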
863void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
864{
865 struct phm_ppt_v1_information *table_info =
866 (struct phm_ppt_v1_information *)hwmgr->pptable;
867 struct phm_clock_voltage_dependency_table *table =
868 table_info->vddc_dep_on_dal_pwrl;
869 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
870 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
871 uint32_t req_vddc = 0, req_volt, i;
872
873 if (!table || table->count <= 0
874 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
875 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
876 return;
877
878 for (i = 0; i < table->count; i++) {
879 if (dal_power_level == table->entries[i].clk) {
880 req_vddc = table->entries[i].v;
881 break;
882 }
883 }
884
885 vddc_table = table_info->vdd_dep_on_sclk;
886 for (i = 0; i < vddc_table->count; i++) {
887 if (req_vddc <= vddc_table->entries[i].vddc) {
888 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
889 smum_send_msg_to_smc_with_parameter(hwmgr,
890 PPSMC_MSG_VddC_Request, req_volt);
891 return;
892 }
893 }
894 pr_err("DAL requested level could not find"
895 " an available voltage in the VDDC DPM table\n");
896}
897 385
898void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) 386void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
899{ 387{
@@ -954,25 +442,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
954 return 0; 442 return 0;
955} 443}
956 444
957int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
958 uint32_t sclk, uint16_t id, uint16_t *voltage)
959{
960 uint32_t vol;
961 int ret = 0;
962
963 if (hwmgr->chip_id < CHIP_TONGA) {
964 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
965 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
966 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
967 if (*voltage >= 2000 || *voltage == 0)
968 *voltage = 1150;
969 } else {
970 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
971 *voltage = (uint16_t)(vol/100);
972 }
973 return ret;
974}
975
976int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) 445int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
977{ 446{
978 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 447 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 8ddfb78f28cc..10253b89b3d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -32,53 +32,52 @@
32#include "hwmgr.h" 32#include "hwmgr.h"
33#include "hardwaremanager.h" 33#include "hardwaremanager.h"
34#include "rv_ppsmc.h" 34#include "rv_ppsmc.h"
35#include "rv_hwmgr.h" 35#include "smu10_hwmgr.h"
36#include "power_state.h" 36#include "power_state.h"
37#include "rv_smumgr.h"
38#include "pp_soc15.h" 37#include "pp_soc15.h"
39 38
40#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5 39#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
41#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8 MHz, the low boundary of engine clock allowed on this chip */ 40#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8 MHz, the low boundary of engine clock allowed on this chip */
42#define SCLK_MIN_DIV_INTV_SHIFT 12 41#define SCLK_MIN_DIV_INTV_SHIFT 12
43#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100 MHz */ 42#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100 MHz */
44#define SMC_RAM_END 0x40000 43#define SMC_RAM_END 0x40000
45 44
46static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic; 45static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
47 46
48 47
49int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 48static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
50 struct pp_display_clock_request *clock_req); 49 struct pp_display_clock_request *clock_req);
51 50
52 51
53static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps) 52static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
54{ 53{
55 if (PhwRaven_Magic != hw_ps->magic) 54 if (SMU10_Magic != hw_ps->magic)
56 return NULL; 55 return NULL;
57 56
58 return (struct rv_power_state *)hw_ps; 57 return (struct smu10_power_state *)hw_ps;
59} 58}
60 59
61static const struct rv_power_state *cast_const_rv_ps( 60static const struct smu10_power_state *cast_const_smu10_ps(
62 const struct pp_hw_power_state *hw_ps) 61 const struct pp_hw_power_state *hw_ps)
63{ 62{
64 if (PhwRaven_Magic != hw_ps->magic) 63 if (SMU10_Magic != hw_ps->magic)
65 return NULL; 64 return NULL;
66 65
67 return (struct rv_power_state *)hw_ps; 66 return (struct smu10_power_state *)hw_ps;
68} 67}
69 68
70static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 69static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
71{ 70{
72 struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend); 71 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
73 72
74 rv_hwmgr->dce_slow_sclk_threshold = 30000; 73 smu10_data->dce_slow_sclk_threshold = 30000;
75 rv_hwmgr->thermal_auto_throttling_treshold = 0; 74 smu10_data->thermal_auto_throttling_treshold = 0;
76 rv_hwmgr->is_nb_dpm_enabled = 1; 75 smu10_data->is_nb_dpm_enabled = 1;
77 rv_hwmgr->dpm_flags = 1; 76 smu10_data->dpm_flags = 1;
78 rv_hwmgr->gfx_off_controled_by_driver = false; 77 smu10_data->gfx_off_controled_by_driver = false;
79 rv_hwmgr->need_min_deep_sleep_dcefclk = true; 78 smu10_data->need_min_deep_sleep_dcefclk = true;
80 rv_hwmgr->num_active_display = 0; 79 smu10_data->num_active_display = 0;
81 rv_hwmgr->deep_sleep_dcefclk = 0; 80 smu10_data->deep_sleep_dcefclk = 0;
82 81
83 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 82 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
84 PHM_PlatformCaps_SclkDeepSleep); 83 PHM_PlatformCaps_SclkDeepSleep);
@@ -91,13 +90,13 @@ static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
91 return 0; 90 return 0;
92} 91}
93 92
94static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 93static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
95 struct phm_clock_and_voltage_limits *table) 94 struct phm_clock_and_voltage_limits *table)
96{ 95{
97 return 0; 96 return 0;
98} 97}
99 98
100static int rv_init_dynamic_state_adjustment_rule_settings( 99static int smu10_init_dynamic_state_adjustment_rule_settings(
101 struct pp_hwmgr *hwmgr) 100 struct pp_hwmgr *hwmgr)
102{ 101{
103 uint32_t table_size = 102 uint32_t table_size =
@@ -134,30 +133,30 @@ static int rv_init_dynamic_state_adjustment_rule_settings(
134 return 0; 133 return 0;
135} 134}
136 135
137static int rv_get_system_info_data(struct pp_hwmgr *hwmgr) 136static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
138{ 137{
139 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend; 138 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
140 139
141 rv_data->sys_info.htc_hyst_lmt = 5; 140 smu10_data->sys_info.htc_hyst_lmt = 5;
142 rv_data->sys_info.htc_tmp_lmt = 203; 141 smu10_data->sys_info.htc_tmp_lmt = 203;
143 142
144 if (rv_data->thermal_auto_throttling_treshold == 0) 143 if (smu10_data->thermal_auto_throttling_treshold == 0)
145 rv_data->thermal_auto_throttling_treshold = 203; 144 smu10_data->thermal_auto_throttling_treshold = 203;
146 145
147 rv_construct_max_power_limits_table (hwmgr, 146 smu10_construct_max_power_limits_table (hwmgr,
148 &hwmgr->dyn_state.max_clock_voltage_on_ac); 147 &hwmgr->dyn_state.max_clock_voltage_on_ac);
149 148
150 rv_init_dynamic_state_adjustment_rule_settings(hwmgr); 149 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
151 150
152 return 0; 151 return 0;
153} 152}
154 153
155static int rv_construct_boot_state(struct pp_hwmgr *hwmgr) 154static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
156{ 155{
157 return 0; 156 return 0;
158} 157}
159 158
160static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) 159static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
161{ 160{
162 struct PP_Clocks clocks = {0}; 161 struct PP_Clocks clocks = {0};
163 struct pp_display_clock_request clock_req; 162 struct pp_display_clock_request clock_req;
@@ -166,111 +165,109 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
166 clock_req.clock_type = amd_pp_dcf_clock; 165 clock_req.clock_type = amd_pp_dcf_clock;
167 clock_req.clock_freq_in_khz = clocks.dcefClock * 10; 166 clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
168 167
169 PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req), 168 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
170 "Attempt to set DCF Clock Failed!", return -EINVAL); 169 "Attempt to set DCF Clock Failed!", return -EINVAL);
171 170
172 return 0; 171 return 0;
173} 172}
174 173
175static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) 174static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
176{ 175{
177 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 176 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
178 177
179 if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { 178 if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
180 rv_data->deep_sleep_dcefclk = clock/100; 179 smu10_data->deep_sleep_dcefclk = clock/100;
181 smum_send_msg_to_smc_with_parameter(hwmgr, 180 smum_send_msg_to_smc_with_parameter(hwmgr,
182 PPSMC_MSG_SetMinDeepSleepDcefclk, 181 PPSMC_MSG_SetMinDeepSleepDcefclk,
183 rv_data->deep_sleep_dcefclk); 182 smu10_data->deep_sleep_dcefclk);
184 } 183 }
185 return 0; 184 return 0;
186} 185}
187 186
188static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) 187static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
189{ 188{
190 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 189 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
191 190
192 if (rv_data->num_active_display != count) { 191 if (smu10_data->num_active_display != count) {
193 rv_data->num_active_display = count; 192 smu10_data->num_active_display = count;
194 smum_send_msg_to_smc_with_parameter(hwmgr, 193 smum_send_msg_to_smc_with_parameter(hwmgr,
195 PPSMC_MSG_SetDisplayCount, 194 PPSMC_MSG_SetDisplayCount,
196 rv_data->num_active_display); 195 smu10_data->num_active_display);
197 } 196 }
198 197
199 return 0; 198 return 0;
200} 199}
201 200
202static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 201static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
203{ 202{
204 return rv_set_clock_limit(hwmgr, input); 203 return smu10_set_clock_limit(hwmgr, input);
205} 204}
206 205
207static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr) 206static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
208{ 207{
209 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 208 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
210 209
211 rv_data->vcn_power_gated = true; 210 smu10_data->vcn_power_gated = true;
212 rv_data->isp_tileA_power_gated = true; 211 smu10_data->isp_tileA_power_gated = true;
213 rv_data->isp_tileB_power_gated = true; 212 smu10_data->isp_tileB_power_gated = true;
214 213
215 return 0; 214 return 0;
216} 215}
217 216
218 217
219static int rv_setup_asic_task(struct pp_hwmgr *hwmgr) 218static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
220{ 219{
221 return rv_init_power_gate_state(hwmgr); 220 return smu10_init_power_gate_state(hwmgr);
222} 221}
223 222
224static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr) 223static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
225{ 224{
226 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 225 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
227 226
228 rv_data->separation_time = 0; 227 smu10_data->separation_time = 0;
229 rv_data->cc6_disable = false; 228 smu10_data->cc6_disable = false;
230 rv_data->pstate_disable = false; 229 smu10_data->pstate_disable = false;
231 rv_data->cc6_setting_changed = false; 230 smu10_data->cc6_setting_changed = false;
232 231
233 return 0; 232 return 0;
234} 233}
235 234
236static int rv_power_off_asic(struct pp_hwmgr *hwmgr) 235static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
237{ 236{
238 return rv_reset_cc6_data(hwmgr); 237 return smu10_reset_cc6_data(hwmgr);
239} 238}
240 239
241static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) 240static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
242{ 241{
243 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 242 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
244 243
245 if (rv_data->gfx_off_controled_by_driver) 244 if (smu10_data->gfx_off_controled_by_driver)
246 smum_send_msg_to_smc(hwmgr, 245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
247 PPSMC_MSG_DisableGfxOff);
248 246
249 return 0; 247 return 0;
250} 248}
251 249
252static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 250static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
253{ 251{
254 return rv_disable_gfx_off(hwmgr); 252 return smu10_disable_gfx_off(hwmgr);
255} 253}
256 254
257static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr) 255static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
258{ 256{
259 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 257 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
260 258
261 if (rv_data->gfx_off_controled_by_driver) 259 if (smu10_data->gfx_off_controled_by_driver)
262 smum_send_msg_to_smc(hwmgr, 260 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
263 PPSMC_MSG_EnableGfxOff);
264 261
265 return 0; 262 return 0;
266} 263}
267 264
268static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 265static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
269{ 266{
270 return rv_enable_gfx_off(hwmgr); 267 return smu10_enable_gfx_off(hwmgr);
271} 268}
272 269
273static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 270static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
274 struct pp_power_state *prequest_ps, 271 struct pp_power_state *prequest_ps,
275 const struct pp_power_state *pcurrent_ps) 272 const struct pp_power_state *pcurrent_ps)
276{ 273{
@@ -314,14 +311,14 @@ static const DpmClock_t VddPhyClk[]= {
314 { 810, 3600}, 311 { 810, 3600},
315}; 312};
316 313
317static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, 314static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
318 struct rv_voltage_dependency_table **pptable, 315 struct smu10_voltage_dependency_table **pptable,
319 uint32_t num_entry, const DpmClock_t *pclk_dependency_table) 316 uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
320{ 317{
321 uint32_t table_size, i; 318 uint32_t table_size, i;
322 struct rv_voltage_dependency_table *ptable; 319 struct smu10_voltage_dependency_table *ptable;
323 320
324 table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry; 321 table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
325 ptable = kzalloc(table_size, GFP_KERNEL); 322 ptable = kzalloc(table_size, GFP_KERNEL);
326 323
327 if (NULL == ptable) 324 if (NULL == ptable)
@@ -341,107 +338,95 @@ static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
341} 338}
342 339
343 340
344static int rv_populate_clock_table(struct pp_hwmgr *hwmgr) 341static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
345{ 342{
346 int result; 343 int result;
347 344
348 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 345 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
349 DpmClocks_t *table = &(rv_data->clock_table); 346 DpmClocks_t *table = &(smu10_data->clock_table);
350 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 347 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
351 348
352 result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE); 349 result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
353 350
354 PP_ASSERT_WITH_CODE((0 == result), 351 PP_ASSERT_WITH_CODE((0 == result),
355 "Attempt to copy clock table from smc failed", 352 "Attempt to copy clock table from smc failed",
356 return result); 353 return result);
357 354
358 if (0 == result && table->DcefClocks[0].Freq != 0) { 355 if (0 == result && table->DcefClocks[0].Freq != 0) {
359 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 356 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
360 NUM_DCEFCLK_DPM_LEVELS, 357 NUM_DCEFCLK_DPM_LEVELS,
361 &rv_data->clock_table.DcefClocks[0]); 358 &smu10_data->clock_table.DcefClocks[0]);
362 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 359 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
363 NUM_SOCCLK_DPM_LEVELS, 360 NUM_SOCCLK_DPM_LEVELS,
364 &rv_data->clock_table.SocClocks[0]); 361 &smu10_data->clock_table.SocClocks[0]);
365 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 362 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
366 NUM_FCLK_DPM_LEVELS, 363 NUM_FCLK_DPM_LEVELS,
367 &rv_data->clock_table.FClocks[0]); 364 &smu10_data->clock_table.FClocks[0]);
368 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk, 365 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
369 NUM_MEMCLK_DPM_LEVELS, 366 NUM_MEMCLK_DPM_LEVELS,
370 &rv_data->clock_table.MemClocks[0]); 367 &smu10_data->clock_table.MemClocks[0]);
371 } else { 368 } else {
372 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk, 369 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
373 ARRAY_SIZE(VddDcfClk), 370 ARRAY_SIZE(VddDcfClk),
374 &VddDcfClk[0]); 371 &VddDcfClk[0]);
375 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk, 372 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
376 ARRAY_SIZE(VddSocClk), 373 ARRAY_SIZE(VddSocClk),
377 &VddSocClk[0]); 374 &VddSocClk[0]);
378 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk, 375 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
379 ARRAY_SIZE(VddFClk), 376 ARRAY_SIZE(VddFClk),
380 &VddFClk[0]); 377 &VddFClk[0]);
381 } 378 }
382 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk, 379 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
383 ARRAY_SIZE(VddDispClk), 380 ARRAY_SIZE(VddDispClk),
384 &VddDispClk[0]); 381 &VddDispClk[0]);
385 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk, 382 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
386 ARRAY_SIZE(VddDppClk), &VddDppClk[0]); 383 ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
387 rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk, 384 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
388 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]); 385 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
389 386
390 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 387 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
391 PPSMC_MSG_GetMinGfxclkFrequency), 388 result = smum_get_argument(hwmgr);
392 "Attempt to get min GFXCLK Failed!", 389 smu10_data->gfx_min_freq_limit = result * 100;
393 return -1); 390
394 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr, 391 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
395 &result), 392 result = smum_get_argument(hwmgr);
396 "Attempt to get min GFXCLK Failed!", 393 smu10_data->gfx_max_freq_limit = result * 100;
397 return -1);
398 rv_data->gfx_min_freq_limit = result * 100;
399
400 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
401 PPSMC_MSG_GetMaxGfxclkFrequency),
402 "Attempt to get max GFXCLK Failed!",
403 return -1);
404 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
405 &result),
406 "Attempt to get max GFXCLK Failed!",
407 return -1);
408 rv_data->gfx_max_freq_limit = result * 100;
409 394
410 return 0; 395 return 0;
411} 396}
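Note how the SMU mailbox reads are rewritten in this hunk: rv_copy_table_from_smc becomes the generic smum_smc_table_manager (the final bool selects direction, true meaning copy from the SMC), and the PP_ASSERT_WITH_CODE pairs around PPSMC_MSG_Get*GfxclkFrequency collapse into a plain send-then-read idiom. A hedged restatement of that idiom; min_mhz is an assumed local:

	/* sketch of the request/response idiom used above: post a query
	 * message, then fetch the SMU's reply from the argument register */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
	min_mhz = smum_get_argument(hwmgr);		/* reply is in MHz */
	smu10_data->gfx_min_freq_limit = min_mhz * 100;	/* stored in 10 kHz units */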
412 397
413static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 398static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
414{ 399{
415 int result = 0; 400 int result = 0;
416 struct rv_hwmgr *data; 401 struct smu10_hwmgr *data;
417 402
418 data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL); 403 data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
419 if (data == NULL) 404 if (data == NULL)
420 return -ENOMEM; 405 return -ENOMEM;
421 406
422 hwmgr->backend = data; 407 hwmgr->backend = data;
423 408
424 result = rv_initialize_dpm_defaults(hwmgr); 409 result = smu10_initialize_dpm_defaults(hwmgr);
425 if (result != 0) { 410 if (result != 0) {
426 pr_err("rv_initialize_dpm_defaults failed\n"); 411 pr_err("smu10_initialize_dpm_defaults failed\n");
427 return result; 412 return result;
428 } 413 }
429 414
430 rv_populate_clock_table(hwmgr); 415 smu10_populate_clock_table(hwmgr);
431 416
432 result = rv_get_system_info_data(hwmgr); 417 result = smu10_get_system_info_data(hwmgr);
433 if (result != 0) { 418 if (result != 0) {
434 pr_err("rv_get_system_info_data failed\n"); 419 pr_err("smu10_get_system_info_data failed\n");
435 return result; 420 return result;
436 } 421 }
437 422
438 rv_construct_boot_state(hwmgr); 423 smu10_construct_boot_state(hwmgr);
439 424
440 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 425 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
441 RAVEN_MAX_HARDWARE_POWERLEVELS; 426 SMU10_MAX_HARDWARE_POWERLEVELS;
442 427
443 hwmgr->platform_descriptor.hardwarePerformanceLevels = 428 hwmgr->platform_descriptor.hardwarePerformanceLevels =
444 RAVEN_MAX_HARDWARE_POWERLEVELS; 429 SMU10_MAX_HARDWARE_POWERLEVELS;
445 430
446 hwmgr->platform_descriptor.vbiosInterruptId = 0; 431 hwmgr->platform_descriptor.vbiosInterruptId = 0;
447 432
@@ -451,16 +436,16 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
451 436
452 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 437 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
453 438
454 hwmgr->pstate_sclk = RAVEN_UMD_PSTATE_GFXCLK; 439 hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
455 hwmgr->pstate_mclk = RAVEN_UMD_PSTATE_FCLK; 440 hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
456 441
457 return result; 442 return result;
458} 443}
459 444
460static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 445static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
461{ 446{
462 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 447 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
463 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 448 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
464 449
465 kfree(pinfo->vdd_dep_on_dcefclk); 450 kfree(pinfo->vdd_dep_on_dcefclk);
466 pinfo->vdd_dep_on_dcefclk = NULL; 451 pinfo->vdd_dep_on_dcefclk = NULL;
@@ -484,7 +469,7 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
484 return 0; 469 return 0;
485} 470}
486 471
487static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 472static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
488 enum amd_dpm_forced_level level) 473 enum amd_dpm_forced_level level)
489{ 474{
490 if (hwmgr->smu_version < 0x1E3700) { 475 if (hwmgr->smu_version < 0x1E3700) {
@@ -497,113 +482,113 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
497 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 482 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
498 smum_send_msg_to_smc_with_parameter(hwmgr, 483 smum_send_msg_to_smc_with_parameter(hwmgr,
499 PPSMC_MSG_SetHardMinGfxClk, 484 PPSMC_MSG_SetHardMinGfxClk,
500 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 485 SMU10_UMD_PSTATE_PEAK_GFXCLK);
501 smum_send_msg_to_smc_with_parameter(hwmgr, 486 smum_send_msg_to_smc_with_parameter(hwmgr,
502 PPSMC_MSG_SetHardMinFclkByFreq, 487 PPSMC_MSG_SetHardMinFclkByFreq,
503 RAVEN_UMD_PSTATE_PEAK_FCLK); 488 SMU10_UMD_PSTATE_PEAK_FCLK);
504 smum_send_msg_to_smc_with_parameter(hwmgr, 489 smum_send_msg_to_smc_with_parameter(hwmgr,
505 PPSMC_MSG_SetHardMinSocclkByFreq, 490 PPSMC_MSG_SetHardMinSocclkByFreq,
506 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 491 SMU10_UMD_PSTATE_PEAK_SOCCLK);
507 smum_send_msg_to_smc_with_parameter(hwmgr, 492 smum_send_msg_to_smc_with_parameter(hwmgr,
508 PPSMC_MSG_SetHardMinVcn, 493 PPSMC_MSG_SetHardMinVcn,
509 RAVEN_UMD_PSTATE_VCE); 494 SMU10_UMD_PSTATE_VCE);
510 495
511 smum_send_msg_to_smc_with_parameter(hwmgr, 496 smum_send_msg_to_smc_with_parameter(hwmgr,
512 PPSMC_MSG_SetSoftMaxGfxClk, 497 PPSMC_MSG_SetSoftMaxGfxClk,
513 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 498 SMU10_UMD_PSTATE_PEAK_GFXCLK);
514 smum_send_msg_to_smc_with_parameter(hwmgr, 499 smum_send_msg_to_smc_with_parameter(hwmgr,
515 PPSMC_MSG_SetSoftMaxFclkByFreq, 500 PPSMC_MSG_SetSoftMaxFclkByFreq,
516 RAVEN_UMD_PSTATE_PEAK_FCLK); 501 SMU10_UMD_PSTATE_PEAK_FCLK);
517 smum_send_msg_to_smc_with_parameter(hwmgr, 502 smum_send_msg_to_smc_with_parameter(hwmgr,
518 PPSMC_MSG_SetSoftMaxSocclkByFreq, 503 PPSMC_MSG_SetSoftMaxSocclkByFreq,
519 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 504 SMU10_UMD_PSTATE_PEAK_SOCCLK);
520 smum_send_msg_to_smc_with_parameter(hwmgr, 505 smum_send_msg_to_smc_with_parameter(hwmgr,
521 PPSMC_MSG_SetSoftMaxVcn, 506 PPSMC_MSG_SetSoftMaxVcn,
522 RAVEN_UMD_PSTATE_VCE); 507 SMU10_UMD_PSTATE_VCE);
523 break; 508 break;
524 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 509 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
525 smum_send_msg_to_smc_with_parameter(hwmgr, 510 smum_send_msg_to_smc_with_parameter(hwmgr,
526 PPSMC_MSG_SetHardMinGfxClk, 511 PPSMC_MSG_SetHardMinGfxClk,
527 RAVEN_UMD_PSTATE_MIN_GFXCLK); 512 SMU10_UMD_PSTATE_MIN_GFXCLK);
528 smum_send_msg_to_smc_with_parameter(hwmgr, 513 smum_send_msg_to_smc_with_parameter(hwmgr,
529 PPSMC_MSG_SetSoftMaxGfxClk, 514 PPSMC_MSG_SetSoftMaxGfxClk,
530 RAVEN_UMD_PSTATE_MIN_GFXCLK); 515 SMU10_UMD_PSTATE_MIN_GFXCLK);
531 break; 516 break;
532 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 517 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
533 smum_send_msg_to_smc_with_parameter(hwmgr, 518 smum_send_msg_to_smc_with_parameter(hwmgr,
534 PPSMC_MSG_SetHardMinFclkByFreq, 519 PPSMC_MSG_SetHardMinFclkByFreq,
535 RAVEN_UMD_PSTATE_MIN_FCLK); 520 SMU10_UMD_PSTATE_MIN_FCLK);
536 smum_send_msg_to_smc_with_parameter(hwmgr, 521 smum_send_msg_to_smc_with_parameter(hwmgr,
537 PPSMC_MSG_SetSoftMaxFclkByFreq, 522 PPSMC_MSG_SetSoftMaxFclkByFreq,
538 RAVEN_UMD_PSTATE_MIN_FCLK); 523 SMU10_UMD_PSTATE_MIN_FCLK);
539 break; 524 break;
540 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 525 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
541 smum_send_msg_to_smc_with_parameter(hwmgr, 526 smum_send_msg_to_smc_with_parameter(hwmgr,
542 PPSMC_MSG_SetHardMinGfxClk, 527 PPSMC_MSG_SetHardMinGfxClk,
543 RAVEN_UMD_PSTATE_GFXCLK); 528 SMU10_UMD_PSTATE_GFXCLK);
544 smum_send_msg_to_smc_with_parameter(hwmgr, 529 smum_send_msg_to_smc_with_parameter(hwmgr,
545 PPSMC_MSG_SetHardMinFclkByFreq, 530 PPSMC_MSG_SetHardMinFclkByFreq,
546 RAVEN_UMD_PSTATE_FCLK); 531 SMU10_UMD_PSTATE_FCLK);
547 smum_send_msg_to_smc_with_parameter(hwmgr, 532 smum_send_msg_to_smc_with_parameter(hwmgr,
548 PPSMC_MSG_SetHardMinSocclkByFreq, 533 PPSMC_MSG_SetHardMinSocclkByFreq,
549 RAVEN_UMD_PSTATE_SOCCLK); 534 SMU10_UMD_PSTATE_SOCCLK);
550 smum_send_msg_to_smc_with_parameter(hwmgr, 535 smum_send_msg_to_smc_with_parameter(hwmgr,
551 PPSMC_MSG_SetHardMinVcn, 536 PPSMC_MSG_SetHardMinVcn,
552 RAVEN_UMD_PSTATE_VCE); 537 SMU10_UMD_PSTATE_VCE);
553 538
554 smum_send_msg_to_smc_with_parameter(hwmgr, 539 smum_send_msg_to_smc_with_parameter(hwmgr,
555 PPSMC_MSG_SetSoftMaxGfxClk, 540 PPSMC_MSG_SetSoftMaxGfxClk,
556 RAVEN_UMD_PSTATE_GFXCLK); 541 SMU10_UMD_PSTATE_GFXCLK);
557 smum_send_msg_to_smc_with_parameter(hwmgr, 542 smum_send_msg_to_smc_with_parameter(hwmgr,
558 PPSMC_MSG_SetSoftMaxFclkByFreq, 543 PPSMC_MSG_SetSoftMaxFclkByFreq,
559 RAVEN_UMD_PSTATE_FCLK); 544 SMU10_UMD_PSTATE_FCLK);
560 smum_send_msg_to_smc_with_parameter(hwmgr, 545 smum_send_msg_to_smc_with_parameter(hwmgr,
561 PPSMC_MSG_SetSoftMaxSocclkByFreq, 546 PPSMC_MSG_SetSoftMaxSocclkByFreq,
562 RAVEN_UMD_PSTATE_SOCCLK); 547 SMU10_UMD_PSTATE_SOCCLK);
563 smum_send_msg_to_smc_with_parameter(hwmgr, 548 smum_send_msg_to_smc_with_parameter(hwmgr,
564 PPSMC_MSG_SetSoftMaxVcn, 549 PPSMC_MSG_SetSoftMaxVcn,
565 RAVEN_UMD_PSTATE_VCE); 550 SMU10_UMD_PSTATE_VCE);
566 break; 551 break;
567 case AMD_DPM_FORCED_LEVEL_AUTO: 552 case AMD_DPM_FORCED_LEVEL_AUTO:
568 smum_send_msg_to_smc_with_parameter(hwmgr, 553 smum_send_msg_to_smc_with_parameter(hwmgr,
569 PPSMC_MSG_SetHardMinGfxClk, 554 PPSMC_MSG_SetHardMinGfxClk,
570 RAVEN_UMD_PSTATE_MIN_GFXCLK); 555 SMU10_UMD_PSTATE_MIN_GFXCLK);
571 smum_send_msg_to_smc_with_parameter(hwmgr, 556 smum_send_msg_to_smc_with_parameter(hwmgr,
572 PPSMC_MSG_SetHardMinFclkByFreq, 557 PPSMC_MSG_SetHardMinFclkByFreq,
573 RAVEN_UMD_PSTATE_MIN_FCLK); 558 SMU10_UMD_PSTATE_MIN_FCLK);
574 smum_send_msg_to_smc_with_parameter(hwmgr, 559 smum_send_msg_to_smc_with_parameter(hwmgr,
575 PPSMC_MSG_SetHardMinSocclkByFreq, 560 PPSMC_MSG_SetHardMinSocclkByFreq,
576 RAVEN_UMD_PSTATE_MIN_SOCCLK); 561 SMU10_UMD_PSTATE_MIN_SOCCLK);
577 smum_send_msg_to_smc_with_parameter(hwmgr, 562 smum_send_msg_to_smc_with_parameter(hwmgr,
578 PPSMC_MSG_SetHardMinVcn, 563 PPSMC_MSG_SetHardMinVcn,
579 RAVEN_UMD_PSTATE_MIN_VCE); 564 SMU10_UMD_PSTATE_MIN_VCE);
580 565
581 smum_send_msg_to_smc_with_parameter(hwmgr, 566 smum_send_msg_to_smc_with_parameter(hwmgr,
582 PPSMC_MSG_SetSoftMaxGfxClk, 567 PPSMC_MSG_SetSoftMaxGfxClk,
583 RAVEN_UMD_PSTATE_PEAK_GFXCLK); 568 SMU10_UMD_PSTATE_PEAK_GFXCLK);
584 smum_send_msg_to_smc_with_parameter(hwmgr, 569 smum_send_msg_to_smc_with_parameter(hwmgr,
585 PPSMC_MSG_SetSoftMaxFclkByFreq, 570 PPSMC_MSG_SetSoftMaxFclkByFreq,
586 RAVEN_UMD_PSTATE_PEAK_FCLK); 571 SMU10_UMD_PSTATE_PEAK_FCLK);
587 smum_send_msg_to_smc_with_parameter(hwmgr, 572 smum_send_msg_to_smc_with_parameter(hwmgr,
588 PPSMC_MSG_SetSoftMaxSocclkByFreq, 573 PPSMC_MSG_SetSoftMaxSocclkByFreq,
589 RAVEN_UMD_PSTATE_PEAK_SOCCLK); 574 SMU10_UMD_PSTATE_PEAK_SOCCLK);
590 smum_send_msg_to_smc_with_parameter(hwmgr, 575 smum_send_msg_to_smc_with_parameter(hwmgr,
591 PPSMC_MSG_SetSoftMaxVcn, 576 PPSMC_MSG_SetSoftMaxVcn,
592 RAVEN_UMD_PSTATE_VCE); 577 SMU10_UMD_PSTATE_VCE);
593 break; 578 break;
594 case AMD_DPM_FORCED_LEVEL_LOW: 579 case AMD_DPM_FORCED_LEVEL_LOW:
595 smum_send_msg_to_smc_with_parameter(hwmgr, 580 smum_send_msg_to_smc_with_parameter(hwmgr,
596 PPSMC_MSG_SetHardMinGfxClk, 581 PPSMC_MSG_SetHardMinGfxClk,
597 RAVEN_UMD_PSTATE_MIN_GFXCLK); 582 SMU10_UMD_PSTATE_MIN_GFXCLK);
598 smum_send_msg_to_smc_with_parameter(hwmgr, 583 smum_send_msg_to_smc_with_parameter(hwmgr,
599 PPSMC_MSG_SetSoftMaxGfxClk, 584 PPSMC_MSG_SetSoftMaxGfxClk,
600 RAVEN_UMD_PSTATE_MIN_GFXCLK); 585 SMU10_UMD_PSTATE_MIN_GFXCLK);
601 smum_send_msg_to_smc_with_parameter(hwmgr, 586 smum_send_msg_to_smc_with_parameter(hwmgr,
602 PPSMC_MSG_SetHardMinFclkByFreq, 587 PPSMC_MSG_SetHardMinFclkByFreq,
603 RAVEN_UMD_PSTATE_MIN_FCLK); 588 SMU10_UMD_PSTATE_MIN_FCLK);
604 smum_send_msg_to_smc_with_parameter(hwmgr, 589 smum_send_msg_to_smc_with_parameter(hwmgr,
605 PPSMC_MSG_SetSoftMaxFclkByFreq, 590 PPSMC_MSG_SetSoftMaxFclkByFreq,
606 RAVEN_UMD_PSTATE_MIN_FCLK); 591 SMU10_UMD_PSTATE_MIN_FCLK);
607 break; 592 break;
608 case AMD_DPM_FORCED_LEVEL_MANUAL: 593 case AMD_DPM_FORCED_LEVEL_MANUAL:
609 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 594 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -613,14 +598,14 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
613 return 0; 598 return 0;
614} 599}
615 600
616static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 601static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
617{ 602{
618 struct rv_hwmgr *data; 603 struct smu10_hwmgr *data;
619 604
620 if (hwmgr == NULL) 605 if (hwmgr == NULL)
621 return -EINVAL; 606 return -EINVAL;
622 607
623 data = (struct rv_hwmgr *)(hwmgr->backend); 608 data = (struct smu10_hwmgr *)(hwmgr->backend);
624 609
625 if (low) 610 if (low)
626 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 611 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -629,14 +614,14 @@ static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
629 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk; 614 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
630} 615}
631 616
632static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 617static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
633{ 618{
634 struct rv_hwmgr *data; 619 struct smu10_hwmgr *data;
635 620
636 if (hwmgr == NULL) 621 if (hwmgr == NULL)
637 return -EINVAL; 622 return -EINVAL;
638 623
639 data = (struct rv_hwmgr *)(hwmgr->backend); 624 data = (struct smu10_hwmgr *)(hwmgr->backend);
640 625
641 if (low) 626 if (low)
642 return data->gfx_min_freq_limit; 627 return data->gfx_min_freq_limit;
@@ -644,34 +629,34 @@ static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
644 return data->gfx_max_freq_limit; 629 return data->gfx_max_freq_limit;
645} 630}
646 631
647static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 632static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
648 struct pp_hw_power_state *hw_ps) 633 struct pp_hw_power_state *hw_ps)
649{ 634{
650 return 0; 635 return 0;
651} 636}
652 637
653static int rv_dpm_get_pp_table_entry_callback( 638static int smu10_dpm_get_pp_table_entry_callback(
654 struct pp_hwmgr *hwmgr, 639 struct pp_hwmgr *hwmgr,
655 struct pp_hw_power_state *hw_ps, 640 struct pp_hw_power_state *hw_ps,
656 unsigned int index, 641 unsigned int index,
657 const void *clock_info) 642 const void *clock_info)
658{ 643{
659 struct rv_power_state *rv_ps = cast_rv_ps(hw_ps); 644 struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
660 645
661 rv_ps->levels[index].engine_clock = 0; 646 smu10_ps->levels[index].engine_clock = 0;
662 647
663 rv_ps->levels[index].vddc_index = 0; 648 smu10_ps->levels[index].vddc_index = 0;
664 rv_ps->level = index + 1; 649 smu10_ps->level = index + 1;
665 650
666 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 651 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
667 rv_ps->levels[index].ds_divider_index = 5; 652 smu10_ps->levels[index].ds_divider_index = 5;
668 rv_ps->levels[index].ss_divider_index = 5; 653 smu10_ps->levels[index].ss_divider_index = 5;
669 } 654 }
670 655
671 return 0; 656 return 0;
672} 657}
673 658
674static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 659static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
675{ 660{
676 int result; 661 int result;
677 unsigned long ret = 0; 662 unsigned long ret = 0;
@@ -681,72 +666,66 @@ static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
681 return result ? 0 : ret; 666 return result ? 0 : ret;
682} 667}
683 668
684static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 669static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
685 unsigned long entry, struct pp_power_state *ps) 670 unsigned long entry, struct pp_power_state *ps)
686{ 671{
687 int result; 672 int result;
688 struct rv_power_state *rv_ps; 673 struct smu10_power_state *smu10_ps;
689 674
690 ps->hardware.magic = PhwRaven_Magic; 675 ps->hardware.magic = SMU10_Magic;
691 676
692 rv_ps = cast_rv_ps(&(ps->hardware)); 677 smu10_ps = cast_smu10_ps(&(ps->hardware));
693 678
694 result = pp_tables_get_entry(hwmgr, entry, ps, 679 result = pp_tables_get_entry(hwmgr, entry, ps,
695 rv_dpm_get_pp_table_entry_callback); 680 smu10_dpm_get_pp_table_entry_callback);
696 681
697 rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 682 smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
698 rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 683 smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
699 684
700 return result; 685 return result;
701} 686}
702 687
703static int rv_get_power_state_size(struct pp_hwmgr *hwmgr) 688static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
704{ 689{
705 return sizeof(struct rv_power_state); 690 return sizeof(struct smu10_power_state);
706} 691}
707 692
708static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr) 693static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
709{ 694{
710 return 0; 695 return 0;
711} 696}
712 697
713 698
714static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 699static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
715 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 700 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
716{ 701{
717 return 0; 702 return 0;
718} 703}
719 704
720static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr, 705static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
721 struct amd_pp_simple_clock_info *info) 706 struct amd_pp_simple_clock_info *info)
722{ 707{
723 return -EINVAL; 708 return -EINVAL;
724} 709}
725 710
726static int rv_force_clock_level(struct pp_hwmgr *hwmgr, 711static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
727 enum pp_clock_type type, uint32_t mask) 712 enum pp_clock_type type, uint32_t mask)
728{ 713{
729 return 0; 714 return 0;
730} 715}
731 716
732static int rv_print_clock_levels(struct pp_hwmgr *hwmgr, 717static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
733 enum pp_clock_type type, char *buf) 718 enum pp_clock_type type, char *buf)
734{ 719{
735 struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend); 720 struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
736 struct rv_voltage_dependency_table *mclk_table = 721 struct smu10_voltage_dependency_table *mclk_table =
737 data->clock_vol_info.vdd_dep_on_fclk; 722 data->clock_vol_info.vdd_dep_on_fclk;
738 int i, now, size = 0; 723 int i, now, size = 0;
739 724
740 switch (type) { 725 switch (type) {
741 case PP_SCLK: 726 case PP_SCLK:
742 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 727 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
743 PPSMC_MSG_GetGfxclkFrequency), 728 now = smum_get_argument(hwmgr);
744 "Attempt to get current GFXCLK Failed!",
745 return -1);
746 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
747 &now),
748 "Attempt to get current GFXCLK Failed!",
749 return -1);
750 729
751 size += sprintf(buf + size, "0: %uMhz %s\n", 730 size += sprintf(buf + size, "0: %uMhz %s\n",
752 data->gfx_min_freq_limit / 100, 731 data->gfx_min_freq_limit / 100,
@@ -758,14 +737,8 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
758 == now) ? "*" : ""); 737 == now) ? "*" : "");
759 break; 738 break;
760 case PP_MCLK: 739 case PP_MCLK:
761 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 740 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
762 PPSMC_MSG_GetFclkFrequency), 741 now = smum_get_argument(hwmgr);
763 "Attempt to get current MEMCLK Failed!",
764 return -1);
765 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
766 &now),
767 "Attempt to get current MEMCLK Failed!",
768 return -1);
769 742
770 for (i = 0; i < mclk_table->count; i++) 743 for (i = 0; i < mclk_table->count; i++)
771 size += sprintf(buf + size, "%d: %uMhz %s\n", 744 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -781,16 +754,16 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
781 return size; 754 return size;
782} 755}
783 756
784static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 757static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
785 PHM_PerformanceLevelDesignation designation, uint32_t index, 758 PHM_PerformanceLevelDesignation designation, uint32_t index,
786 PHM_PerformanceLevel *level) 759 PHM_PerformanceLevel *level)
787{ 760{
788 struct rv_hwmgr *data; 761 struct smu10_hwmgr *data;
789 762
790 if (level == NULL || hwmgr == NULL || state == NULL) 763 if (level == NULL || hwmgr == NULL || state == NULL)
791 return -EINVAL; 764 return -EINVAL;
792 765
793 data = (struct rv_hwmgr *)(hwmgr->backend); 766 data = (struct smu10_hwmgr *)(hwmgr->backend);
794 767
795 if (index == 0) { 768 if (index == 0) {
796 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; 769 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -807,10 +780,10 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
807 return 0; 780 return 0;
808} 781}
809 782
810static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 783static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
811 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 784 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
812{ 785{
813 const struct rv_power_state *ps = cast_const_rv_ps(state); 786 const struct smu10_power_state *ps = cast_const_smu10_ps(state);
814 787
815 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index)); 788 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
816 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index)); 789 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
@@ -825,7 +798,7 @@ static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
825#define MEM_LATENCY_ERR 0xFFFF 798#define MEM_LATENCY_ERR 0xFFFF
826 799
827 800
828static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr, 801static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
829 uint32_t clock) 802 uint32_t clock)
830{ 803{
831 if (clock >= MEM_FREQ_LOW_LATENCY && 804 if (clock >= MEM_FREQ_LOW_LATENCY &&
@@ -837,14 +810,14 @@ static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
837 return MEM_LATENCY_ERR; 810 return MEM_LATENCY_ERR;
838} 811}
839 812
840static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 813static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
841 enum amd_pp_clock_type type, 814 enum amd_pp_clock_type type,
842 struct pp_clock_levels_with_latency *clocks) 815 struct pp_clock_levels_with_latency *clocks)
843{ 816{
844 uint32_t i; 817 uint32_t i;
845 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 818 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
846 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 819 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
847 struct rv_voltage_dependency_table *pclk_vol_table; 820 struct smu10_voltage_dependency_table *pclk_vol_table;
848 bool latency_required = false; 821 bool latency_required = false;
849 822
850 if (pinfo == NULL) 823 if (pinfo == NULL)
@@ -881,7 +854,7 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
881 for (i = 0; i < pclk_vol_table->count; i++) { 854 for (i = 0; i < pclk_vol_table->count; i++) {
882 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; 855 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
883 clocks->data[i].latency_in_us = latency_required ? 856 clocks->data[i].latency_in_us = latency_required ?
884 rv_get_mem_latency(hwmgr, 857 smu10_get_mem_latency(hwmgr,
885 pclk_vol_table->entries[i].clk) : 858 pclk_vol_table->entries[i].clk) :
886 0; 859 0;
887 clocks->num_levels++; 860 clocks->num_levels++;
@@ -890,14 +863,14 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
890 return 0; 863 return 0;
891} 864}
892 865
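smu10_get_clock_by_type_with_latency() above walks one of the vdd_dep_on_* tables and copies each entry into the caller's levels array, attaching a latency figure only when latency_required is set for the clock type. A self-contained sketch of that fill loop; the table and level types, latency banding, and clock values here are all invented for the demo (the real pp_clock_levels_with_latency layout is richer):

    /* Sketch of filling a levels-with-latency array from a clk/voltage
     * dependency table; all types and numbers are invented. */
    #include <stdint.h>
    #include <stdio.h>

    struct dep_entry { uint32_t clk; uint32_t vol; };
    struct dep_table { uint32_t count; struct dep_entry entries[4]; };
    struct level { uint32_t clocks_in_khz; uint32_t latency_in_us; };

    static uint32_t mem_latency(uint32_t clk)
    {
    	return clk < 600000 ? 25 : 5;	/* made-up latency banding */
    }

    int main(void)
    {
    	struct dep_table t = { 3, { {400000, 0}, {933000, 0}, {1200000, 0} } };
    	struct level levels[4];
    	int latency_required = 1;	/* set per clock type in the diff */
    	uint32_t i, num_levels = 0;

    	for (i = 0; i < t.count; i++) {
    		levels[i].clocks_in_khz = t.entries[i].clk;
    		levels[i].latency_in_us = latency_required ?
    					  mem_latency(t.entries[i].clk) : 0;
    		num_levels++;
    	}
    	printf("%u levels\n", num_levels);
    	return 0;
    }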
893static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 866static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
894 enum amd_pp_clock_type type, 867 enum amd_pp_clock_type type,
895 struct pp_clock_levels_with_voltage *clocks) 868 struct pp_clock_levels_with_voltage *clocks)
896{ 869{
897 uint32_t i; 870 uint32_t i;
898 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); 871 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
899 struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); 872 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
900 struct rv_voltage_dependency_table *pclk_vol_table = NULL; 873 struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
901 874
902 if (pinfo == NULL) 875 if (pinfo == NULL)
903 return -EINVAL; 876 return -EINVAL;
@@ -932,29 +905,28 @@ static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
932 return 0; 905 return 0;
933} 906}
934 907
935int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 908static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
936 struct pp_display_clock_request *clock_req) 909 struct pp_display_clock_request *clock_req)
937{ 910{
938 int result = 0; 911 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
939 struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
940 enum amd_pp_clock_type clk_type = clock_req->clock_type; 912 enum amd_pp_clock_type clk_type = clock_req->clock_type;
941 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 913 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
942 PPSMC_Msg msg; 914 PPSMC_Msg msg;
943 915
944 switch (clk_type) { 916 switch (clk_type) {
945 case amd_pp_dcf_clock: 917 case amd_pp_dcf_clock:
946 if (clk_freq == rv_data->dcf_actual_hard_min_freq) 918 if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
947 return 0; 919 return 0;
948 msg = PPSMC_MSG_SetHardMinDcefclkByFreq; 920 msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
949 rv_data->dcf_actual_hard_min_freq = clk_freq; 921 smu10_data->dcf_actual_hard_min_freq = clk_freq;
950 break; 922 break;
951 case amd_pp_soc_clock: 923 case amd_pp_soc_clock:
952 msg = PPSMC_MSG_SetHardMinSocclkByFreq; 924 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
953 break; 925 break;
954 case amd_pp_f_clock: 926 case amd_pp_f_clock:
955 if (clk_freq == rv_data->f_actual_hard_min_freq) 927 if (clk_freq == smu10_data->f_actual_hard_min_freq)
956 return 0; 928 return 0;
957 rv_data->f_actual_hard_min_freq = clk_freq; 929 smu10_data->f_actual_hard_min_freq = clk_freq;
958 msg = PPSMC_MSG_SetHardMinFclkByFreq; 930 msg = PPSMC_MSG_SetHardMinFclkByFreq;
959 break; 931 break;
960 default: 932 default:
@@ -962,19 +934,18 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
962 return -EINVAL; 934 return -EINVAL;
963 } 935 }
964 936
965 result = smum_send_msg_to_smc_with_parameter(hwmgr, msg, 937 smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
966 clk_freq);
967 938
968 return result; 939 return 0;
969} 940}
970 941
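smu10_display_clock_voltage_request() only messages the SMC when the requested hard minimum actually changes: the dcf and fclk cases compare against a cached frequency and return early on a match. A standalone sketch of that caching, with a stub set_hard_min() in place of smum_send_msg_to_smc_with_parameter() and an invented cache struct:

    /* Caching sketch: skip the SMC round-trip when the requested hard
     * minimum already matches the last value sent. */
    #include <stdint.h>
    #include <stdio.h>

    static void set_hard_min(uint32_t mhz)
    {
    	printf("SMC: hard min -> %u MHz\n", mhz);
    }

    struct dcf_cache { uint32_t hard_min_mhz; };

    static void request_dcf_hard_min(struct dcf_cache *c, uint32_t freq_khz)
    {
    	uint32_t mhz = freq_khz / 1000;	/* same kHz -> MHz scaling as above */

    	if (mhz == c->hard_min_mhz)
    		return;			/* unchanged, nothing to send */
    	c->hard_min_mhz = mhz;
    	set_hard_min(mhz);
    }

    int main(void)
    {
    	struct dcf_cache c = { 0 };
    	request_dcf_hard_min(&c, 600000);	/* sends */
    	request_dcf_hard_min(&c, 600000);	/* cache hit, silent */
    	return 0;
    }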
971static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 942static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
972{ 943{
973 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */ 944 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
974 return 0; 945 return 0;
975} 946}
976 947
977static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr) 948static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
978{ 949{
979 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0, 950 uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
980 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP); 951 mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
@@ -990,7 +961,7 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
990 return cur_temp; 961 return cur_temp;
991} 962}
992 963
993static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx, 964static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
994 void *value, int *size) 965 void *value, int *size)
995{ 966{
996 uint32_t sclk, mclk; 967 uint32_t sclk, mclk;
@@ -998,25 +969,21 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
998 969
999 switch (idx) { 970 switch (idx) {
1000 case AMDGPU_PP_SENSOR_GFX_SCLK: 971 case AMDGPU_PP_SENSOR_GFX_SCLK:
1001 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); 972 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
1002 if (!ret) { 973 sclk = smum_get_argument(hwmgr);
1003 rv_read_arg_from_smc(hwmgr, &sclk);
1004 /* in units of 10KHZ */ 974 /* in units of 10KHZ */
1005 *((uint32_t *)value) = sclk * 100; 975 *((uint32_t *)value) = sclk * 100;
1006 *size = 4; 976 *size = 4;
1007 }
1008 break; 977 break;
1009 case AMDGPU_PP_SENSOR_GFX_MCLK: 978 case AMDGPU_PP_SENSOR_GFX_MCLK:
1010 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); 979 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
1011 if (!ret) { 980 mclk = smum_get_argument(hwmgr);
1012 rv_read_arg_from_smc(hwmgr, &mclk);
1013 /* in units of 10KHZ */ 981 /* in units of 10KHZ */
1014 *((uint32_t *)value) = mclk * 100; 982 *((uint32_t *)value) = mclk * 100;
1015 *size = 4; 983 *size = 4;
1016 }
1017 break; 984 break;
1018 case AMDGPU_PP_SENSOR_GPU_TEMP: 985 case AMDGPU_PP_SENSOR_GPU_TEMP:
1019 *((uint32_t *)value) = rv_thermal_get_temperature(hwmgr); 986 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1020 break; 987 break;
1021 default: 988 default:
1022 ret = -EINVAL; 989 ret = -EINVAL;
@@ -1026,50 +993,50 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1026 return ret; 993 return ret;
1027} 994}
1028 995
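The sensor path above stores clocks in units of 10 kHz, per the in-line comment; since smu10_read_sensor() multiplies the SMU's answer by 100 to get there, the reply is effectively a plain MHz count. A worked example of that scaling (the frequency is invented):

    /* Unit-scaling sketch for the sensor path: SMU answer in MHz,
     * sensor value in 10 kHz units, so multiply by 100. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t sclk_mhz = 1100;		/* pretend SMU answer */
    	uint32_t value = sclk_mhz * 100;	/* 110000 x 10 kHz == 1100 MHz */

    	printf("sensor value: %u (10 kHz units)\n", value);
    	return 0;
    }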
1029static int rv_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) 996static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
1030{ 997{
1031 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); 998 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
1032} 999}
1033 1000
1034static const struct pp_hwmgr_func rv_hwmgr_funcs = { 1001static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1035 .backend_init = rv_hwmgr_backend_init, 1002 .backend_init = smu10_hwmgr_backend_init,
1036 .backend_fini = rv_hwmgr_backend_fini, 1003 .backend_fini = smu10_hwmgr_backend_fini,
1037 .asic_setup = NULL, 1004 .asic_setup = NULL,
1038 .apply_state_adjust_rules = rv_apply_state_adjust_rules, 1005 .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1039 .force_dpm_level = rv_dpm_force_dpm_level, 1006 .force_dpm_level = smu10_dpm_force_dpm_level,
1040 .get_power_state_size = rv_get_power_state_size, 1007 .get_power_state_size = smu10_get_power_state_size,
1041 .powerdown_uvd = NULL, 1008 .powerdown_uvd = NULL,
1042 .powergate_uvd = NULL, 1009 .powergate_uvd = NULL,
1043 .powergate_vce = NULL, 1010 .powergate_vce = NULL,
1044 .get_mclk = rv_dpm_get_mclk, 1011 .get_mclk = smu10_dpm_get_mclk,
1045 .get_sclk = rv_dpm_get_sclk, 1012 .get_sclk = smu10_dpm_get_sclk,
1046 .patch_boot_state = rv_dpm_patch_boot_state, 1013 .patch_boot_state = smu10_dpm_patch_boot_state,
1047 .get_pp_table_entry = rv_dpm_get_pp_table_entry, 1014 .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1048 .get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries, 1015 .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1049 .set_cpu_power_state = rv_set_cpu_power_state, 1016 .set_cpu_power_state = smu10_set_cpu_power_state,
1050 .store_cc6_data = rv_store_cc6_data, 1017 .store_cc6_data = smu10_store_cc6_data,
1051 .force_clock_level = rv_force_clock_level, 1018 .force_clock_level = smu10_force_clock_level,
1052 .print_clock_levels = rv_print_clock_levels, 1019 .print_clock_levels = smu10_print_clock_levels,
1053 .get_dal_power_level = rv_get_dal_power_level, 1020 .get_dal_power_level = smu10_get_dal_power_level,
1054 .get_performance_level = rv_get_performance_level, 1021 .get_performance_level = smu10_get_performance_level,
1055 .get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks, 1022 .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1056 .get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency, 1023 .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1057 .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage, 1024 .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1058 .get_max_high_clocks = rv_get_max_high_clocks, 1025 .get_max_high_clocks = smu10_get_max_high_clocks,
1059 .read_sensor = rv_read_sensor, 1026 .read_sensor = smu10_read_sensor,
1060 .set_active_display_count = rv_set_active_display_count, 1027 .set_active_display_count = smu10_set_active_display_count,
1061 .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, 1028 .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1062 .dynamic_state_management_enable = rv_enable_dpm_tasks, 1029 .dynamic_state_management_enable = smu10_enable_dpm_tasks,
1063 .power_off_asic = rv_power_off_asic, 1030 .power_off_asic = smu10_power_off_asic,
1064 .asic_setup = rv_setup_asic_task, 1031 .asic_setup = smu10_setup_asic_task,
1065 .power_state_set = rv_set_power_state_tasks, 1032 .power_state_set = smu10_set_power_state_tasks,
1066 .dynamic_state_management_disable = rv_disable_dpm_tasks, 1033 .dynamic_state_management_disable = smu10_disable_dpm_tasks,
1067 .set_mmhub_powergating_by_smu = rv_set_mmhub_powergating_by_smu, 1034 .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
1068}; 1035};
1069 1036
1070int rv_init_function_pointers(struct pp_hwmgr *hwmgr) 1037int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1071{ 1038{
1072 hwmgr->hwmgr_func = &rv_hwmgr_funcs; 1039 hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1073 hwmgr->pptable_func = &pptable_funcs; 1040 hwmgr->pptable_func = &pptable_funcs;
1074 return 0; 1041 return 0;
1075} 1042}
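smu10_init_function_pointers() is the whole public surface of this file: every handler above is static and is reached only through the const smu10_hwmgr_funcs dispatch table (note the diff also makes display_clock_voltage_request static for exactly this reason). A standalone sketch of that registration pattern, with ops and context types invented for the demo:

    /* Dispatch-table registration sketch, modeled on
     * init_function_pointers; all names here are invented. */
    #include <stdio.h>

    struct ctx;
    struct hw_ops {
    	int (*backend_init)(struct ctx *c);
    	int (*read_sensor)(struct ctx *c, int idx, unsigned int *value);
    };

    static int demo_backend_init(struct ctx *c) { (void)c; return 0; }
    static int demo_read_sensor(struct ctx *c, int idx, unsigned int *value)
    {
    	(void)c; (void)idx;
    	*value = 42;
    	return 0;
    }

    /* Like smu10_hwmgr_funcs: one const table, all handlers static. */
    static const struct hw_ops demo_ops = {
    	.backend_init = demo_backend_init,
    	.read_sensor  = demo_read_sensor,
    };

    struct ctx { const struct hw_ops *ops; };

    static int init_function_pointers(struct ctx *c)
    {
    	c->ops = &demo_ops;	/* the only exported hook */
    	return 0;
    }

    int main(void)
    {
    	struct ctx c;
    	unsigned int v;

    	init_function_pointers(&c);
    	c.ops->read_sensor(&c, 0, &v);
    	printf("%u\n", v);
    	return 0;
    }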
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index c3bc311dc59f..175c3a592b6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -21,17 +21,17 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_HWMGR_H 24#ifndef SMU10_HWMGR_H
25#define RAVEN_HWMGR_H 25#define SMU10_HWMGR_H
26 26
27#include "hwmgr.h" 27#include "hwmgr.h"
28#include "rv_inc.h" 28#include "smu10_inc.h"
29#include "smu10_driver_if.h" 29#include "smu10_driver_if.h"
30#include "rv_ppsmc.h" 30#include "rv_ppsmc.h"
31 31
32 32
33#define RAVEN_MAX_HARDWARE_POWERLEVELS 8 33#define SMU10_MAX_HARDWARE_POWERLEVELS 8
34#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 34#define SMU10_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
35 35
36#define DPMFlags_SCLK_Enabled 0x00000001 36#define DPMFlags_SCLK_Enabled 0x00000001
37#define DPMFlags_UVD_Enabled 0x00000002 37#define DPMFlags_UVD_Enabled 0x00000002
@@ -47,10 +47,10 @@
47 47
48#define SMU_PHYID_SHIFT 8 48#define SMU_PHYID_SHIFT 8
49 49
50#define RAVEN_PCIE_POWERGATING_TARGET_GFX 0 50#define SMU10_PCIE_POWERGATING_TARGET_GFX 0
51#define RAVEN_PCIE_POWERGATING_TARGET_DDI 1 51#define SMU10_PCIE_POWERGATING_TARGET_DDI 1
52#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE 2 52#define SMU10_PCIE_POWERGATING_TARGET_PLLCASCADE 2
53#define RAVEN_PCIE_POWERGATING_TARGET_PHY 3 53#define SMU10_PCIE_POWERGATING_TARGET_PHY 3
54 54
55enum VQ_TYPE { 55enum VQ_TYPE {
56 CLOCK_TYPE_DCLK = 0L, 56 CLOCK_TYPE_DCLK = 0L,
@@ -65,14 +65,14 @@ enum VQ_TYPE {
65#define SUSTAINABLE_CU_MASK 0xff000000 65#define SUSTAINABLE_CU_MASK 0xff000000
66#define SUSTAINABLE_CU_SHIFT 24 66#define SUSTAINABLE_CU_SHIFT 24
67 67
68struct rv_dpm_entry { 68struct smu10_dpm_entry {
69 uint32_t soft_min_clk; 69 uint32_t soft_min_clk;
70 uint32_t hard_min_clk; 70 uint32_t hard_min_clk;
71 uint32_t soft_max_clk; 71 uint32_t soft_max_clk;
72 uint32_t hard_max_clk; 72 uint32_t hard_max_clk;
73}; 73};
74 74
75struct rv_power_level { 75struct smu10_power_level {
76 uint32_t engine_clock; 76 uint32_t engine_clock;
77 uint8_t vddc_index; 77 uint8_t vddc_index;
78 uint8_t ds_divider_index; 78 uint8_t ds_divider_index;
@@ -86,14 +86,14 @@ struct rv_power_level {
86 uint8_t rsv[3]; 86 uint8_t rsv[3];
87}; 87};
88 88
89/*used for the nbpsFlags field in rv_power state*/ 89/*used for the nbpsFlags field in smu10_power state*/
90#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0) 90#define SMU10_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
91#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1) 91#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
92#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2) 92#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
93 93
94#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0) 94#define SMU10_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
95 95
96struct rv_uvd_clocks { 96struct smu10_uvd_clocks {
97 uint32_t vclk; 97 uint32_t vclk;
98 uint32_t dclk; 98 uint32_t dclk;
99 uint32_t vclk_low_divider; 99 uint32_t vclk_low_divider;
@@ -118,16 +118,16 @@ struct pp_disable_nbpslo_flags {
118}; 118};
119 119
120 120
121enum rv_pstate_previous_action { 121enum smu10_pstate_previous_action {
122 DO_NOTHING = 1, 122 DO_NOTHING = 1,
123 FORCE_HIGH, 123 FORCE_HIGH,
124 CANCEL_FORCE_HIGH 124 CANCEL_FORCE_HIGH
125}; 125};
126 126
127struct rv_power_state { 127struct smu10_power_state {
128 unsigned int magic; 128 unsigned int magic;
129 uint32_t level; 129 uint32_t level;
130 struct rv_uvd_clocks uvd_clocks; 130 struct smu10_uvd_clocks uvd_clocks;
131 uint32_t evclk; 131 uint32_t evclk;
132 uint32_t ecclk; 132 uint32_t ecclk;
133 uint32_t samclk; 133 uint32_t samclk;
@@ -141,79 +141,79 @@ struct rv_power_state {
141 uint8_t dpm_x_nbps_low; 141 uint8_t dpm_x_nbps_low;
142 uint8_t dpm_x_nbps_high; 142 uint8_t dpm_x_nbps_high;
143 143
144 enum rv_pstate_previous_action action; 144 enum smu10_pstate_previous_action action;
145 145
146 struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS]; 146 struct smu10_power_level levels[SMU10_MAX_HARDWARE_POWERLEVELS];
147 struct pp_disable_nbpslo_flags nbpslo_flags; 147 struct pp_disable_nbpslo_flags nbpslo_flags;
148}; 148};
149 149
150#define RAVEN_NUM_NBPSTATES 4 150#define SMU10_NUM_NBPSTATES 4
151#define RAVEN_NUM_NBPMEMORYCLOCK 2 151#define SMU10_NUM_NBPMEMORYCLOCK 2
152 152
153 153
154struct rv_display_phy_info_entry { 154struct smu10_display_phy_info_entry {
155 uint8_t phy_present; 155 uint8_t phy_present;
156 uint8_t active_lane_mapping; 156 uint8_t active_lane_mapping;
157 uint8_t display_config_type; 157 uint8_t display_config_type;
158 uint8_t active_num_of_lanes; 158 uint8_t active_num_of_lanes;
159}; 159};
160 160
161#define RAVEN_MAX_DISPLAYPHY_IDS 10 161#define SMU10_MAX_DISPLAYPHY_IDS 10
162 162
163struct rv_display_phy_info { 163struct smu10_display_phy_info {
164 bool display_phy_access_initialized; 164 bool display_phy_access_initialized;
165 struct rv_display_phy_info_entry entries[RAVEN_MAX_DISPLAYPHY_IDS]; 165 struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
166}; 166};
167 167
168#define MAX_DISPLAY_CLOCK_LEVEL 8 168#define MAX_DISPLAY_CLOCK_LEVEL 8
169 169
170struct rv_system_info{ 170struct smu10_system_info{
171 uint8_t htc_tmp_lmt; 171 uint8_t htc_tmp_lmt;
172 uint8_t htc_hyst_lmt; 172 uint8_t htc_hyst_lmt;
173}; 173};
174 174
175#define MAX_REGULAR_DPM_NUMBER 8 175#define MAX_REGULAR_DPM_NUMBER 8
176 176
177struct rv_mclk_latency_entries { 177struct smu10_mclk_latency_entries {
178 uint32_t frequency; 178 uint32_t frequency;
179 uint32_t latency; 179 uint32_t latency;
180}; 180};
181 181
182struct rv_mclk_latency_table { 182struct smu10_mclk_latency_table {
183 uint32_t count; 183 uint32_t count;
184 struct rv_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; 184 struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
185}; 185};
186 186
187struct rv_clock_voltage_dependency_record { 187struct smu10_clock_voltage_dependency_record {
188 uint32_t clk; 188 uint32_t clk;
189 uint32_t vol; 189 uint32_t vol;
190}; 190};
191 191
192 192
193struct rv_voltage_dependency_table { 193struct smu10_voltage_dependency_table {
194 uint32_t count; 194 uint32_t count;
195 struct rv_clock_voltage_dependency_record entries[1]; 195 struct smu10_clock_voltage_dependency_record entries[1];
196}; 196};
197 197
198struct rv_clock_voltage_information { 198struct smu10_clock_voltage_information {
199 struct rv_voltage_dependency_table *vdd_dep_on_dcefclk; 199 struct smu10_voltage_dependency_table *vdd_dep_on_dcefclk;
200 struct rv_voltage_dependency_table *vdd_dep_on_socclk; 200 struct smu10_voltage_dependency_table *vdd_dep_on_socclk;
201 struct rv_voltage_dependency_table *vdd_dep_on_fclk; 201 struct smu10_voltage_dependency_table *vdd_dep_on_fclk;
202 struct rv_voltage_dependency_table *vdd_dep_on_mclk; 202 struct smu10_voltage_dependency_table *vdd_dep_on_mclk;
203 struct rv_voltage_dependency_table *vdd_dep_on_dispclk; 203 struct smu10_voltage_dependency_table *vdd_dep_on_dispclk;
204 struct rv_voltage_dependency_table *vdd_dep_on_dppclk; 204 struct smu10_voltage_dependency_table *vdd_dep_on_dppclk;
205 struct rv_voltage_dependency_table *vdd_dep_on_phyclk; 205 struct smu10_voltage_dependency_table *vdd_dep_on_phyclk;
206}; 206};
207 207
208struct rv_hwmgr { 208struct smu10_hwmgr {
209 uint32_t disable_driver_thermal_policy; 209 uint32_t disable_driver_thermal_policy;
210 uint32_t thermal_auto_throttling_treshold; 210 uint32_t thermal_auto_throttling_treshold;
211 struct rv_system_info sys_info; 211 struct smu10_system_info sys_info;
212 struct rv_mclk_latency_table mclk_latency_table; 212 struct smu10_mclk_latency_table mclk_latency_table;
213 213
214 uint32_t ddi_power_gating_disabled; 214 uint32_t ddi_power_gating_disabled;
215 215
216 struct rv_display_phy_info_entry display_phy_info; 216 struct smu10_display_phy_info_entry display_phy_info;
217 uint32_t dce_slow_sclk_threshold; 217 uint32_t dce_slow_sclk_threshold;
218 218
219 bool disp_clk_bypass; 219 bool disp_clk_bypass;
@@ -255,10 +255,10 @@ struct rv_hwmgr {
255 uint32_t fps_low_threshold; 255 uint32_t fps_low_threshold;
256 256
257 uint32_t dpm_flags; 257 uint32_t dpm_flags;
258 struct rv_dpm_entry sclk_dpm; 258 struct smu10_dpm_entry sclk_dpm;
259 struct rv_dpm_entry uvd_dpm; 259 struct smu10_dpm_entry uvd_dpm;
260 struct rv_dpm_entry vce_dpm; 260 struct smu10_dpm_entry vce_dpm;
261 struct rv_dpm_entry acp_dpm; 261 struct smu10_dpm_entry acp_dpm;
262 bool acp_power_up_no_dsp; 262 bool acp_power_up_no_dsp;
263 263
264 uint32_t max_sclk_level; 264 uint32_t max_sclk_level;
@@ -291,7 +291,7 @@ struct rv_hwmgr {
291 291
292 bool gfx_off_controled_by_driver; 292 bool gfx_off_controled_by_driver;
293 Watermarks_t water_marks_table; 293 Watermarks_t water_marks_table;
294 struct rv_clock_voltage_information clock_vol_info; 294 struct smu10_clock_voltage_information clock_vol_info;
295 DpmClocks_t clock_table; 295 DpmClocks_t clock_table;
296 296
297 uint32_t active_process_mask; 297 uint32_t active_process_mask;
@@ -302,21 +302,21 @@ struct rv_hwmgr {
302 302
303struct pp_hwmgr; 303struct pp_hwmgr;
304 304
305int rv_init_function_pointers(struct pp_hwmgr *hwmgr); 305int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
306 306
307/* UMD PState Raven Msg Parameters in MHz */ 307/* UMD PState SMU10 Msg Parameters in MHz */
308#define RAVEN_UMD_PSTATE_GFXCLK 700 308#define SMU10_UMD_PSTATE_GFXCLK 700
309#define RAVEN_UMD_PSTATE_SOCCLK 626 309#define SMU10_UMD_PSTATE_SOCCLK 626
310#define RAVEN_UMD_PSTATE_FCLK 933 310#define SMU10_UMD_PSTATE_FCLK 933
311#define RAVEN_UMD_PSTATE_VCE 0x03C00320 311#define SMU10_UMD_PSTATE_VCE 0x03C00320
312 312
313#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100 313#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
314#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757 314#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
315#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200 315#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
316 316
317#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200 317#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
318#define RAVEN_UMD_PSTATE_MIN_FCLK 400 318#define SMU10_UMD_PSTATE_MIN_FCLK 400
319#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200 319#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
320#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C 320#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
321 321
322#endif 322#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
index ae59a3fdea8a..edb68e302f6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef RAVEN_INC_H 24#ifndef SMU10_INC_H
25#define RAVEN_INC_H 25#define SMU10_INC_H
26 26
27 27
28#include "asic_reg/mp/mp_10_0_default.h" 28#include "asic_reg/mp/mp_10_0_default.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 7b54d48b2ce2..1ddce023218a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -25,7 +25,6 @@
25#define _SMU7_CLOCK_POWER_GATING_H_ 25#define _SMU7_CLOCK_POWER_GATING_H_
26 26
27#include "smu7_hwmgr.h" 27#include "smu7_hwmgr.h"
28#include "pp_asicblocks.h"
29 28
30void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); 29void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
31void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); 30void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index df2a312ca6c9..7a87209f7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -40,7 +40,6 @@
40 40
41#include "hwmgr.h" 41#include "hwmgr.h"
42#include "smu7_hwmgr.h" 42#include "smu7_hwmgr.h"
43#include "smu7_smumgr.h"
44#include "smu_ucode_xfer_vi.h" 43#include "smu_ucode_xfer_vi.h"
45#include "smu7_powertune.h" 44#include "smu7_powertune.h"
46#include "smu7_dyn_defaults.h" 45#include "smu7_dyn_defaults.h"
@@ -1353,12 +1352,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1353 1352
1354static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) 1353static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1355{ 1354{
1356 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 1355 if (!hwmgr->avfs_supported)
1357
1358 if (smu_data == NULL)
1359 return -EINVAL;
1360
1361 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1362 return 0; 1356 return 0;
1363 1357
1364 if (enable) { 1358 if (enable) {
@@ -1382,13 +1376,9 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1382 1376
1383static int smu7_update_avfs(struct pp_hwmgr *hwmgr) 1377static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1384{ 1378{
1385 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
1386 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1379 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1387 1380
1388 if (smu_data == NULL) 1381 if (!hwmgr->avfs_supported)
1389 return -EINVAL;
1390
1391 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1392 return 0; 1382 return 0;
1393 1383
1394 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 1384 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
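The smu7_hwmgr.c hunks above drop the backend-specific AVFS status peek (smu7_smumgr's avfs_btc_status, plus the NULL check it required) in favor of a single hwmgr->avfs_supported flag, turning both functions into a plain early-return guard. A tiny standalone sketch of that guard shape, with invented names:

    /* Early-return guard sketch: one boolean capability flag replaces
     * a backend-specific status enum. */
    #include <stdbool.h>
    #include <stdio.h>

    struct mgr { bool avfs_supported; };

    static int avfs_control(struct mgr *m, bool enable)
    {
    	if (!m->avfs_supported)
    		return 0;	/* silently succeed when the feature is absent */

    	printf("AVFS %s\n", enable ? "enabled" : "disabled");
    	return 0;
    }

    int main(void)
    {
    	struct mgr m = { .avfs_supported = false };
    	avfs_control(&m, true);		/* no-op */
    	m.avfs_supported = true;
    	avfs_control(&m, true);		/* takes effect */
    	return 0;
    }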
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 8c1f884ae555..75a465f771f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -35,9 +35,8 @@
35#include "hwmgr.h" 35#include "hwmgr.h"
36#include "hardwaremanager.h" 36#include "hardwaremanager.h"
37#include "cz_ppsmc.h" 37#include "cz_ppsmc.h"
38#include "cz_hwmgr.h" 38#include "smu8_hwmgr.h"
39#include "power_state.h" 39#include "power_state.h"
40#include "cz_clockpowergating.h"
41#include "pp_thermal.h" 40#include "pp_thermal.h"
42 41
43#define ixSMUSVI_NB_CURRENTVID 0xD8230044 42#define ixSMUSVI_NB_CURRENTVID 0xD8230044
@@ -47,26 +46,26 @@
47#define CURRENT_GFX_VID_MASK 0xff000000 46#define CURRENT_GFX_VID_MASK 0xff000000
48#define CURRENT_GFX_VID__SHIFT 24 47#define CURRENT_GFX_VID__SHIFT 24
49 48
50static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic; 49static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
51 50
52static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps) 51static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
53{ 52{
54 if (PhwCz_Magic != hw_ps->magic) 53 if (smu8_magic != hw_ps->magic)
55 return NULL; 54 return NULL;
56 55
57 return (struct cz_power_state *)hw_ps; 56 return (struct smu8_power_state *)hw_ps;
58} 57}
59 58
60static const struct cz_power_state *cast_const_PhwCzPowerState( 59static const struct smu8_power_state *cast_const_smu8_power_state(
61 const struct pp_hw_power_state *hw_ps) 60 const struct pp_hw_power_state *hw_ps)
62{ 61{
63 if (PhwCz_Magic != hw_ps->magic) 62 if (smu8_magic != hw_ps->magic)
64 return NULL; 63 return NULL;
65 64
66 return (struct cz_power_state *)hw_ps; 65 return (struct smu8_power_state *)hw_ps;
67} 66}
68 67
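The cast helpers above are this file's type-safety check: every smu8 power state carries a magic value (PHM_Cz_Magic), and the downcast from the generic pp_hw_power_state returns NULL when the magic does not match. A standalone model of that tagged-downcast idiom; the struct layout and magic constant are invented for the demo:

    /* Tagged-downcast sketch modeled on cast_smu8_power_state(). */
    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_MAGIC 0x1f1e33UL

    struct hw_state { unsigned long magic; };
    struct demo_state {
    	struct hw_state hw;	/* generic header must come first */
    	unsigned int level;
    };

    static struct demo_state *cast_demo_state(struct hw_state *hw)
    {
    	if (hw->magic != DEMO_MAGIC)
    		return NULL;	/* not one of ours */
    	return (struct demo_state *)hw;
    }

    int main(void)
    {
    	struct demo_state s = { { DEMO_MAGIC }, 3 };
    	struct demo_state *p = cast_demo_state(&s.hw);

    	printf("%s\n", p ? "cast ok" : "wrong magic");
    	return 0;
    }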
69static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, 68static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
70 uint32_t clock, uint32_t msg) 69 uint32_t clock, uint32_t msg)
71{ 70{
72 int i = 0; 71 int i = 0;
@@ -97,7 +96,7 @@ static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
97 return i; 96 return i;
98} 97}
99 98
100static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr, 99static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
101 uint32_t clock, uint32_t msg) 100 uint32_t clock, uint32_t msg)
102{ 101{
103 int i = 0; 102 int i = 0;
@@ -127,7 +126,7 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
127 return i; 126 return i;
128} 127}
129 128
130static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr, 129static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
131 uint32_t clock, uint32_t msg) 130 uint32_t clock, uint32_t msg)
132{ 131{
133 int i = 0; 132 int i = 0;
@@ -158,42 +157,42 @@ static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
158 return i; 157 return i;
159} 158}
160 159
161static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) 160static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
162{ 161{
163 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 162 struct smu8_hwmgr *data = hwmgr->backend;
164 163
165 if (cz_hwmgr->max_sclk_level == 0) { 164 if (data->max_sclk_level == 0) {
166 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); 165 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
167 cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1; 166 data->max_sclk_level = smum_get_argument(hwmgr) + 1;
168 } 167 }
169 168
170 return cz_hwmgr->max_sclk_level; 169 return data->max_sclk_level;
171} 170}
172 171
173static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) 172static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
174{ 173{
175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 174 struct smu8_hwmgr *data = hwmgr->backend;
176 struct amdgpu_device *adev = hwmgr->adev; 175 struct amdgpu_device *adev = hwmgr->adev;
177 176
178 cz_hwmgr->gfx_ramp_step = 256*25/100; 177 data->gfx_ramp_step = 256*25/100;
179 cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ 178 data->gfx_ramp_delay = 1; /* by default, we delay 1us */
180 179
181 cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; 180 data->mgcg_cgtt_local0 = 0x00000000;
182 cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; 181 data->mgcg_cgtt_local1 = 0x00000000;
183 cz_hwmgr->clock_slow_down_freq = 25000; 182 data->clock_slow_down_freq = 25000;
184 cz_hwmgr->skip_clock_slow_down = 1; 183 data->skip_clock_slow_down = 1;
185 cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ 184 data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
186 cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ 185 data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
187 cz_hwmgr->voting_rights_clients = 0x00C00033; 186 data->voting_rights_clients = 0x00C00033;
188 cz_hwmgr->static_screen_threshold = 8; 187 data->static_screen_threshold = 8;
189 cz_hwmgr->ddi_power_gating_disabled = 0; 188 data->ddi_power_gating_disabled = 0;
190 cz_hwmgr->bapm_enabled = 1; 189 data->bapm_enabled = 1;
191 cz_hwmgr->voltage_drop_threshold = 0; 190 data->voltage_drop_threshold = 0;
192 cz_hwmgr->gfx_power_gating_threshold = 500; 191 data->gfx_power_gating_threshold = 500;
193 cz_hwmgr->vce_slow_sclk_threshold = 20000; 192 data->vce_slow_sclk_threshold = 20000;
194 cz_hwmgr->dce_slow_sclk_threshold = 30000; 193 data->dce_slow_sclk_threshold = 30000;
195 cz_hwmgr->disable_driver_thermal_policy = 1; 194 data->disable_driver_thermal_policy = 1;
196 cz_hwmgr->disable_nb_ps3_in_battery = 0; 195 data->disable_nb_ps3_in_battery = 0;
197 196
198 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 197 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
199 PHM_PlatformCaps_ABM); 198 PHM_PlatformCaps_ABM);
@@ -204,14 +203,14 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
204 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 203 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
205 PHM_PlatformCaps_DynamicM3Arbiter); 204 PHM_PlatformCaps_DynamicM3Arbiter);
206 205
207 cz_hwmgr->override_dynamic_mgpg = 1; 206 data->override_dynamic_mgpg = 1;
208 207
209 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
210 PHM_PlatformCaps_DynamicPatchPowerState); 209 PHM_PlatformCaps_DynamicPatchPowerState);
211 210
212 cz_hwmgr->thermal_auto_throttling_treshold = 0; 211 data->thermal_auto_throttling_treshold = 0;
213 cz_hwmgr->tdr_clock = 0; 212 data->tdr_clock = 0;
214 cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; 213 data->disable_gfx_power_gating_in_uvd = 0;
215 214
216 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_DynamicUVDState); 216 PHM_PlatformCaps_DynamicUVDState);
@@ -221,10 +220,10 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
221 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 220 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_VCEDPM); 221 PHM_PlatformCaps_VCEDPM);
223 222
224 cz_hwmgr->cc6_settings.cpu_cc6_disable = false; 223 data->cc6_settings.cpu_cc6_disable = false;
225 cz_hwmgr->cc6_settings.cpu_pstate_disable = false; 224 data->cc6_settings.cpu_pstate_disable = false;
226 cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false; 225 data->cc6_settings.nb_pstate_switch_disable = false;
227 cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0; 226 data->cc6_settings.cpu_pstate_separation_time = 0;
228 227
229 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 228 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_DisableVoltageIsland); 229 PHM_PlatformCaps_DisableVoltageIsland);
@@ -245,30 +244,30 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
245 return 0; 244 return 0;
246} 245}
247 246
248static uint32_t cz_convert_8Bit_index_to_voltage( 247static uint32_t smu8_convert_8Bit_index_to_voltage(
249 struct pp_hwmgr *hwmgr, uint16_t voltage) 248 struct pp_hwmgr *hwmgr, uint16_t voltage)
250{ 249{
251 return 6200 - (voltage * 25); 250 return 6200 - (voltage * 25);
252} 251}
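smu8_convert_8Bit_index_to_voltage() is pure arithmetic: v = 6200 - 25 * index, so each index step lowers the result by 25 and index 0 maps to 6200 (the unit follows whatever encoding the VID table uses; the diff does not spell it out). A worked example:

    /* Worked example of the 8-bit VID conversion above: v = 6200 - 25*i. */
    #include <stdio.h>

    int main(void)
    {
    	for (unsigned int i = 0; i <= 8; i += 4)
    		printf("index %u -> %u\n", i, 6200 - i * 25);
    	/* prints: index 0 -> 6200, index 4 -> 6100, index 8 -> 6000 */
    	return 0;
    }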
253 252
254static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, 253static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
255 struct phm_clock_and_voltage_limits *table) 254 struct phm_clock_and_voltage_limits *table)
256{ 255{
257 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; 256 struct smu8_hwmgr *data = hwmgr->backend;
258 struct cz_sys_info *sys_info = &cz_hwmgr->sys_info; 257 struct smu8_sys_info *sys_info = &data->sys_info;
259 struct phm_clock_voltage_dependency_table *dep_table = 258 struct phm_clock_voltage_dependency_table *dep_table =
260 hwmgr->dyn_state.vddc_dependency_on_sclk; 259 hwmgr->dyn_state.vddc_dependency_on_sclk;
261 260
262 if (dep_table->count > 0) { 261 if (dep_table->count > 0) {
263 table->sclk = dep_table->entries[dep_table->count-1].clk; 262 table->sclk = dep_table->entries[dep_table->count-1].clk;
264 table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr, 263 table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
265 (uint16_t)dep_table->entries[dep_table->count-1].v); 264 (uint16_t)dep_table->entries[dep_table->count-1].v);
266 } 265 }
267 table->mclk = sys_info->nbp_memory_clock[0]; 266 table->mclk = sys_info->nbp_memory_clock[0];
268 return 0; 267 return 0;
269} 268}
270 269
271static int cz_init_dynamic_state_adjustment_rule_settings( 270static int smu8_init_dynamic_state_adjustment_rule_settings(
272 struct pp_hwmgr *hwmgr, 271 struct pp_hwmgr *hwmgr,
273 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) 272 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
274{ 273{
@@ -306,9 +305,9 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
306 return 0; 305 return 0;
307} 306}
308 307
309static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) 308static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
310{ 309{
311 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend; 310 struct smu8_hwmgr *data = hwmgr->backend;
312 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL; 311 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
313 uint32_t i; 312 uint32_t i;
314 int result = 0; 313 int result = 0;
@@ -330,67 +329,67 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
330 return -EINVAL; 329 return -EINVAL;
331 } 330 }
332 331
333 cz_hwmgr->sys_info.bootup_uma_clock = 332 data->sys_info.bootup_uma_clock =
334 le32_to_cpu(info->ulBootUpUMAClock); 333 le32_to_cpu(info->ulBootUpUMAClock);
335 334
336 cz_hwmgr->sys_info.bootup_engine_clock = 335 data->sys_info.bootup_engine_clock =
337 le32_to_cpu(info->ulBootUpEngineClock); 336 le32_to_cpu(info->ulBootUpEngineClock);
338 337
339 cz_hwmgr->sys_info.dentist_vco_freq = 338 data->sys_info.dentist_vco_freq =
340 le32_to_cpu(info->ulDentistVCOFreq); 339 le32_to_cpu(info->ulDentistVCOFreq);
341 340
342 cz_hwmgr->sys_info.system_config = 341 data->sys_info.system_config =
343 le32_to_cpu(info->ulSystemConfig); 342 le32_to_cpu(info->ulSystemConfig);
344 343
345 cz_hwmgr->sys_info.bootup_nb_voltage_index = 344 data->sys_info.bootup_nb_voltage_index =
346 le16_to_cpu(info->usBootUpNBVoltage); 345 le16_to_cpu(info->usBootUpNBVoltage);
347 346
348 cz_hwmgr->sys_info.htc_hyst_lmt = 347 data->sys_info.htc_hyst_lmt =
349 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt; 348 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
350 349
351 cz_hwmgr->sys_info.htc_tmp_lmt = 350 data->sys_info.htc_tmp_lmt =
352 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt; 351 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
353 352
354 if (cz_hwmgr->sys_info.htc_tmp_lmt <= 353 if (data->sys_info.htc_tmp_lmt <=
355 cz_hwmgr->sys_info.htc_hyst_lmt) { 354 data->sys_info.htc_hyst_lmt) {
356 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); 355 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
357 return -EINVAL; 356 return -EINVAL;
358 } 357 }
359 358
360 cz_hwmgr->sys_info.nb_dpm_enable = 359 data->sys_info.nb_dpm_enable =
361 cz_hwmgr->enable_nb_ps_policy && 360 data->enable_nb_ps_policy &&
362 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1); 361 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
363 362
364 for (i = 0; i < CZ_NUM_NBPSTATES; i++) { 363 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
365 if (i < CZ_NUM_NBPMEMORYCLOCK) { 364 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
366 cz_hwmgr->sys_info.nbp_memory_clock[i] = 365 data->sys_info.nbp_memory_clock[i] =
367 le32_to_cpu(info->ulNbpStateMemclkFreq[i]); 366 le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
368 } 367 }
369 cz_hwmgr->sys_info.nbp_n_clock[i] = 368 data->sys_info.nbp_n_clock[i] =
370 le32_to_cpu(info->ulNbpStateNClkFreq[i]); 369 le32_to_cpu(info->ulNbpStateNClkFreq[i]);
371 } 370 }
372 371
373 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) { 372 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
374 cz_hwmgr->sys_info.display_clock[i] = 373 data->sys_info.display_clock[i] =
375 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK); 374 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
376 } 375 }
377 376
378 /* Here use 4 levels, make sure not exceed */ 377 /* Here use 4 levels, make sure not exceed */
379 for (i = 0; i < CZ_NUM_NBPSTATES; i++) { 378 for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
380 cz_hwmgr->sys_info.nbp_voltage_index[i] = 379 data->sys_info.nbp_voltage_index[i] =
381 le16_to_cpu(info->usNBPStateVoltage[i]); 380 le16_to_cpu(info->usNBPStateVoltage[i]);
382 } 381 }
383 382
384 if (!cz_hwmgr->sys_info.nb_dpm_enable) { 383 if (!data->sys_info.nb_dpm_enable) {
385 for (i = 1; i < CZ_NUM_NBPSTATES; i++) { 384 for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
386 if (i < CZ_NUM_NBPMEMORYCLOCK) { 385 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
387 cz_hwmgr->sys_info.nbp_memory_clock[i] = 386 data->sys_info.nbp_memory_clock[i] =
388 cz_hwmgr->sys_info.nbp_memory_clock[0]; 387 data->sys_info.nbp_memory_clock[0];
389 } 388 }
390 cz_hwmgr->sys_info.nbp_n_clock[i] = 389 data->sys_info.nbp_n_clock[i] =
391 cz_hwmgr->sys_info.nbp_n_clock[0]; 390 data->sys_info.nbp_n_clock[0];
392 cz_hwmgr->sys_info.nbp_voltage_index[i] = 391 data->sys_info.nbp_voltage_index[i] =
393 cz_hwmgr->sys_info.nbp_voltage_index[0]; 392 data->sys_info.nbp_voltage_index[0];
394 } 393 }
395 } 394 }
396 395
@@ -400,40 +399,40 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
400 PHM_PlatformCaps_EnableDFSBypass); 399 PHM_PlatformCaps_EnableDFSBypass);
401 } 400 }
402 401
403 cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber; 402 data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
404 403
405 cz_construct_max_power_limits_table (hwmgr, 404 smu8_construct_max_power_limits_table (hwmgr,
406 &hwmgr->dyn_state.max_clock_voltage_on_ac); 405 &hwmgr->dyn_state.max_clock_voltage_on_ac);
407 406
408 cz_init_dynamic_state_adjustment_rule_settings(hwmgr, 407 smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
409 &info->sDISPCLK_Voltage[0]); 408 &info->sDISPCLK_Voltage[0]);
410 409
411 return result; 410 return result;
412} 411}
413 412
414static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) 413static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
415{ 414{
416 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 415 struct smu8_hwmgr *data = hwmgr->backend;
417 416
418 cz_hwmgr->boot_power_level.engineClock = 417 data->boot_power_level.engineClock =
419 cz_hwmgr->sys_info.bootup_engine_clock; 418 data->sys_info.bootup_engine_clock;
420 419
421 cz_hwmgr->boot_power_level.vddcIndex = 420 data->boot_power_level.vddcIndex =
422 (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; 421 (uint8_t)data->sys_info.bootup_nb_voltage_index;
423 422
424 cz_hwmgr->boot_power_level.dsDividerIndex = 0; 423 data->boot_power_level.dsDividerIndex = 0;
425 cz_hwmgr->boot_power_level.ssDividerIndex = 0; 424 data->boot_power_level.ssDividerIndex = 0;
426 cz_hwmgr->boot_power_level.allowGnbSlow = 1; 425 data->boot_power_level.allowGnbSlow = 1;
427 cz_hwmgr->boot_power_level.forceNBPstate = 0; 426 data->boot_power_level.forceNBPstate = 0;
428 cz_hwmgr->boot_power_level.hysteresis_up = 0; 427 data->boot_power_level.hysteresis_up = 0;
429 cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; 428 data->boot_power_level.numSIMDToPowerDown = 0;
430 cz_hwmgr->boot_power_level.display_wm = 0; 429 data->boot_power_level.display_wm = 0;
431 cz_hwmgr->boot_power_level.vce_wm = 0; 430 data->boot_power_level.vce_wm = 0;
432 431
433 return 0; 432 return 0;
434} 433}
435 434
436static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) 435static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
437{ 436{
438 struct SMU8_Fusion_ClkTable *clock_table; 437 struct SMU8_Fusion_ClkTable *clock_table;
439 int ret; 438 int ret;
@@ -463,18 +462,18 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
463 clock_table = (struct SMU8_Fusion_ClkTable *)table; 462 clock_table = (struct SMU8_Fusion_ClkTable *)table;
464 463
465 /* patch clock table */ 464 /* patch clock table */
466 PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 465 PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
467 "Dependency table entry exceeds max limit!", return -EINVAL;); 466 "Dependency table entry exceeds max limit!", return -EINVAL;);
468 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 467 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
469 "Dependency table entry exceeds max limit!", return -EINVAL;); 468 "Dependency table entry exceeds max limit!", return -EINVAL;);
470 PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 469 PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
471 "Dependency table entry exceeds max limit!", return -EINVAL;); 470 "Dependency table entry exceeds max limit!", return -EINVAL;);
472 PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 471 PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
473 "Dependency table entry exceeds max limit!", return -EINVAL;); 472 "Dependency table entry exceeds max limit!", return -EINVAL;);
474 PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS), 473 PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
475 "Dependency table entry exceeds max limit!", return -EINVAL;); 474 "Dependency table entry exceeds max limit!", return -EINVAL;);
476 475
477 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { 476 for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
478 477
479 /* vddc_sclk */ 478 /* vddc_sclk */
480 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = 479 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
@@ -552,9 +551,9 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
552 return ret; 551 return ret;
553} 552}
554 553
555static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) 554static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
556{ 555{
557 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 556 struct smu8_hwmgr *data = hwmgr->backend;
558 struct phm_clock_voltage_dependency_table *table = 557 struct phm_clock_voltage_dependency_table *table =
559 hwmgr->dyn_state.vddc_dependency_on_sclk; 558 hwmgr->dyn_state.vddc_dependency_on_sclk;
560 unsigned long clock = 0, level; 559 unsigned long clock = 0, level;
@@ -562,25 +561,25 @@ static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
562 if (NULL == table || table->count <= 0) 561 if (NULL == table || table->count <= 0)
563 return -EINVAL; 562 return -EINVAL;
564 563
565 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 564 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
566 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; 565 data->sclk_dpm.hard_min_clk = table->entries[0].clk;
567 566
568 level = cz_get_max_sclk_level(hwmgr) - 1; 567 level = smu8_get_max_sclk_level(hwmgr) - 1;
569 568
570 if (level < table->count) 569 if (level < table->count)
571 clock = table->entries[level].clk; 570 clock = table->entries[level].clk;
572 else 571 else
573 clock = table->entries[table->count - 1].clk; 572 clock = table->entries[table->count - 1].clk;
574 573
575 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 574 data->sclk_dpm.soft_max_clk = clock;
576 cz_hwmgr->sclk_dpm.hard_max_clk = clock; 575 data->sclk_dpm.hard_max_clk = clock;
577 576
578 return 0; 577 return 0;
579} 578}
580 579
581static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) 580static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
582{ 581{
583 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 582 struct smu8_hwmgr *data = hwmgr->backend;
584 struct phm_uvd_clock_voltage_dependency_table *table = 583 struct phm_uvd_clock_voltage_dependency_table *table =
585 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 584 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
586 unsigned long clock = 0, level; 585 unsigned long clock = 0, level;
@@ -588,8 +587,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
588 if (NULL == table || table->count <= 0) 587 if (NULL == table || table->count <= 0)
589 return -EINVAL; 588 return -EINVAL;
590 589
591 cz_hwmgr->uvd_dpm.soft_min_clk = 0; 590 data->uvd_dpm.soft_min_clk = 0;
592 cz_hwmgr->uvd_dpm.hard_min_clk = 0; 591 data->uvd_dpm.hard_min_clk = 0;
593 592
594 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); 593 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
595 level = smum_get_argument(hwmgr); 594 level = smum_get_argument(hwmgr);
@@ -599,15 +598,15 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
599 else 598 else
600 clock = table->entries[table->count - 1].vclk; 599 clock = table->entries[table->count - 1].vclk;
601 600
602 cz_hwmgr->uvd_dpm.soft_max_clk = clock; 601 data->uvd_dpm.soft_max_clk = clock;
603 cz_hwmgr->uvd_dpm.hard_max_clk = clock; 602 data->uvd_dpm.hard_max_clk = clock;
604 603
605 return 0; 604 return 0;
606} 605}
607 606
608static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) 607static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
609{ 608{
610 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 609 struct smu8_hwmgr *data = hwmgr->backend;
611 struct phm_vce_clock_voltage_dependency_table *table = 610 struct phm_vce_clock_voltage_dependency_table *table =
612 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 611 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
613 unsigned long clock = 0, level; 612 unsigned long clock = 0, level;
@@ -615,8 +614,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
615 if (NULL == table || table->count <= 0) 614 if (NULL == table || table->count <= 0)
616 return -EINVAL; 615 return -EINVAL;
617 616
618 cz_hwmgr->vce_dpm.soft_min_clk = 0; 617 data->vce_dpm.soft_min_clk = 0;
619 cz_hwmgr->vce_dpm.hard_min_clk = 0; 618 data->vce_dpm.hard_min_clk = 0;
620 619
621 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); 620 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
622 level = smum_get_argument(hwmgr); 621 level = smum_get_argument(hwmgr);
@@ -626,15 +625,15 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
626 else 625 else
627 clock = table->entries[table->count - 1].ecclk; 626 clock = table->entries[table->count - 1].ecclk;
628 627
629 cz_hwmgr->vce_dpm.soft_max_clk = clock; 628 data->vce_dpm.soft_max_clk = clock;
630 cz_hwmgr->vce_dpm.hard_max_clk = clock; 629 data->vce_dpm.hard_max_clk = clock;
631 630
632 return 0; 631 return 0;
633} 632}
634 633
635static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) 634static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
636{ 635{
637 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 636 struct smu8_hwmgr *data = hwmgr->backend;
638 struct phm_acp_clock_voltage_dependency_table *table = 637 struct phm_acp_clock_voltage_dependency_table *table =
639 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 638 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
640 unsigned long clock = 0, level; 639 unsigned long clock = 0, level;
@@ -642,8 +641,8 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
642 if (NULL == table || table->count <= 0) 641 if (NULL == table || table->count <= 0)
643 return -EINVAL; 642 return -EINVAL;
644 643
645 cz_hwmgr->acp_dpm.soft_min_clk = 0; 644 data->acp_dpm.soft_min_clk = 0;
646 cz_hwmgr->acp_dpm.hard_min_clk = 0; 645 data->acp_dpm.hard_min_clk = 0;
647 646
648 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); 647 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
649 level = smum_get_argument(hwmgr); 648 level = smum_get_argument(hwmgr);
@@ -653,32 +652,32 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
653 else 652 else
654 clock = table->entries[table->count - 1].acpclk; 653 clock = table->entries[table->count - 1].acpclk;
655 654
656 cz_hwmgr->acp_dpm.soft_max_clk = clock; 655 data->acp_dpm.soft_max_clk = clock;
657 cz_hwmgr->acp_dpm.hard_max_clk = clock; 656 data->acp_dpm.hard_max_clk = clock;
658 return 0; 657 return 0;
659} 658}
660 659
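The smu8_init_sclk/uvd/vce/acp_limit functions above all share one shape: seed the soft/hard minimums (entry 0 for sclk, zero for the others), ask the SMC for its max level, clamp that level into the dependency table, and take the clamped entry's clock as both soft and hard maximum. A standalone sketch of the clamp step, with invented table contents:

    /* Clamp sketch for the *_init_*_limit family: the SMC-reported max
     * level may exceed the dependency table, so clamp before indexing. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long clk_table[] = { 300, 600, 1080 };	/* made-up clocks */
    	unsigned long count = 3;
    	unsigned long level = 5;	/* pretend SMC answer, out of range */
    	unsigned long clock;

    	if (level < count)
    		clock = clk_table[level];
    	else
    		clock = clk_table[count - 1];	/* fall back to last entry */

    	printf("soft/hard max clk = %lu\n", clock);
    	return 0;
    }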
661static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr) 660static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
662{ 661{
663 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 662 struct smu8_hwmgr *data = hwmgr->backend;
664 663
665 cz_hwmgr->uvd_power_gated = false; 664 data->uvd_power_gated = false;
666 cz_hwmgr->vce_power_gated = false; 665 data->vce_power_gated = false;
667 cz_hwmgr->samu_power_gated = false; 666 data->samu_power_gated = false;
668 cz_hwmgr->acp_power_gated = false; 667 data->acp_power_gated = false;
669 cz_hwmgr->pgacpinit = true; 668 data->pgacpinit = true;
670} 669}
671 670
672static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr) 671static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
673{ 672{
674 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 673 struct smu8_hwmgr *data = hwmgr->backend;
675 674
676 cz_hwmgr->low_sclk_interrupt_threshold = 0; 675 data->low_sclk_interrupt_threshold = 0;
677} 676}
678 677
679static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) 678static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
680{ 679{
681 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 680 struct smu8_hwmgr *data = hwmgr->backend;
682 struct phm_clock_voltage_dependency_table *table = 681 struct phm_clock_voltage_dependency_table *table =
683 hwmgr->dyn_state.vddc_dependency_on_sclk; 682 hwmgr->dyn_state.vddc_dependency_on_sclk;
684 683
@@ -687,29 +686,29 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
687 unsigned long stable_pstate_sclk; 686 unsigned long stable_pstate_sclk;
688 unsigned long percentage; 687 unsigned long percentage;
689 688
690 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 689 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
691 level = cz_get_max_sclk_level(hwmgr) - 1; 690 level = smu8_get_max_sclk_level(hwmgr) - 1;
692 691
693 if (level < table->count) 692 if (level < table->count)
694 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk; 693 data->sclk_dpm.soft_max_clk = table->entries[level].clk;
695 else 694 else
696 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; 695 data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
697 696
698 clock = hwmgr->display_config.min_core_set_clock; 697 clock = hwmgr->display_config.min_core_set_clock;
699 if (clock == 0) 698 if (clock == 0)
700 pr_debug("min_core_set_clock not set\n"); 699 pr_debug("min_core_set_clock not set\n");
701 700
702 if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { 701 if (data->sclk_dpm.hard_min_clk != clock) {
703 cz_hwmgr->sclk_dpm.hard_min_clk = clock; 702 data->sclk_dpm.hard_min_clk = clock;
704 703
705 smum_send_msg_to_smc_with_parameter(hwmgr, 704 smum_send_msg_to_smc_with_parameter(hwmgr,
706 PPSMC_MSG_SetSclkHardMin, 705 PPSMC_MSG_SetSclkHardMin,
707 cz_get_sclk_level(hwmgr, 706 smu8_get_sclk_level(hwmgr,
708 cz_hwmgr->sclk_dpm.hard_min_clk, 707 data->sclk_dpm.hard_min_clk,
709 PPSMC_MSG_SetSclkHardMin)); 708 PPSMC_MSG_SetSclkHardMin));
710 } 709 }
711 710
712 clock = cz_hwmgr->sclk_dpm.soft_min_clk; 711 clock = data->sclk_dpm.soft_min_clk;
713 712
714 /* update minimum clocks for Stable P-State feature */ 713 /* update minimum clocks for Stable P-State feature */
715 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 714 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -723,36 +722,36 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
723 clock = stable_pstate_sclk; 722 clock = stable_pstate_sclk;
724 } 723 }
725 724
726 if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { 725 if (data->sclk_dpm.soft_min_clk != clock) {
727 cz_hwmgr->sclk_dpm.soft_min_clk = clock; 726 data->sclk_dpm.soft_min_clk = clock;
728 smum_send_msg_to_smc_with_parameter(hwmgr, 727 smum_send_msg_to_smc_with_parameter(hwmgr,
729 PPSMC_MSG_SetSclkSoftMin, 728 PPSMC_MSG_SetSclkSoftMin,
730 cz_get_sclk_level(hwmgr, 729 smu8_get_sclk_level(hwmgr,
731 cz_hwmgr->sclk_dpm.soft_min_clk, 730 data->sclk_dpm.soft_min_clk,
732 PPSMC_MSG_SetSclkSoftMin)); 731 PPSMC_MSG_SetSclkSoftMin));
733 } 732 }
734 733
735 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 734 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
736 PHM_PlatformCaps_StablePState) && 735 PHM_PlatformCaps_StablePState) &&
737 cz_hwmgr->sclk_dpm.soft_max_clk != clock) { 736 data->sclk_dpm.soft_max_clk != clock) {
738 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 737 data->sclk_dpm.soft_max_clk = clock;
739 smum_send_msg_to_smc_with_parameter(hwmgr, 738 smum_send_msg_to_smc_with_parameter(hwmgr,
740 PPSMC_MSG_SetSclkSoftMax, 739 PPSMC_MSG_SetSclkSoftMax,
741 cz_get_sclk_level(hwmgr, 740 smu8_get_sclk_level(hwmgr,
742 cz_hwmgr->sclk_dpm.soft_max_clk, 741 data->sclk_dpm.soft_max_clk,
743 PPSMC_MSG_SetSclkSoftMax)); 742 PPSMC_MSG_SetSclkSoftMax));
744 } 743 }
745 744
746 return 0; 745 return 0;
747} 746}
748 747
749static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) 748static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
750{ 749{
751 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 750 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
752 PHM_PlatformCaps_SclkDeepSleep)) { 751 PHM_PlatformCaps_SclkDeepSleep)) {
753 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; 752 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
754 if (clks == 0) 753 if (clks == 0)
755 clks = CZ_MIN_DEEP_SLEEP_SCLK; 754 clks = SMU8_MIN_DEEP_SLEEP_SCLK;
756 755
757 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); 756 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
758 757
@@ -764,21 +763,21 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
764 return 0; 763 return 0;
765} 764}
766 765
767static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) 766static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
768{ 767{
769 struct cz_hwmgr *cz_hwmgr = 768 struct smu8_hwmgr *data =
770 (struct cz_hwmgr *)(hwmgr->backend); 769 hwmgr->backend;
771 770
772 smum_send_msg_to_smc_with_parameter(hwmgr, 771 smum_send_msg_to_smc_with_parameter(hwmgr,
773 PPSMC_MSG_SetWatermarkFrequency, 772 PPSMC_MSG_SetWatermarkFrequency,
774 cz_hwmgr->sclk_dpm.soft_max_clk); 773 data->sclk_dpm.soft_max_clk);
775 774
776 return 0; 775 return 0;
777} 776}
778 777
779static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) 778static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
780{ 779{
781 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 780 struct smu8_hwmgr *hw_data = hwmgr->backend;
782 781
783 if (hw_data->is_nb_dpm_enabled) { 782 if (hw_data->is_nb_dpm_enabled) {
784 if (enable) { 783 if (enable) {
@@ -799,35 +798,35 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b
799 return 0; 798 return 0;
800} 799}
801 800
802static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) 801static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
803{ 802{
804 int ret = 0; 803 int ret = 0;
805 804
806 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 805 struct smu8_hwmgr *data = hwmgr->backend;
807 unsigned long dpm_features = 0; 806 unsigned long dpm_features = 0;
808 807
809 if (cz_hwmgr->is_nb_dpm_enabled) { 808 if (data->is_nb_dpm_enabled) {
810 cz_nbdpm_pstate_enable_disable(hwmgr, true, true); 809 smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
811 dpm_features |= NB_DPM_MASK; 810 dpm_features |= NB_DPM_MASK;
812 ret = smum_send_msg_to_smc_with_parameter( 811 ret = smum_send_msg_to_smc_with_parameter(
813 hwmgr, 812 hwmgr,
814 PPSMC_MSG_DisableAllSmuFeatures, 813 PPSMC_MSG_DisableAllSmuFeatures,
815 dpm_features); 814 dpm_features);
816 if (ret == 0) 815 if (ret == 0)
817 cz_hwmgr->is_nb_dpm_enabled = false; 816 data->is_nb_dpm_enabled = false;
818 } 817 }
819 818
820 return ret; 819 return ret;
821} 820}
822 821
823static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) 822static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
824{ 823{
825 int ret = 0; 824 int ret = 0;
826 825
827 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 826 struct smu8_hwmgr *data = hwmgr->backend;
828 unsigned long dpm_features = 0; 827 unsigned long dpm_features = 0;
829 828
830 if (!cz_hwmgr->is_nb_dpm_enabled) { 829 if (!data->is_nb_dpm_enabled) {
831 PP_DBG_LOG("enabling ALL SMU features.\n"); 830 PP_DBG_LOG("enabling ALL SMU features.\n");
832 dpm_features |= NB_DPM_MASK; 831 dpm_features |= NB_DPM_MASK;
833 ret = smum_send_msg_to_smc_with_parameter( 832 ret = smum_send_msg_to_smc_with_parameter(
@@ -835,94 +834,94 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
835 PPSMC_MSG_EnableAllSmuFeatures, 834 PPSMC_MSG_EnableAllSmuFeatures,
836 dpm_features); 835 dpm_features);
837 if (ret == 0) 836 if (ret == 0)
838 cz_hwmgr->is_nb_dpm_enabled = true; 837 data->is_nb_dpm_enabled = true;
839 } 838 }
840 839
841 return ret; 840 return ret;
842} 841}
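
Editor's note: `smu8_enable_nb_dpm()` and `smu8_disable_nb_dpm()` share a guarded-commit pattern — the cached `is_nb_dpm_enabled` flag flips only after the firmware message returns success, so a failed message leaves the driver's view consistent with the hardware. A sketch of that pattern; `fw_set_features()` is a hypothetical stand-in for the `PPSMC_MSG_EnableAllSmuFeatures`/`DisableAllSmuFeatures` call, and the mask value is made up.

#include <stdbool.h>
#include <stdio.h>

#define NB_DPM_MASK 0x00000004UL        /* stand-in value for the sketch */

static bool nb_dpm_enabled;

static int fw_set_features(unsigned long mask, bool enable)
{
        /* hypothetical stand-in for the SMU feature message */
        printf("fw: %s 0x%lx\n", enable ? "enable" : "disable", mask);
        return 0;                       /* pretend the firmware acknowledged */
}

static int enable_nb_dpm(void)
{
        int ret = 0;

        if (!nb_dpm_enabled) {
                ret = fw_set_features(NB_DPM_MASK, true);
                if (ret == 0)           /* commit cached state only on success */
                        nb_dpm_enabled = true;
        }
        return ret;
}

int main(void)
{
        return enable_nb_dpm();
}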
843 842
844static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) 843static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
845{ 844{
846 bool disable_switch; 845 bool disable_switch;
847 bool enable_low_mem_state; 846 bool enable_low_mem_state;
848 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 847 struct smu8_hwmgr *hw_data = hwmgr->backend;
849 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input; 848 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
850 const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state); 849 const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
851 850
852 if (hw_data->sys_info.nb_dpm_enable) { 851 if (hw_data->sys_info.nb_dpm_enable) {
853 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false; 852 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
854 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true; 853 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
855 854
856 if (pnew_state->action == FORCE_HIGH) 855 if (pnew_state->action == FORCE_HIGH)
857 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch); 856 smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
858 else if (pnew_state->action == CANCEL_FORCE_HIGH) 857 else if (pnew_state->action == CANCEL_FORCE_HIGH)
859 cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch); 858 smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
860 else 859 else
861 cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch); 860 smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
862 } 861 }
863 return 0; 862 return 0;
864} 863}
865 864
866static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 865static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
867{ 866{
868 int ret = 0; 867 int ret = 0;
869 868
870 cz_update_sclk_limit(hwmgr); 869 smu8_update_sclk_limit(hwmgr);
871 cz_set_deep_sleep_sclk_threshold(hwmgr); 870 smu8_set_deep_sleep_sclk_threshold(hwmgr);
872 cz_set_watermark_threshold(hwmgr); 871 smu8_set_watermark_threshold(hwmgr);
873 ret = cz_enable_nb_dpm(hwmgr); 872 ret = smu8_enable_nb_dpm(hwmgr);
874 if (ret) 873 if (ret)
875 return ret; 874 return ret;
876 cz_update_low_mem_pstate(hwmgr, input); 875 smu8_update_low_mem_pstate(hwmgr, input);
877 876
878 return 0; 877 return 0;
879}; 878};
880 879
881 880
882static int cz_setup_asic_task(struct pp_hwmgr *hwmgr) 881static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
883{ 882{
884 int ret; 883 int ret;
885 884
886 ret = cz_upload_pptable_to_smu(hwmgr); 885 ret = smu8_upload_pptable_to_smu(hwmgr);
887 if (ret) 886 if (ret)
888 return ret; 887 return ret;
889 ret = cz_init_sclk_limit(hwmgr); 888 ret = smu8_init_sclk_limit(hwmgr);
890 if (ret) 889 if (ret)
891 return ret; 890 return ret;
892 ret = cz_init_uvd_limit(hwmgr); 891 ret = smu8_init_uvd_limit(hwmgr);
893 if (ret) 892 if (ret)
894 return ret; 893 return ret;
895 ret = cz_init_vce_limit(hwmgr); 894 ret = smu8_init_vce_limit(hwmgr);
896 if (ret) 895 if (ret)
897 return ret; 896 return ret;
898 ret = cz_init_acp_limit(hwmgr); 897 ret = smu8_init_acp_limit(hwmgr);
899 if (ret) 898 if (ret)
900 return ret; 899 return ret;
901 900
902 cz_init_power_gate_state(hwmgr); 901 smu8_init_power_gate_state(hwmgr);
903 cz_init_sclk_threshold(hwmgr); 902 smu8_init_sclk_threshold(hwmgr);
904 903
905 return 0; 904 return 0;
906} 905}
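
Editor's note: `smu8_setup_asic_task()` is a chain of `ret = step(); if (ret) return ret;` calls — the usual kernel idiom for aborting initialization at the first failing step. The same flow can be sketched as a table of step functions; `step_a`/`step_b`/`step_c` are placeholders, not driver functions.

#include <stdio.h>

static int step_a(void) { return 0; }   /* placeholder init steps */
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }

static int setup(void)
{
        int (*const steps[])(void) = { step_a, step_b, step_c };
        unsigned int i;
        int ret;

        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
                ret = steps[i]();       /* first failing step aborts the chain */
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        printf("setup -> %d\n", setup());
        return 0;
}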
907 906
908static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) 907static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
909{ 908{
910 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 909 struct smu8_hwmgr *hw_data = hwmgr->backend;
911 910
912 hw_data->disp_clk_bypass_pending = false; 911 hw_data->disp_clk_bypass_pending = false;
913 hw_data->disp_clk_bypass = false; 912 hw_data->disp_clk_bypass = false;
914} 913}
915 914
916static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) 915static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
917{ 916{
918 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 917 struct smu8_hwmgr *hw_data = hwmgr->backend;
919 918
920 hw_data->is_nb_dpm_enabled = false; 919 hw_data->is_nb_dpm_enabled = false;
921} 920}
922 921
923static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) 922static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
924{ 923{
925 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 924 struct smu8_hwmgr *hw_data = hwmgr->backend;
926 925
927 hw_data->cc6_settings.cc6_setting_changed = false; 926 hw_data->cc6_settings.cc6_setting_changed = false;
928 hw_data->cc6_settings.cpu_pstate_separation_time = 0; 927 hw_data->cc6_settings.cpu_pstate_separation_time = 0;
@@ -930,45 +929,47 @@ static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
930 hw_data->cc6_settings.cpu_pstate_disable = false; 929 hw_data->cc6_settings.cpu_pstate_disable = false;
931} 930}
932 931
933static int cz_power_off_asic(struct pp_hwmgr *hwmgr) 932static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
934{ 933{
935 cz_power_up_display_clock_sys_pll(hwmgr); 934 smu8_power_up_display_clock_sys_pll(hwmgr);
936 cz_clear_nb_dpm_flag(hwmgr); 935 smu8_clear_nb_dpm_flag(hwmgr);
937 cz_reset_cc6_data(hwmgr); 936 smu8_reset_cc6_data(hwmgr);
938 return 0; 937 return 0;
939}; 938};
940 939
941static void cz_program_voting_clients(struct pp_hwmgr *hwmgr) 940static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
942{ 941{
943 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 942 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
944 PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); 943 ixCG_FREQ_TRAN_VOTING_0,
944 SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
945} 945}
946 946
947static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr) 947static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
948{ 948{
949 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0); 949 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
950 ixCG_FREQ_TRAN_VOTING_0, 0);
950} 951}
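
Editor's note: this hunk drops the `PHMCZ_WRITE_SMC_REGISTER()` wrapper in favor of open-coded `cgs_write_ind_register()` calls. The removed macro's definition is not shown in this diff; a plausible shape — an assumption, modeled here as a self-contained sketch with stub register values — is a thin token-pasting wrapper over the same indirect write.

#include <stdio.h>

#define CGS_IND_REG__SMC        1       /* stub values for the sketch */
#define ixCG_FREQ_TRAN_VOTING_0 0x1738

static void cgs_write_ind_register(void *device, int space,
                                   unsigned int reg, unsigned int value)
{
        (void)device;                   /* stub for the real cgs helper */
        printf("ind write: space=%d reg=0x%x val=0x%x\n", space, reg, value);
}

/* Assumed shape of the removed wrapper: paste the "ix" prefix onto the
 * register name and forward to the indirect SMC register write. */
#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \
        cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)

int main(void)
{
        PHMCZ_WRITE_SMC_REGISTER(NULL, CG_FREQ_TRAN_VOTING_0, 0);
        return 0;
}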
951 952
952static int cz_start_dpm(struct pp_hwmgr *hwmgr) 953static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
953{ 954{
954 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 955 struct smu8_hwmgr *data = hwmgr->backend;
955 956
956 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; 957 data->dpm_flags |= DPMFlags_SCLK_Enabled;
957 958
958 return smum_send_msg_to_smc_with_parameter(hwmgr, 959 return smum_send_msg_to_smc_with_parameter(hwmgr,
959 PPSMC_MSG_EnableAllSmuFeatures, 960 PPSMC_MSG_EnableAllSmuFeatures,
960 SCLK_DPM_MASK); 961 SCLK_DPM_MASK);
961} 962}
962 963
963static int cz_stop_dpm(struct pp_hwmgr *hwmgr) 964static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
964{ 965{
965 int ret = 0; 966 int ret = 0;
966 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 967 struct smu8_hwmgr *data = hwmgr->backend;
967 unsigned long dpm_features = 0; 968 unsigned long dpm_features = 0;
968 969
969 if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) { 970 if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
970 dpm_features |= SCLK_DPM_MASK; 971 dpm_features |= SCLK_DPM_MASK;
971 cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; 972 data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
972 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 973 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
973 PPSMC_MSG_DisableAllSmuFeatures, 974 PPSMC_MSG_DisableAllSmuFeatures,
974 dpm_features); 975 dpm_features);
@@ -976,80 +977,80 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
976 return ret; 977 return ret;
977} 978}
978 979
979static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) 980static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
980{ 981{
981 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 982 struct smu8_hwmgr *data = hwmgr->backend;
982 983
983 cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; 984 data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
984 cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; 985 data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
985 986
986 smum_send_msg_to_smc_with_parameter(hwmgr, 987 smum_send_msg_to_smc_with_parameter(hwmgr,
987 PPSMC_MSG_SetSclkSoftMin, 988 PPSMC_MSG_SetSclkSoftMin,
988 cz_get_sclk_level(hwmgr, 989 smu8_get_sclk_level(hwmgr,
989 cz_hwmgr->sclk_dpm.soft_min_clk, 990 data->sclk_dpm.soft_min_clk,
990 PPSMC_MSG_SetSclkSoftMin)); 991 PPSMC_MSG_SetSclkSoftMin));
991 992
992 smum_send_msg_to_smc_with_parameter(hwmgr, 993 smum_send_msg_to_smc_with_parameter(hwmgr,
993 PPSMC_MSG_SetSclkSoftMax, 994 PPSMC_MSG_SetSclkSoftMax,
994 cz_get_sclk_level(hwmgr, 995 smu8_get_sclk_level(hwmgr,
995 cz_hwmgr->sclk_dpm.soft_max_clk, 996 data->sclk_dpm.soft_max_clk,
996 PPSMC_MSG_SetSclkSoftMax)); 997 PPSMC_MSG_SetSclkSoftMax));
997 998
998 return 0; 999 return 0;
999} 1000}
1000 1001
1001static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr) 1002static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1002{ 1003{
1003 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1004 struct smu8_hwmgr *data = hwmgr->backend;
1004 1005
1005 cz_hwmgr->acp_boot_level = 0xff; 1006 data->acp_boot_level = 0xff;
1006} 1007}
1007 1008
1008static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 1009static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1009{ 1010{
1010 cz_disable_nb_dpm(hwmgr); 1011 smu8_disable_nb_dpm(hwmgr);
1011 1012
1012 cz_clear_voting_clients(hwmgr); 1013 smu8_clear_voting_clients(hwmgr);
1013 if (cz_stop_dpm(hwmgr)) 1014 if (smu8_stop_dpm(hwmgr))
1014 return -EINVAL; 1015 return -EINVAL;
1015 1016
1016 return 0; 1017 return 0;
1017}; 1018};
1018 1019
1019static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1020static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1020{ 1021{
1021 cz_program_voting_clients(hwmgr); 1022 smu8_program_voting_clients(hwmgr);
1022 if (cz_start_dpm(hwmgr)) 1023 if (smu8_start_dpm(hwmgr))
1023 return -EINVAL; 1024 return -EINVAL;
1024 cz_program_bootup_state(hwmgr); 1025 smu8_program_bootup_state(hwmgr);
1025 cz_reset_acp_boot_level(hwmgr); 1026 smu8_reset_acp_boot_level(hwmgr);
1026 1027
1027 return 0; 1028 return 0;
1028}; 1029};
1029 1030
1030static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 1031static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1031 struct pp_power_state *prequest_ps, 1032 struct pp_power_state *prequest_ps,
1032 const struct pp_power_state *pcurrent_ps) 1033 const struct pp_power_state *pcurrent_ps)
1033{ 1034{
1034 struct cz_power_state *cz_ps = 1035 struct smu8_power_state *smu8_ps =
1035 cast_PhwCzPowerState(&prequest_ps->hardware); 1036 cast_smu8_power_state(&prequest_ps->hardware);
1036 1037
1037 const struct cz_power_state *cz_current_ps = 1038 const struct smu8_power_state *smu8_current_ps =
1038 cast_const_PhwCzPowerState(&pcurrent_ps->hardware); 1039 cast_const_smu8_power_state(&pcurrent_ps->hardware);
1039 1040
1040 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1041 struct smu8_hwmgr *data = hwmgr->backend;
1041 struct PP_Clocks clocks = {0, 0, 0, 0}; 1042 struct PP_Clocks clocks = {0, 0, 0, 0};
1042 bool force_high; 1043 bool force_high;
1043 uint32_t num_of_active_displays = 0; 1044 uint32_t num_of_active_displays = 0;
1044 struct cgs_display_info info = {0}; 1045 struct cgs_display_info info = {0};
1045 1046
1046 cz_ps->need_dfs_bypass = true; 1047 smu8_ps->need_dfs_bypass = true;
1047 1048
1048 cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); 1049 data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1049 1050
1050 clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? 1051 clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
1051 hwmgr->display_config.min_mem_set_clock : 1052 hwmgr->display_config.min_mem_set_clock :
1052 cz_hwmgr->sys_info.nbp_memory_clock[1]; 1053 data->sys_info.nbp_memory_clock[1];
1053 1054
1054 cgs_get_active_displays_info(hwmgr->device, &info); 1055 cgs_get_active_displays_info(hwmgr->device, &info);
1055 num_of_active_displays = info.display_count; 1056 num_of_active_displays = info.display_count;
@@ -1057,56 +1058,56 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1057 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 1058 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1058 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; 1059 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1059 1060
1060 force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]) 1061 force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1061 || (num_of_active_displays >= 3); 1062 || (num_of_active_displays >= 3);
1062 1063
1063 cz_ps->action = cz_current_ps->action; 1064 smu8_ps->action = smu8_current_ps->action;
1064 1065
1065 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 1066 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1066 cz_nbdpm_pstate_enable_disable(hwmgr, false, false); 1067 smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1067 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) 1068 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1068 cz_nbdpm_pstate_enable_disable(hwmgr, false, true); 1069 smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1069 else if (!force_high && (cz_ps->action == FORCE_HIGH)) 1070 else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1070 cz_ps->action = CANCEL_FORCE_HIGH; 1071 smu8_ps->action = CANCEL_FORCE_HIGH;
1071 else if (force_high && (cz_ps->action != FORCE_HIGH)) 1072 else if (force_high && (smu8_ps->action != FORCE_HIGH))
1072 cz_ps->action = FORCE_HIGH; 1073 smu8_ps->action = FORCE_HIGH;
1073 else 1074 else
1074 cz_ps->action = DO_NOTHING; 1075 smu8_ps->action = DO_NOTHING;
1075 1076
1076 return 0; 1077 return 0;
1077} 1078}
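
Editor's note: stripped of the forced-profile overrides, the action selection at the end of `smu8_apply_state_adjust_rules()` is a small state transition — request FORCE_HIGH when bandwidth demand is high (fast memory clock or three or more active displays), CANCEL_FORCE_HIGH when demand drops while forced, and DO_NOTHING otherwise. Expressed as a pure function:

#include <stdbool.h>
#include <stdio.h>

enum ps_action { DO_NOTHING, FORCE_HIGH, CANCEL_FORCE_HIGH };

static enum ps_action next_action(enum ps_action cur, bool force_high)
{
        if (!force_high && cur == FORCE_HIGH)
                return CANCEL_FORCE_HIGH;       /* demand dropped: back off */
        if (force_high && cur != FORCE_HIGH)
                return FORCE_HIGH;              /* demand rose: pin NB high */
        return DO_NOTHING;
}

int main(void)
{
        printf("%d\n", next_action(DO_NOTHING, true)); /* prints FORCE_HIGH (1) */
        return 0;
}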
1078 1079
1079static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 1080static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1080{ 1081{
1081 int result = 0; 1082 int result = 0;
1082 struct cz_hwmgr *data; 1083 struct smu8_hwmgr *data;
1083 1084
1084 data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); 1085 data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1085 if (data == NULL) 1086 if (data == NULL)
1086 return -ENOMEM; 1087 return -ENOMEM;
1087 1088
1088 hwmgr->backend = data; 1089 hwmgr->backend = data;
1089 1090
1090 result = cz_initialize_dpm_defaults(hwmgr); 1091 result = smu8_initialize_dpm_defaults(hwmgr);
1091 if (result != 0) { 1092 if (result != 0) {
1092 pr_err("cz_initialize_dpm_defaults failed\n"); 1093 pr_err("smu8_initialize_dpm_defaults failed\n");
1093 return result; 1094 return result;
1094 } 1095 }
1095 1096
1096 result = cz_get_system_info_data(hwmgr); 1097 result = smu8_get_system_info_data(hwmgr);
1097 if (result != 0) { 1098 if (result != 0) {
1098 pr_err("cz_get_system_info_data failed\n"); 1099 pr_err("smu8_get_system_info_data failed\n");
1099 return result; 1100 return result;
1100 } 1101 }
1101 1102
1102 cz_construct_boot_state(hwmgr); 1103 smu8_construct_boot_state(hwmgr);
1103 1104
1104 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; 1105 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
1105 1106
1106 return result; 1107 return result;
1107} 1108}
1108 1109
1109static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 1110static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1110{ 1111{
1111 if (hwmgr != NULL) { 1112 if (hwmgr != NULL) {
1112 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 1113 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
@@ -1118,28 +1119,28 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1118 return 0; 1119 return 0;
1119} 1120}
1120 1121
1121static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) 1122static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1122{ 1123{
1123 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1124 struct smu8_hwmgr *data = hwmgr->backend;
1124 1125
1125 smum_send_msg_to_smc_with_parameter(hwmgr, 1126 smum_send_msg_to_smc_with_parameter(hwmgr,
1126 PPSMC_MSG_SetSclkSoftMin, 1127 PPSMC_MSG_SetSclkSoftMin,
1127 cz_get_sclk_level(hwmgr, 1128 smu8_get_sclk_level(hwmgr,
1128 cz_hwmgr->sclk_dpm.soft_max_clk, 1129 data->sclk_dpm.soft_max_clk,
1129 PPSMC_MSG_SetSclkSoftMin)); 1130 PPSMC_MSG_SetSclkSoftMin));
1130 1131
1131 smum_send_msg_to_smc_with_parameter(hwmgr, 1132 smum_send_msg_to_smc_with_parameter(hwmgr,
1132 PPSMC_MSG_SetSclkSoftMax, 1133 PPSMC_MSG_SetSclkSoftMax,
1133 cz_get_sclk_level(hwmgr, 1134 smu8_get_sclk_level(hwmgr,
1134 cz_hwmgr->sclk_dpm.soft_max_clk, 1135 data->sclk_dpm.soft_max_clk,
1135 PPSMC_MSG_SetSclkSoftMax)); 1136 PPSMC_MSG_SetSclkSoftMax));
1136 1137
1137 return 0; 1138 return 0;
1138} 1139}
1139 1140
1140static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 1141static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1141{ 1142{
1142 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1143 struct smu8_hwmgr *data = hwmgr->backend;
1143 struct phm_clock_voltage_dependency_table *table = 1144 struct phm_clock_voltage_dependency_table *table =
1144 hwmgr->dyn_state.vddc_dependency_on_sclk; 1145 hwmgr->dyn_state.vddc_dependency_on_sclk;
1145 unsigned long clock = 0, level; 1146 unsigned long clock = 0, level;
@@ -1147,56 +1148,56 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1147 if (NULL == table || table->count <= 0) 1148 if (NULL == table || table->count <= 0)
1148 return -EINVAL; 1149 return -EINVAL;
1149 1150
1150 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk; 1151 data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1151 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk; 1152 data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1152 hwmgr->pstate_sclk = table->entries[0].clk; 1153 hwmgr->pstate_sclk = table->entries[0].clk;
1153 hwmgr->pstate_mclk = 0; 1154 hwmgr->pstate_mclk = 0;
1154 1155
1155 level = cz_get_max_sclk_level(hwmgr) - 1; 1156 level = smu8_get_max_sclk_level(hwmgr) - 1;
1156 1157
1157 if (level < table->count) 1158 if (level < table->count)
1158 clock = table->entries[level].clk; 1159 clock = table->entries[level].clk;
1159 else 1160 else
1160 clock = table->entries[table->count - 1].clk; 1161 clock = table->entries[table->count - 1].clk;
1161 1162
1162 cz_hwmgr->sclk_dpm.soft_max_clk = clock; 1163 data->sclk_dpm.soft_max_clk = clock;
1163 cz_hwmgr->sclk_dpm.hard_max_clk = clock; 1164 data->sclk_dpm.hard_max_clk = clock;
1164 1165
1165 smum_send_msg_to_smc_with_parameter(hwmgr, 1166 smum_send_msg_to_smc_with_parameter(hwmgr,
1166 PPSMC_MSG_SetSclkSoftMin, 1167 PPSMC_MSG_SetSclkSoftMin,
1167 cz_get_sclk_level(hwmgr, 1168 smu8_get_sclk_level(hwmgr,
1168 cz_hwmgr->sclk_dpm.soft_min_clk, 1169 data->sclk_dpm.soft_min_clk,
1169 PPSMC_MSG_SetSclkSoftMin)); 1170 PPSMC_MSG_SetSclkSoftMin));
1170 1171
1171 smum_send_msg_to_smc_with_parameter(hwmgr, 1172 smum_send_msg_to_smc_with_parameter(hwmgr,
1172 PPSMC_MSG_SetSclkSoftMax, 1173 PPSMC_MSG_SetSclkSoftMax,
1173 cz_get_sclk_level(hwmgr, 1174 smu8_get_sclk_level(hwmgr,
1174 cz_hwmgr->sclk_dpm.soft_max_clk, 1175 data->sclk_dpm.soft_max_clk,
1175 PPSMC_MSG_SetSclkSoftMax)); 1176 PPSMC_MSG_SetSclkSoftMax));
1176 1177
1177 return 0; 1178 return 0;
1178} 1179}
1179 1180
1180static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) 1181static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1181{ 1182{
1182 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1183 struct smu8_hwmgr *data = hwmgr->backend;
1183 1184
1184 smum_send_msg_to_smc_with_parameter(hwmgr, 1185 smum_send_msg_to_smc_with_parameter(hwmgr,
1185 PPSMC_MSG_SetSclkSoftMax, 1186 PPSMC_MSG_SetSclkSoftMax,
1186 cz_get_sclk_level(hwmgr, 1187 smu8_get_sclk_level(hwmgr,
1187 cz_hwmgr->sclk_dpm.soft_min_clk, 1188 data->sclk_dpm.soft_min_clk,
1188 PPSMC_MSG_SetSclkSoftMax)); 1189 PPSMC_MSG_SetSclkSoftMax));
1189 1190
1190 smum_send_msg_to_smc_with_parameter(hwmgr, 1191 smum_send_msg_to_smc_with_parameter(hwmgr,
1191 PPSMC_MSG_SetSclkSoftMin, 1192 PPSMC_MSG_SetSclkSoftMin,
1192 cz_get_sclk_level(hwmgr, 1193 smu8_get_sclk_level(hwmgr,
1193 cz_hwmgr->sclk_dpm.soft_min_clk, 1194 data->sclk_dpm.soft_min_clk,
1194 PPSMC_MSG_SetSclkSoftMin)); 1195 PPSMC_MSG_SetSclkSoftMin));
1195 1196
1196 return 0; 1197 return 0;
1197} 1198}
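
Editor's note: `force_dpm_highest()` and `force_dpm_lowest()` work by collapsing the soft clock window onto one end — forcing high raises SoftMin up to the cached `soft_max_clk`, forcing low drops SoftMax down to the cached `soft_min_clk`. A sketch modeling the effect on the firmware's window (the driver itself only sends the two messages and leaves its cached values untouched):

#include <stdint.h>
#include <stdio.h>

struct sclk_window {
        uint32_t soft_min, soft_max;
};

static void force_highest(struct sclk_window *w) { w->soft_min = w->soft_max; }
static void force_lowest(struct sclk_window *w)  { w->soft_max = w->soft_min; }

int main(void)
{
        struct sclk_window w = { .soft_min = 300, .soft_max = 800 };

        force_highest(&w);              /* window collapses to the top level */
        printf("forced high: [%u, %u]\n",
               (unsigned int)w.soft_min, (unsigned int)w.soft_max);
        return 0;
}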
1198 1199
1199static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 1200static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1200 enum amd_dpm_forced_level level) 1201 enum amd_dpm_forced_level level)
1201{ 1202{
1202 int ret = 0; 1203 int ret = 0;
@@ -1204,15 +1205,15 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1204 switch (level) { 1205 switch (level) {
1205 case AMD_DPM_FORCED_LEVEL_HIGH: 1206 case AMD_DPM_FORCED_LEVEL_HIGH:
1206 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1207 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1207 ret = cz_phm_force_dpm_highest(hwmgr); 1208 ret = smu8_phm_force_dpm_highest(hwmgr);
1208 break; 1209 break;
1209 case AMD_DPM_FORCED_LEVEL_LOW: 1210 case AMD_DPM_FORCED_LEVEL_LOW:
1210 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1211 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1211 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1212 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1212 ret = cz_phm_force_dpm_lowest(hwmgr); 1213 ret = smu8_phm_force_dpm_lowest(hwmgr);
1213 break; 1214 break;
1214 case AMD_DPM_FORCED_LEVEL_AUTO: 1215 case AMD_DPM_FORCED_LEVEL_AUTO:
1215 ret = cz_phm_unforce_dpm_levels(hwmgr); 1216 ret = smu8_phm_unforce_dpm_levels(hwmgr);
1216 break; 1217 break;
1217 case AMD_DPM_FORCED_LEVEL_MANUAL: 1218 case AMD_DPM_FORCED_LEVEL_MANUAL:
1218 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1219 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1223,14 +1224,14 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1223 return ret; 1224 return ret;
1224} 1225}
1225 1226
1226int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) 1227static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1227{ 1228{
1228 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) 1229 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1229 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); 1230 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1230 return 0; 1231 return 0;
1231} 1232}
1232 1233
1233int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) 1234static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1234{ 1235{
1235 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { 1236 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1236 return smum_send_msg_to_smc_with_parameter( 1237 return smum_send_msg_to_smc_with_parameter(
@@ -1242,52 +1243,22 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1242 return 0; 1243 return 0;
1243} 1244}
1244 1245
1245int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) 1246static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1246{ 1247{
1247 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1248 struct smu8_hwmgr *data = hwmgr->backend;
1248 struct phm_uvd_clock_voltage_dependency_table *ptable =
1249 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1250
1251 if (!bgate) {
1252 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1253 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1254 hwmgr->en_umd_pstate) {
1255 cz_hwmgr->uvd_dpm.hard_min_clk =
1256 ptable->entries[ptable->count - 1].vclk;
1257
1258 smum_send_msg_to_smc_with_parameter(hwmgr,
1259 PPSMC_MSG_SetUvdHardMin,
1260 cz_get_uvd_level(hwmgr,
1261 cz_hwmgr->uvd_dpm.hard_min_clk,
1262 PPSMC_MSG_SetUvdHardMin));
1263
1264 cz_enable_disable_uvd_dpm(hwmgr, true);
1265 } else {
1266 cz_enable_disable_uvd_dpm(hwmgr, true);
1267 }
1268 } else {
1269 cz_enable_disable_uvd_dpm(hwmgr, false);
1270 }
1271
1272 return 0;
1273}
1274
1275int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1276{
1277 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1278 struct phm_vce_clock_voltage_dependency_table *ptable = 1249 struct phm_vce_clock_voltage_dependency_table *ptable =
1279 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1250 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1280 1251
1281 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ 1252 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1282 if (PP_CAP(PHM_PlatformCaps_StablePState) || 1253 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1283 hwmgr->en_umd_pstate) { 1254 hwmgr->en_umd_pstate) {
1284 cz_hwmgr->vce_dpm.hard_min_clk = 1255 data->vce_dpm.hard_min_clk =
1285 ptable->entries[ptable->count - 1].ecclk; 1256 ptable->entries[ptable->count - 1].ecclk;
1286 1257
1287 smum_send_msg_to_smc_with_parameter(hwmgr, 1258 smum_send_msg_to_smc_with_parameter(hwmgr,
1288 PPSMC_MSG_SetEclkHardMin, 1259 PPSMC_MSG_SetEclkHardMin,
1289 cz_get_eclk_level(hwmgr, 1260 smu8_get_eclk_level(hwmgr,
1290 cz_hwmgr->vce_dpm.hard_min_clk, 1261 data->vce_dpm.hard_min_clk,
1291 PPSMC_MSG_SetEclkHardMin)); 1262 PPSMC_MSG_SetEclkHardMin));
1292 } else { 1263 } else {
1293 1264
@@ -1301,7 +1272,7 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1301 return 0; 1272 return 0;
1302} 1273}
1303 1274
1304int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) 1275static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1305{ 1276{
1306 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1277 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1307 return smum_send_msg_to_smc(hwmgr, 1278 return smum_send_msg_to_smc(hwmgr,
@@ -1309,7 +1280,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1309 return 0; 1280 return 0;
1310} 1281}
1311 1282
1312int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) 1283static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1313{ 1284{
1314 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1285 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1315 return smum_send_msg_to_smc(hwmgr, 1286 return smum_send_msg_to_smc(hwmgr,
@@ -1317,17 +1288,17 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1317 return 0; 1288 return 0;
1318} 1289}
1319 1290
1320static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 1291static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1321{ 1292{
1322 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1293 struct smu8_hwmgr *data = hwmgr->backend;
1323 1294
1324 return cz_hwmgr->sys_info.bootup_uma_clock; 1295 return data->sys_info.bootup_uma_clock;
1325} 1296}
1326 1297
1327static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 1298static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1328{ 1299{
1329 struct pp_power_state *ps; 1300 struct pp_power_state *ps;
1330 struct cz_power_state *cz_ps; 1301 struct smu8_power_state *smu8_ps;
1331 1302
1332 if (hwmgr == NULL) 1303 if (hwmgr == NULL)
1333 return -EINVAL; 1304 return -EINVAL;
@@ -1337,59 +1308,59 @@ static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1337 if (ps == NULL) 1308 if (ps == NULL)
1338 return -EINVAL; 1309 return -EINVAL;
1339 1310
1340 cz_ps = cast_PhwCzPowerState(&ps->hardware); 1311 smu8_ps = cast_smu8_power_state(&ps->hardware);
1341 1312
1342 if (low) 1313 if (low)
1343 return cz_ps->levels[0].engineClock; 1314 return smu8_ps->levels[0].engineClock;
1344 else 1315 else
1345 return cz_ps->levels[cz_ps->level-1].engineClock; 1316 return smu8_ps->levels[smu8_ps->level-1].engineClock;
1346} 1317}
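
Editor's note: one pre-existing quirk the rename leaves in place is that `smu8_dpm_get_sclk()` and `smu8_dpm_get_mclk()` return -EINVAL through a `uint32_t`, so callers see a large positive value rather than a negative errno. A two-line demonstration of the conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t v = (uint32_t)-22;     /* -EINVAL through an unsigned return type */

        printf("%u\n", (unsigned int)v); /* prints 4294967274, not -22 */
        return 0;
}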
1347 1318
1348static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 1319static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1349 struct pp_hw_power_state *hw_ps) 1320 struct pp_hw_power_state *hw_ps)
1350{ 1321{
1351 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1322 struct smu8_hwmgr *data = hwmgr->backend;
1352 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); 1323 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1353 1324
1354 cz_ps->level = 1; 1325 smu8_ps->level = 1;
1355 cz_ps->nbps_flags = 0; 1326 smu8_ps->nbps_flags = 0;
1356 cz_ps->bapm_flags = 0; 1327 smu8_ps->bapm_flags = 0;
1357 cz_ps->levels[0] = cz_hwmgr->boot_power_level; 1328 smu8_ps->levels[0] = data->boot_power_level;
1358 1329
1359 return 0; 1330 return 0;
1360} 1331}
1361 1332
1362static int cz_dpm_get_pp_table_entry_callback( 1333static int smu8_dpm_get_pp_table_entry_callback(
1363 struct pp_hwmgr *hwmgr, 1334 struct pp_hwmgr *hwmgr,
1364 struct pp_hw_power_state *hw_ps, 1335 struct pp_hw_power_state *hw_ps,
1365 unsigned int index, 1336 unsigned int index,
1366 const void *clock_info) 1337 const void *clock_info)
1367{ 1338{
1368 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps); 1339 struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1369 1340
1370 const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info; 1341 const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1371 1342
1372 struct phm_clock_voltage_dependency_table *table = 1343 struct phm_clock_voltage_dependency_table *table =
1373 hwmgr->dyn_state.vddc_dependency_on_sclk; 1344 hwmgr->dyn_state.vddc_dependency_on_sclk;
1374 uint8_t clock_info_index = cz_clock_info->index; 1345 uint8_t clock_info_index = smu8_clock_info->index;
1375 1346
1376 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) 1347 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1377 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); 1348 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1378 1349
1379 cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk; 1350 smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1380 cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; 1351 smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1381 1352
1382 cz_ps->level = index + 1; 1353 smu8_ps->level = index + 1;
1383 1354
1384 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 1355 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1385 cz_ps->levels[index].dsDividerIndex = 5; 1356 smu8_ps->levels[index].dsDividerIndex = 5;
1386 cz_ps->levels[index].ssDividerIndex = 5; 1357 smu8_ps->levels[index].ssDividerIndex = 5;
1387 } 1358 }
1388 1359
1389 return 0; 1360 return 0;
1390} 1361}
1391 1362
1392static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr) 1363static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1393{ 1364{
1394 int result; 1365 int result;
1395 unsigned long ret = 0; 1366 unsigned long ret = 0;
@@ -1399,31 +1370,31 @@ static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1399 return result ? 0 : ret; 1370 return result ? 0 : ret;
1400} 1371}
1401 1372
1402static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, 1373static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1403 unsigned long entry, struct pp_power_state *ps) 1374 unsigned long entry, struct pp_power_state *ps)
1404{ 1375{
1405 int result; 1376 int result;
1406 struct cz_power_state *cz_ps; 1377 struct smu8_power_state *smu8_ps;
1407 1378
1408 ps->hardware.magic = PhwCz_Magic; 1379 ps->hardware.magic = smu8_magic;
1409 1380
1410 cz_ps = cast_PhwCzPowerState(&(ps->hardware)); 1381 smu8_ps = cast_smu8_power_state(&(ps->hardware));
1411 1382
1412 result = pp_tables_get_entry(hwmgr, entry, ps, 1383 result = pp_tables_get_entry(hwmgr, entry, ps,
1413 cz_dpm_get_pp_table_entry_callback); 1384 smu8_dpm_get_pp_table_entry_callback);
1414 1385
1415 cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK; 1386 smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1416 cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK; 1387 smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1417 1388
1418 return result; 1389 return result;
1419} 1390}
1420 1391
1421static int cz_get_power_state_size(struct pp_hwmgr *hwmgr) 1392static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1422{ 1393{
1423 return sizeof(struct cz_power_state); 1394 return sizeof(struct smu8_power_state);
1424} 1395}
1425 1396
1426static void cz_hw_print_display_cfg( 1397static void smu8_hw_print_display_cfg(
1427 const struct cc6_settings *cc6_settings) 1398 const struct cc6_settings *cc6_settings)
1428{ 1399{
1429 PP_DBG_LOG("New Display Configuration:\n"); 1400 PP_DBG_LOG("New Display Configuration:\n");
@@ -1438,16 +1409,16 @@ static void cz_hw_print_display_cfg(
1438 cc6_settings->cpu_pstate_separation_time); 1409 cc6_settings->cpu_pstate_separation_time);
1439} 1410}
1440 1411
1441 static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr) 1412 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1442{ 1413{
1443 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 1414 struct smu8_hwmgr *hw_data = hwmgr->backend;
1444 uint32_t data = 0; 1415 uint32_t data = 0;
1445 1416
1446 if (hw_data->cc6_settings.cc6_setting_changed) { 1417 if (hw_data->cc6_settings.cc6_setting_changed) {
1447 1418
1448 hw_data->cc6_settings.cc6_setting_changed = false; 1419 hw_data->cc6_settings.cc6_setting_changed = false;
1449 1420
1450 cz_hw_print_display_cfg(&hw_data->cc6_settings); 1421 smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1451 1422
1452 data |= (hw_data->cc6_settings.cpu_pstate_separation_time 1423 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1453 & PWRMGT_SEPARATION_TIME_MASK) 1424 & PWRMGT_SEPARATION_TIME_MASK)
@@ -1471,10 +1442,10 @@ static void cz_hw_print_display_cfg(
1471} 1442}
1472 1443
1473 1444
1474static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, 1445static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1475 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) 1446 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1476{ 1447{
1477 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); 1448 struct smu8_hwmgr *hw_data = hwmgr->backend;
1478 1449
1479 if (separation_time != 1450 if (separation_time !=
1480 hw_data->cc6_settings.cpu_pstate_separation_time || 1451 hw_data->cc6_settings.cpu_pstate_separation_time ||
@@ -1498,7 +1469,7 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1498 return 0; 1469 return 0;
1499} 1470}
1500 1471
1501static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr, 1472static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1502 struct amd_pp_simple_clock_info *info) 1473 struct amd_pp_simple_clock_info *info)
1503{ 1474{
1504 uint32_t i; 1475 uint32_t i;
@@ -1519,7 +1490,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
1519 return -EINVAL; 1490 return -EINVAL;
1520} 1491}
1521 1492
1522static int cz_force_clock_level(struct pp_hwmgr *hwmgr, 1493static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1523 enum pp_clock_type type, uint32_t mask) 1494 enum pp_clock_type type, uint32_t mask)
1524{ 1495{
1525 switch (type) { 1496 switch (type) {
@@ -1538,10 +1509,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
1538 return 0; 1509 return 0;
1539} 1510}
1540 1511
1541static int cz_print_clock_levels(struct pp_hwmgr *hwmgr, 1512static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1542 enum pp_clock_type type, char *buf) 1513 enum pp_clock_type type, char *buf)
1543{ 1514{
1544 struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); 1515 struct smu8_hwmgr *data = hwmgr->backend;
1545 struct phm_clock_voltage_dependency_table *sclk_table = 1516 struct phm_clock_voltage_dependency_table *sclk_table =
1546 hwmgr->dyn_state.vddc_dependency_on_sclk; 1517 hwmgr->dyn_state.vddc_dependency_on_sclk;
1547 int i, now, size = 0; 1518 int i, now, size = 0;
@@ -1566,10 +1537,10 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
1566 TARGET_AND_CURRENT_PROFILE_INDEX, 1537 TARGET_AND_CURRENT_PROFILE_INDEX,
1567 CURR_MCLK_INDEX); 1538 CURR_MCLK_INDEX);
1568 1539
1569 for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--) 1540 for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1570 size += sprintf(buf + size, "%d: %uMhz %s\n", 1541 size += sprintf(buf + size, "%d: %uMhz %s\n",
1571 CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, 1542 SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1572 (CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); 1543 (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1573 break; 1544 break;
1574 default: 1545 default:
1575 break; 1546 break;
@@ -1577,20 +1548,20 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
1577 return size; 1548 return size;
1578} 1549}
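
Editor's note: `smu8_print_clock_levels()` fills its sysfs buffer with the common accumulate-sprintf idiom, marking the current level with an asterisk. A standalone sketch of the same idiom with made-up clock levels (the real code additionally walks the NBP memory clocks in reverse):

#include <stdio.h>

int main(void)
{
        char buf[256];
        unsigned int clk[] = { 300, 600, 800 }; /* stand-in levels in MHz */
        int i, now = 1, size = 0;

        for (i = 0; i < 3; i++)
                size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clk[i], (i == now) ? "*" : "");
        fputs(buf, stdout);             /* what sysfs would show */
        return size > 0 ? 0 : 1;
}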
1579 1550
1580static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 1551static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1581 PHM_PerformanceLevelDesignation designation, uint32_t index, 1552 PHM_PerformanceLevelDesignation designation, uint32_t index,
1582 PHM_PerformanceLevel *level) 1553 PHM_PerformanceLevel *level)
1583{ 1554{
1584 const struct cz_power_state *ps; 1555 const struct smu8_power_state *ps;
1585 struct cz_hwmgr *data; 1556 struct smu8_hwmgr *data;
1586 uint32_t level_index; 1557 uint32_t level_index;
1587 uint32_t i; 1558 uint32_t i;
1588 1559
1589 if (level == NULL || hwmgr == NULL || state == NULL) 1560 if (level == NULL || hwmgr == NULL || state == NULL)
1590 return -EINVAL; 1561 return -EINVAL;
1591 1562
1592 data = (struct cz_hwmgr *)(hwmgr->backend); 1563 data = hwmgr->backend;
1593 ps = cast_const_PhwCzPowerState(state); 1564 ps = cast_const_smu8_power_state(state);
1594 1565
1595 level_index = index > ps->level - 1 ? ps->level - 1 : index; 1566 level_index = index > ps->level - 1 ? ps->level - 1 : index;
1596 level->coreClock = ps->levels[level_index].engineClock; 1567 level->coreClock = ps->levels[level_index].engineClock;
@@ -1605,21 +1576,21 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
1605 } 1576 }
1606 1577
1607 if (level_index == 0) 1578 if (level_index == 0)
1608 level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1]; 1579 level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1609 else 1580 else
1610 level->memory_clock = data->sys_info.nbp_memory_clock[0]; 1581 level->memory_clock = data->sys_info.nbp_memory_clock[0];
1611 1582
1612 level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4; 1583 level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1613 level->nonLocalMemoryFreq = 0; 1584 level->nonLocalMemoryFreq = 0;
1614 level->nonLocalMemoryWidth = 0; 1585 level->nonLocalMemoryWidth = 0;
1615 1586
1616 return 0; 1587 return 0;
1617} 1588}
1618 1589
1619static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, 1590static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1620 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info) 1591 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1621{ 1592{
1622 const struct cz_power_state *ps = cast_const_PhwCzPowerState(state); 1593 const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1623 1594
1624 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex)); 1595 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1625 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex)); 1596 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
@@ -1627,14 +1598,14 @@ static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1627 return 0; 1598 return 0;
1628} 1599}
1629 1600
1630static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, 1601static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1631 struct amd_pp_clocks *clocks) 1602 struct amd_pp_clocks *clocks)
1632{ 1603{
1633 struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend); 1604 struct smu8_hwmgr *data = hwmgr->backend;
1634 int i; 1605 int i;
1635 struct phm_clock_voltage_dependency_table *table; 1606 struct phm_clock_voltage_dependency_table *table;
1636 1607
1637 clocks->count = cz_get_max_sclk_level(hwmgr); 1608 clocks->count = smu8_get_max_sclk_level(hwmgr);
1638 switch (type) { 1609 switch (type) {
1639 case amd_pp_disp_clock: 1610 case amd_pp_disp_clock:
1640 for (i = 0; i < clocks->count; i++) 1611 for (i = 0; i < clocks->count; i++)
@@ -1646,7 +1617,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
1646 clocks->clock[i] = table->entries[i].clk; 1617 clocks->clock[i] = table->entries[i].clk;
1647 break; 1618 break;
1648 case amd_pp_mem_clock: 1619 case amd_pp_mem_clock:
1649 clocks->count = CZ_NUM_NBPMEMORYCLOCK; 1620 clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1650 for (i = 0; i < clocks->count; i++) 1621 for (i = 0; i < clocks->count; i++)
1651 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; 1622 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
1652 break; 1623 break;
@@ -1657,7 +1628,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
1657 return 0; 1628 return 0;
1658} 1629}
1659 1630
1660static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) 1631static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1661{ 1632{
1662 struct phm_clock_voltage_dependency_table *table = 1633 struct phm_clock_voltage_dependency_table *table =
1663 hwmgr->dyn_state.vddc_dependency_on_sclk; 1634 hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1668,7 +1639,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
1668 if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) 1639 if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
1669 return -EINVAL; 1640 return -EINVAL;
1670 1641
1671 level = cz_get_max_sclk_level(hwmgr) - 1; 1642 level = smu8_get_max_sclk_level(hwmgr) - 1;
1672 1643
1673 if (level < table->count) 1644 if (level < table->count)
1674 clocks->engine_max_clock = table->entries[level].clk; 1645 clocks->engine_max_clock = table->entries[level].clk;
@@ -1680,7 +1651,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
1680 return 0; 1651 return 0;
1681} 1652}
1682 1653
1683static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) 1654static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1684{ 1655{
1685 int actual_temp = 0; 1656 int actual_temp = 0;
1686 uint32_t val = cgs_read_ind_register(hwmgr->device, 1657 uint32_t val = cgs_read_ind_register(hwmgr->device,
@@ -1695,10 +1666,10 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1695 return actual_temp; 1666 return actual_temp;
1696} 1667}
1697 1668
1698static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, 1669static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1699 void *value, int *size) 1670 void *value, int *size)
1700{ 1671{
1701 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1672 struct smu8_hwmgr *data = hwmgr->backend;
1702 1673
1703 struct phm_clock_voltage_dependency_table *table = 1674 struct phm_clock_voltage_dependency_table *table =
1704 hwmgr->dyn_state.vddc_dependency_on_sclk; 1675 hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1736,18 +1707,18 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1736 case AMDGPU_PP_SENSOR_VDDNB: 1707 case AMDGPU_PP_SENSOR_VDDNB:
1737 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & 1708 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1738 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; 1709 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1739 vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); 1710 vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
1740 *((uint32_t *)value) = vddnb; 1711 *((uint32_t *)value) = vddnb;
1741 return 0; 1712 return 0;
1742 case AMDGPU_PP_SENSOR_VDDGFX: 1713 case AMDGPU_PP_SENSOR_VDDGFX:
1743 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & 1714 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1744 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; 1715 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1745 vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); 1716 vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1746 *((uint32_t *)value) = vddgfx; 1717 *((uint32_t *)value) = vddgfx;
1747 return 0; 1718 return 0;
1748 case AMDGPU_PP_SENSOR_UVD_VCLK: 1719 case AMDGPU_PP_SENSOR_UVD_VCLK:
1749 if (!cz_hwmgr->uvd_power_gated) { 1720 if (!data->uvd_power_gated) {
1750 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1721 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1751 return -EINVAL; 1722 return -EINVAL;
1752 } else { 1723 } else {
1753 vclk = uvd_table->entries[uvd_index].vclk; 1724 vclk = uvd_table->entries[uvd_index].vclk;
@@ -1758,8 +1729,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1758 *((uint32_t *)value) = 0; 1729 *((uint32_t *)value) = 0;
1759 return 0; 1730 return 0;
1760 case AMDGPU_PP_SENSOR_UVD_DCLK: 1731 case AMDGPU_PP_SENSOR_UVD_DCLK:
1761 if (!cz_hwmgr->uvd_power_gated) { 1732 if (!data->uvd_power_gated) {
1762 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1733 if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1763 return -EINVAL; 1734 return -EINVAL;
1764 } else { 1735 } else {
1765 dclk = uvd_table->entries[uvd_index].dclk; 1736 dclk = uvd_table->entries[uvd_index].dclk;
@@ -1770,8 +1741,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1770 *((uint32_t *)value) = 0; 1741 *((uint32_t *)value) = 0;
1771 return 0; 1742 return 0;
1772 case AMDGPU_PP_SENSOR_VCE_ECCLK: 1743 case AMDGPU_PP_SENSOR_VCE_ECCLK:
1773 if (!cz_hwmgr->vce_power_gated) { 1744 if (!data->vce_power_gated) {
1774 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { 1745 if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1775 return -EINVAL; 1746 return -EINVAL;
1776 } else { 1747 } else {
1777 ecclk = vce_table->entries[vce_index].ecclk; 1748 ecclk = vce_table->entries[vce_index].ecclk;
@@ -1792,20 +1763,20 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1792 *((uint32_t *)value) = activity_percent; 1763 *((uint32_t *)value) = activity_percent;
1793 return 0; 1764 return 0;
1794 case AMDGPU_PP_SENSOR_UVD_POWER: 1765 case AMDGPU_PP_SENSOR_UVD_POWER:
1795 *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1; 1766 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1796 return 0; 1767 return 0;
1797 case AMDGPU_PP_SENSOR_VCE_POWER: 1768 case AMDGPU_PP_SENSOR_VCE_POWER:
1798 *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1; 1769 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1799 return 0; 1770 return 0;
1800 case AMDGPU_PP_SENSOR_GPU_TEMP: 1771 case AMDGPU_PP_SENSOR_GPU_TEMP:
1801 *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr); 1772 *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1802 return 0; 1773 return 0;
1803 default: 1774 default:
1804 return -EINVAL; 1775 return -EINVAL;
1805 } 1776 }
1806} 1777}
1807 1778
1808static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 1779static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1809 uint32_t virtual_addr_low, 1780 uint32_t virtual_addr_low,
1810 uint32_t virtual_addr_hi, 1781 uint32_t virtual_addr_hi,
1811 uint32_t mc_addr_low, 1782 uint32_t mc_addr_low,
@@ -1831,56 +1802,190 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1831 return 0; 1802 return 0;
1832} 1803}
1833 1804
1834static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, 1805static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1835 struct PP_TemperatureRange *thermal_data) 1806 struct PP_TemperatureRange *thermal_data)
1836{ 1807{
1837 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 1808 struct smu8_hwmgr *data = hwmgr->backend;
1838 1809
1839 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); 1810 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1840 1811
1841 thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold + 1812 thermal_data->max = (data->thermal_auto_throttling_treshold +
1842 cz_hwmgr->sys_info.htc_hyst_lmt) * 1813 data->sys_info.htc_hyst_lmt) *
1843 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 1814 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1844 1815
1845 return 0; 1816 return 0;
1846} 1817}
1847 1818
1848static const struct pp_hwmgr_func cz_hwmgr_funcs = { 1819static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1849 .backend_init = cz_hwmgr_backend_init, 1820{
1850 .backend_fini = cz_hwmgr_backend_fini, 1821 struct smu8_hwmgr *data = hwmgr->backend;
1851 .apply_state_adjust_rules = cz_apply_state_adjust_rules, 1822 uint32_t dpm_features = 0;
1852 .force_dpm_level = cz_dpm_force_dpm_level, 1823
1853 .get_power_state_size = cz_get_power_state_size, 1824 if (enable &&
1854 .powerdown_uvd = cz_dpm_powerdown_uvd, 1825 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1855 .powergate_uvd = cz_dpm_powergate_uvd, 1826 PHM_PlatformCaps_UVDDPM)) {
1856 .powergate_vce = cz_dpm_powergate_vce, 1827 data->dpm_flags |= DPMFlags_UVD_Enabled;
1857 .get_mclk = cz_dpm_get_mclk, 1828 dpm_features |= UVD_DPM_MASK;
1858 .get_sclk = cz_dpm_get_sclk, 1829 smum_send_msg_to_smc_with_parameter(hwmgr,
1859 .patch_boot_state = cz_dpm_patch_boot_state, 1830 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1860 .get_pp_table_entry = cz_dpm_get_pp_table_entry, 1831 } else {
1861 .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, 1832 dpm_features |= UVD_DPM_MASK;
1862 .set_cpu_power_state = cz_set_cpu_power_state, 1833 data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1863 .store_cc6_data = cz_store_cc6_data, 1834 smum_send_msg_to_smc_with_parameter(hwmgr,
1864 .force_clock_level = cz_force_clock_level, 1835 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1865 .print_clock_levels = cz_print_clock_levels, 1836 }
1866 .get_dal_power_level = cz_get_dal_power_level, 1837 return 0;
1867 .get_performance_level = cz_get_performance_level, 1838}
1868 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, 1839
1869 .get_clock_by_type = cz_get_clock_by_type, 1840int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1870 .get_max_high_clocks = cz_get_max_high_clocks, 1841{
1871 .read_sensor = cz_read_sensor, 1842 struct smu8_hwmgr *data = hwmgr->backend;
1872 .power_off_asic = cz_power_off_asic, 1843 struct phm_uvd_clock_voltage_dependency_table *ptable =
1873 .asic_setup = cz_setup_asic_task, 1844 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1874 .dynamic_state_management_enable = cz_enable_dpm_tasks, 1845
1875 .power_state_set = cz_set_power_state_tasks, 1846 if (!bgate) {
1876 .dynamic_state_management_disable = cz_disable_dpm_tasks, 1847 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1877 .notify_cac_buffer_info = cz_notify_cac_buffer_info, 1848 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1878 .get_thermal_temperature_range = cz_get_thermal_temperature_range, 1849 hwmgr->en_umd_pstate) {
1850 data->uvd_dpm.hard_min_clk =
1851 ptable->entries[ptable->count - 1].vclk;
1852
1853 smum_send_msg_to_smc_with_parameter(hwmgr,
1854 PPSMC_MSG_SetUvdHardMin,
1855 smu8_get_uvd_level(hwmgr,
1856 data->uvd_dpm.hard_min_clk,
1857 PPSMC_MSG_SetUvdHardMin));
1858
1859 smu8_enable_disable_uvd_dpm(hwmgr, true);
1860 } else {
1861 smu8_enable_disable_uvd_dpm(hwmgr, true);
1862 }
1863 } else {
1864 smu8_enable_disable_uvd_dpm(hwmgr, false);
1865 }
1866
1867 return 0;
1868}
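
Editor's note: both arms of the `!bgate` branch above end in `smu8_enable_disable_uvd_dpm(hwmgr, true)`, so the else arm is redundant — the stable-pstate check only decides whether to pin the UVD hard minimum first. The collapsed logic, as a sketch rather than a proposed patch:

#include <stdbool.h>
#include <stdio.h>

static void enable_disable_uvd_dpm(bool enable)
{
        printf("UVD DPM %s\n", enable ? "on" : "off");
}

static void update_uvd_dpm(bool bgate, bool stable_pstate)
{
        if (!bgate) {
                if (stable_pstate)      /* only this pinning step is conditional */
                        puts("pin UVD hard min to the top vclk level");
                enable_disable_uvd_dpm(true);
        } else {
                enable_disable_uvd_dpm(false);
        }
}

int main(void)
{
        update_uvd_dpm(false, true);
        update_uvd_dpm(true, false);
        return 0;
}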
1869
1870static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1871{
1872 struct smu8_hwmgr *data = hwmgr->backend;
1873 uint32_t dpm_features = 0;
1874
1875 if (enable && phm_cap_enabled(
1876 hwmgr->platform_descriptor.platformCaps,
1877 PHM_PlatformCaps_VCEDPM)) {
1878 data->dpm_flags |= DPMFlags_VCE_Enabled;
1879 dpm_features |= VCE_DPM_MASK;
1880 smum_send_msg_to_smc_with_parameter(hwmgr,
1881 PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1882 } else {
1883 dpm_features |= VCE_DPM_MASK;
1884 data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1885 smum_send_msg_to_smc_with_parameter(hwmgr,
1886 PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1887 }
1888
1889 return 0;
1890}
1891
1892
1893static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1894{
1895 struct smu8_hwmgr *data = hwmgr->backend;
1896
1897 data->uvd_power_gated = bgate;
1898
1899 if (bgate) {
1900 cgs_set_powergating_state(hwmgr->device,
1901 AMD_IP_BLOCK_TYPE_UVD,
1902 AMD_PG_STATE_GATE);
1903 cgs_set_clockgating_state(hwmgr->device,
1904 AMD_IP_BLOCK_TYPE_UVD,
1905 AMD_CG_STATE_GATE);
1906 smu8_dpm_update_uvd_dpm(hwmgr, true);
1907 smu8_dpm_powerdown_uvd(hwmgr);
1908 } else {
1909 smu8_dpm_powerup_uvd(hwmgr);
1910 cgs_set_clockgating_state(hwmgr->device,
1911 AMD_IP_BLOCK_TYPE_UVD,
1912 AMD_CG_STATE_UNGATE);
1913 cgs_set_powergating_state(hwmgr->device,
1914 AMD_IP_BLOCK_TYPE_UVD,
1915 AMD_PG_STATE_UNGATE);
1916 smu8_dpm_update_uvd_dpm(hwmgr, false);
1917 }
1918
1919}
1920
1921static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1922{
1923 struct smu8_hwmgr *data = hwmgr->backend;
1924
1925 if (bgate) {
1926 cgs_set_powergating_state(
1927 hwmgr->device,
1928 AMD_IP_BLOCK_TYPE_VCE,
1929 AMD_PG_STATE_GATE);
1930 cgs_set_clockgating_state(
1931 hwmgr->device,
1932 AMD_IP_BLOCK_TYPE_VCE,
1933 AMD_CG_STATE_GATE);
1934 smu8_enable_disable_vce_dpm(hwmgr, false);
1935 smu8_dpm_powerdown_vce(hwmgr);
1936 data->vce_power_gated = true;
1937 } else {
1938 smu8_dpm_powerup_vce(hwmgr);
1939 data->vce_power_gated = false;
1940 cgs_set_clockgating_state(
1941 hwmgr->device,
1942 AMD_IP_BLOCK_TYPE_VCE,
1943 AMD_CG_STATE_UNGATE);
1944 cgs_set_powergating_state(
1945 hwmgr->device,
1946 AMD_IP_BLOCK_TYPE_VCE,
1947 AMD_PG_STATE_UNGATE);
1948 smu8_dpm_update_vce_dpm(hwmgr);
1949 smu8_enable_disable_vce_dpm(hwmgr, true);
1950 }
1951}
1952
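Note how the two powergate paths above are mirror images: gating applies PG then CG, stops DPM, and powers the block down, while ungating powers the block up first, reverses the gating order, and re-enables DPM last. A stub sketch of that symmetry (the step names are placeholders, not driver API):

#include <stdbool.h>
#include <stdio.h>

static void step(const char *s) { printf("%s\n", s); }

static void powergate_block(bool gate)
{
	if (gate) {
		step("set PG state: gate");
		step("set CG state: gate");
		step("disable block DPM");
		step("power down block");
	} else {			/* exact reverse order */
		step("power up block");
		step("set CG state: ungate");
		step("set PG state: ungate");
		step("enable block DPM");
	}
}

int main(void)
{
	powergate_block(true);
	powergate_block(false);
	return 0;
}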
1953static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
1954 .backend_init = smu8_hwmgr_backend_init,
1955 .backend_fini = smu8_hwmgr_backend_fini,
1956 .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
1957 .force_dpm_level = smu8_dpm_force_dpm_level,
1958 .get_power_state_size = smu8_get_power_state_size,
1959 .powerdown_uvd = smu8_dpm_powerdown_uvd,
1960 .powergate_uvd = smu8_dpm_powergate_uvd,
1961 .powergate_vce = smu8_dpm_powergate_vce,
1962 .get_mclk = smu8_dpm_get_mclk,
1963 .get_sclk = smu8_dpm_get_sclk,
1964 .patch_boot_state = smu8_dpm_patch_boot_state,
1965 .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
1966 .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
1967 .set_cpu_power_state = smu8_set_cpu_power_state,
1968 .store_cc6_data = smu8_store_cc6_data,
1969 .force_clock_level = smu8_force_clock_level,
1970 .print_clock_levels = smu8_print_clock_levels,
1971 .get_dal_power_level = smu8_get_dal_power_level,
1972 .get_performance_level = smu8_get_performance_level,
1973 .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
1974 .get_clock_by_type = smu8_get_clock_by_type,
1975 .get_max_high_clocks = smu8_get_max_high_clocks,
1976 .read_sensor = smu8_read_sensor,
1977 .power_off_asic = smu8_power_off_asic,
1978 .asic_setup = smu8_setup_asic_task,
1979 .dynamic_state_management_enable = smu8_enable_dpm_tasks,
1980 .power_state_set = smu8_set_power_state_tasks,
1981 .dynamic_state_management_disable = smu8_disable_dpm_tasks,
1982 .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
1983 .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
1879}; 1984};
1880 1985
1881int cz_init_function_pointers(struct pp_hwmgr *hwmgr) 1986int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
1882{ 1987{
1883 hwmgr->hwmgr_func = &cz_hwmgr_funcs; 1988 hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
1884 hwmgr->pptable_func = &pptable_funcs; 1989 hwmgr->pptable_func = &pptable_funcs;
1885 return 0; 1990 return 0;
1886} 1991}
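All that changes at init time is which ops table hwmgr_func points at; the core powerplay code dispatches through the pointers and never calls a backend symbol directly, which is what makes the cz_* to smu8_* rename purely internal. The pattern in miniature, with hypothetical names:

#include <stdio.h>

struct ops {
	int (*get_sclk)(void);
	int (*get_mclk)(void);
};

/* one backend's implementations */
static int smu8ish_get_sclk(void) { return 800; }
static int smu8ish_get_mclk(void) { return 1600; }

static const struct ops smu8ish_ops = {
	.get_sclk = smu8ish_get_sclk,
	.get_mclk = smu8ish_get_mclk,
};

struct mgr { const struct ops *funcs; };

static int mgr_init(struct mgr *m)
{
	m->funcs = &smu8ish_ops;	/* the init_function_pointers step */
	return 0;
}

int main(void)
{
	struct mgr m;

	mgr_init(&m);
	printf("sclk %d mclk %d\n", m.funcs->get_sclk(), m.funcs->get_mclk());
	return 0;
}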
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
index b56720a3fc88..05a06083e1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
@@ -21,18 +21,18 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef _CZ_HWMGR_H_ 24#ifndef _SMU8_HWMGR_H_
25#define _CZ_HWMGR_H_ 25#define _SMU8_HWMGR_H_
26 26
27#include "cgs_common.h" 27#include "cgs_common.h"
28#include "ppatomctrl.h" 28#include "ppatomctrl.h"
29 29
30#define CZ_NUM_NBPSTATES 4 30#define SMU8_NUM_NBPSTATES 4
31#define CZ_NUM_NBPMEMORYCLOCK 2 31#define SMU8_NUM_NBPMEMORYCLOCK 2
32#define MAX_DISPLAY_CLOCK_LEVEL 8 32#define MAX_DISPLAY_CLOCK_LEVEL 8
33#define CZ_MAX_HARDWARE_POWERLEVELS 8 33#define SMU8_MAX_HARDWARE_POWERLEVELS 8
34#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 34#define SMU8_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
35#define CZ_MIN_DEEP_SLEEP_SCLK 800 35#define SMU8_MIN_DEEP_SLEEP_SCLK 800
36 36
37/* Carrizo device IDs */ 37/* Carrizo device IDs */
38#define DEVICE_ID_CZ_9870 0x9870 38#define DEVICE_ID_CZ_9870 0x9870
@@ -41,24 +41,21 @@
41#define DEVICE_ID_CZ_9876 0x9876 41#define DEVICE_ID_CZ_9876 0x9876
42#define DEVICE_ID_CZ_9877 0x9877 42#define DEVICE_ID_CZ_9877 0x9877
43 43
44#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \ 44struct smu8_dpm_entry {
45 cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
46
47struct cz_dpm_entry {
48 uint32_t soft_min_clk; 45 uint32_t soft_min_clk;
49 uint32_t hard_min_clk; 46 uint32_t hard_min_clk;
50 uint32_t soft_max_clk; 47 uint32_t soft_max_clk;
51 uint32_t hard_max_clk; 48 uint32_t hard_max_clk;
52}; 49};
53 50
54struct cz_sys_info { 51struct smu8_sys_info {
55 uint32_t bootup_uma_clock; 52 uint32_t bootup_uma_clock;
56 uint32_t bootup_engine_clock; 53 uint32_t bootup_engine_clock;
57 uint32_t dentist_vco_freq; 54 uint32_t dentist_vco_freq;
58 uint32_t nb_dpm_enable; 55 uint32_t nb_dpm_enable;
59 uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK]; 56 uint32_t nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK];
60 uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; 57 uint32_t nbp_n_clock[SMU8_NUM_NBPSTATES];
61 uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES]; 58 uint16_t nbp_voltage_index[SMU8_NUM_NBPSTATES];
62 uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL]; 59 uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
63 uint16_t bootup_nb_voltage_index; 60 uint16_t bootup_nb_voltage_index;
64 uint8_t htc_tmp_lmt; 61 uint8_t htc_tmp_lmt;
@@ -85,21 +82,21 @@ struct cz_sys_info {
85 ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \ 82 ((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
86 ((core) ? DISPLAYPHY_CORE_SELECT : 0)) 83 ((core) ? DISPLAYPHY_CORE_SELECT : 0))
87 84
88struct cz_display_phy_info_entry { 85struct smu8_display_phy_info_entry {
89 uint8_t phy_present; 86 uint8_t phy_present;
90 uint8_t active_lane_mapping; 87 uint8_t active_lane_mapping;
91 uint8_t display_config_type; 88 uint8_t display_config_type;
92 uint8_t active_number_of_lanes; 89 uint8_t active_number_of_lanes;
93}; 90};
94 91
95#define CZ_MAX_DISPLAYPHY_IDS 10 92#define SMU8_MAX_DISPLAYPHY_IDS 10
96 93
97struct cz_display_phy_info { 94struct smu8_display_phy_info {
98 bool display_phy_access_initialized; 95 bool display_phy_access_initialized;
99 struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS]; 96 struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS];
100}; 97};
101 98
102struct cz_power_level { 99struct smu8_power_level {
103 uint32_t engineClock; 100 uint32_t engineClock;
104 uint8_t vddcIndex; 101 uint8_t vddcIndex;
105 uint8_t dsDividerIndex; 102 uint8_t dsDividerIndex;
@@ -113,7 +110,7 @@ struct cz_power_level {
113 uint8_t rsv[3]; 110 uint8_t rsv[3];
114}; 111};
115 112
116struct cz_uvd_clocks { 113struct smu8_uvd_clocks {
117 uint32_t vclk; 114 uint32_t vclk;
118 uint32_t dclk; 115 uint32_t dclk;
119 uint32_t vclk_low_divider; 116 uint32_t vclk_low_divider;
@@ -122,7 +119,7 @@ struct cz_uvd_clocks {
122 uint32_t dclk_high_divider; 119 uint32_t dclk_high_divider;
123}; 120};
124 121
125enum cz_pstate_previous_action { 122enum smu8_pstate_previous_action {
126 DO_NOTHING = 1, 123 DO_NOTHING = 1,
127 FORCE_HIGH, 124 FORCE_HIGH,
128 CANCEL_FORCE_HIGH 125 CANCEL_FORCE_HIGH
@@ -143,10 +140,10 @@ struct pp_disable_nb_ps_flags {
143 }; 140 };
144}; 141};
145 142
146struct cz_power_state { 143struct smu8_power_state {
147 unsigned int magic; 144 unsigned int magic;
148 uint32_t level; 145 uint32_t level;
149 struct cz_uvd_clocks uvd_clocks; 146 struct smu8_uvd_clocks uvd_clocks;
150 uint32_t evclk; 147 uint32_t evclk;
151 uint32_t ecclk; 148 uint32_t ecclk;
152 uint32_t samclk; 149 uint32_t samclk;
@@ -158,8 +155,8 @@ struct cz_power_state {
158 uint8_t dpm_0_pg_nb_ps_high; 155 uint8_t dpm_0_pg_nb_ps_high;
159 uint8_t dpm_x_nb_ps_low; 156 uint8_t dpm_x_nb_ps_low;
160 uint8_t dpm_x_nb_ps_high; 157 uint8_t dpm_x_nb_ps_high;
161 enum cz_pstate_previous_action action; 158 enum smu8_pstate_previous_action action;
162 struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS]; 159 struct smu8_power_level levels[SMU8_MAX_HARDWARE_POWERLEVELS];
163 struct pp_disable_nb_ps_flags disable_nb_ps_flag; 160 struct pp_disable_nb_ps_flags disable_nb_ps_flag;
164}; 161};
165 162
@@ -182,7 +179,7 @@ struct cc6_settings {
182 uint32_t cpu_pstate_separation_time; 179 uint32_t cpu_pstate_separation_time;
183}; 180};
184 181
185struct cz_hwmgr { 182struct smu8_hwmgr {
186 uint32_t dpm_interval; 183 uint32_t dpm_interval;
187 184
188 uint32_t voltage_drop_threshold; 185 uint32_t voltage_drop_threshold;
@@ -202,11 +199,11 @@ struct cz_hwmgr {
202 199
203 uint32_t thermal_auto_throttling_treshold; 200 uint32_t thermal_auto_throttling_treshold;
204 201
205 struct cz_sys_info sys_info; 202 struct smu8_sys_info sys_info;
206 203
207 struct cz_power_level boot_power_level; 204 struct smu8_power_level boot_power_level;
208 struct cz_power_state *cz_current_ps; 205 struct smu8_power_state *smu8_current_ps;
209 struct cz_power_state *cz_requested_ps; 206 struct smu8_power_state *smu8_requested_ps;
210 207
211 uint32_t mgcg_cgtt_local0; 208 uint32_t mgcg_cgtt_local0;
212 uint32_t mgcg_cgtt_local1; 209 uint32_t mgcg_cgtt_local1;
@@ -219,7 +216,7 @@ struct cz_hwmgr {
219 216
220 uint32_t lock_nb_ps_in_uvd_play_back; 217 uint32_t lock_nb_ps_in_uvd_play_back;
221 218
222 struct cz_display_phy_info display_phy_info; 219 struct smu8_display_phy_info display_phy_info;
223 uint32_t vce_slow_sclk_threshold; /* default 200mhz */ 220 uint32_t vce_slow_sclk_threshold; /* default 200mhz */
224 uint32_t dce_slow_sclk_threshold; /* default 300mhz */ 221 uint32_t dce_slow_sclk_threshold; /* default 300mhz */
225 uint32_t min_sclk_did; /* minimum sclk divider */ 222 uint32_t min_sclk_did; /* minimum sclk divider */
@@ -270,10 +267,10 @@ struct cz_hwmgr {
270 uint32_t fps_low_threshold; 267 uint32_t fps_low_threshold;
271 268
272 uint32_t dpm_flags; 269 uint32_t dpm_flags;
273 struct cz_dpm_entry sclk_dpm; 270 struct smu8_dpm_entry sclk_dpm;
274 struct cz_dpm_entry uvd_dpm; 271 struct smu8_dpm_entry uvd_dpm;
275 struct cz_dpm_entry vce_dpm; 272 struct smu8_dpm_entry vce_dpm;
276 struct cz_dpm_entry acp_dpm; 273 struct smu8_dpm_entry acp_dpm;
277 274
278 uint8_t uvd_boot_level; 275 uint8_t uvd_boot_level;
279 uint8_t vce_boot_level; 276 uint8_t vce_boot_level;
@@ -311,12 +308,4 @@ struct cz_hwmgr {
311 uint32_t num_of_clk_entries; 308 uint32_t num_of_clk_entries;
312}; 309};
313 310
314struct pp_hwmgr; 311#endif /* _SMU8_HWMGR_H_ */
315
316int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
317int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
318int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
319int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
320int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
321int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
322#endif /* _CZ_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
new file mode 100644
index 000000000000..e11daf5cbf80
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -0,0 +1,536 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "hwmgr.h"
24#include "pp_debug.h"
25#include "ppatomctrl.h"
26#include "ppsmc.h"
27
28uint8_t convert_to_vid(uint16_t vddc)
29{
30 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
31}
32
33uint16_t convert_to_vddc(uint8_t vid)
34{
35 return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
36}
37
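The two converters are inverses under the SVI2-style encoding: 6200 is 1.55 V and 25 is the 6.25 mV VID step, both expressed in 0.25 mV units once vddc (in mV) is scaled by VOLTAGE_SCALE. A standalone round-trip check, assuming VOLTAGE_SCALE is 4 as defined elsewhere in hwmgr.h:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* assumed value, mirrors hwmgr.h */

static uint8_t to_vid(uint16_t vddc_mv)
{
	/* 6200 = 1.55 V and 25 = 6.25 mV, both in 0.25 mV units */
	return (uint8_t)((6200 - (vddc_mv * VOLTAGE_SCALE)) / 25);
}

static uint16_t to_vddc(uint8_t vid)
{
	return (uint16_t)((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

int main(void)
{
	/* 1150 mV encodes as VID 64 and decodes back exactly */
	assert(to_vid(1150) == 64);
	assert(to_vddc(64) == 1150);
	printf("vid(1150 mV) = %u\n", to_vid(1150));
	return 0;
}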
38uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
39{
40 u32 mask = 0;
41 u32 shift = 0;
42
43 shift = (offset % 4) << 3;
44 if (size == sizeof(uint8_t))
45 mask = 0xFF << shift;
46 else if (size == sizeof(uint16_t))
47 mask = 0xFFFF << shift;
48
49 original_data &= ~mask;
50 original_data |= (field << shift);
51 return original_data;
52}
53
54/**
55 * Returns once the part of the register indicated by the mask has
56 * reached the given value.
57 */
58int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
59 uint32_t value, uint32_t mask)
60{
61 uint32_t i;
62 uint32_t cur_value;
63
64 if (hwmgr == NULL || hwmgr->device == NULL) {
65		pr_err("Invalid Hardware Manager!\n");
66 return -EINVAL;
67 }
68
69 for (i = 0; i < hwmgr->usec_timeout; i++) {
70 cur_value = cgs_read_register(hwmgr->device, index);
71 if ((cur_value & mask) == (value & mask))
72 break;
73 udelay(1);
74 }
75
76	/* timeout means wrong logic */
77	if (i == hwmgr->usec_timeout)
78		return -ETIME;
79 return 0;
80}
81
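phm_wait_on_register is a bounded poll: compare the masked register against the masked target once per microsecond for at most hwmgr->usec_timeout iterations. The same idiom against a stubbed register read (all names here are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* stub: bit 0 of the fake register sets itself on the third read */
static uint32_t read_fake_reg(void)
{
	static int reads;
	return (++reads >= 3) ? 0x1 : 0x0;
}

static int wait_on_reg(uint32_t value, uint32_t mask, uint32_t timeout)
{
	uint32_t i;

	for (i = 0; i < timeout; i++) {
		if ((read_fake_reg() & mask) == (value & mask))
			return 0;	/* matched within the budget */
		/* a real driver would udelay(1) here */
	}
	return -1;			/* timed out */
}

int main(void)
{
	printf("wait result: %d\n", wait_on_reg(0x1, 0x1, 10));
	return 0;
}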
82
83/**
84 * Returns once the part of the register indicated by the mask has
85 * reached the given value. The indirect space is described by giving
86 * the memory-mapped index of the indirect index register.
87 */
88int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
89 uint32_t indirect_port,
90 uint32_t index,
91 uint32_t value,
92 uint32_t mask)
93{
94 if (hwmgr == NULL || hwmgr->device == NULL) {
95		pr_err("Invalid Hardware Manager!\n");
96 return -EINVAL;
97 }
98
99 cgs_write_register(hwmgr->device, indirect_port, index);
100 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
101}
102
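The indirect variant is the classic index/data pair: write the target index to the port register, then poll the data register at port + 1. A tiny fake bus makes the mechanism concrete (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t backing[16];	/* fake indirect register space */
static uint32_t index_reg;	/* "port": latches the current index */

static void bus_write(int port, uint32_t v)
{
	if (port == 0)
		index_reg = v;		/* index register */
	else
		backing[index_reg] = v;	/* data register */
}

static uint32_t bus_read(int port)
{
	return (port == 0) ? index_reg : backing[index_reg];
}

int main(void)
{
	bus_write(0, 5);	/* select location 5 */
	bus_write(1, 0xabc);	/* write through the data register */
	bus_write(0, 5);
	printf("0x%x\n", (unsigned int)bus_read(1));	/* reads back 0xabc */
	return 0;
}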
103int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
104 uint32_t index,
105 uint32_t value, uint32_t mask)
106{
107 uint32_t i;
108 uint32_t cur_value;
109
110 if (hwmgr == NULL || hwmgr->device == NULL)
111 return -EINVAL;
112
113 for (i = 0; i < hwmgr->usec_timeout; i++) {
114 cur_value = cgs_read_register(hwmgr->device,
115 index);
116 if ((cur_value & mask) != (value & mask))
117 break;
118 udelay(1);
119 }
120
121 /* timeout means wrong logic */
122 if (i == hwmgr->usec_timeout)
123 return -ETIME;
124 return 0;
125}
126
127int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
128 uint32_t indirect_port,
129 uint32_t index,
130 uint32_t value,
131 uint32_t mask)
132{
133 if (hwmgr == NULL || hwmgr->device == NULL)
134 return -EINVAL;
135
136 cgs_write_register(hwmgr->device, indirect_port, index);
137 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
138 value, mask);
139}
140
141bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
142{
143 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
144}
145
146bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
147{
148 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
149}
150
151
152int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
153{
154 uint32_t i, j;
155 uint16_t vvalue;
156 bool found = false;
157 struct pp_atomctrl_voltage_table *table;
158
159 PP_ASSERT_WITH_CODE((NULL != vol_table),
160 "Voltage Table empty.", return -EINVAL);
161
162 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
163 GFP_KERNEL);
164
165 if (NULL == table)
166		return -ENOMEM;
167
168 table->mask_low = vol_table->mask_low;
169 table->phase_delay = vol_table->phase_delay;
170
171 for (i = 0; i < vol_table->count; i++) {
172 vvalue = vol_table->entries[i].value;
173 found = false;
174
175 for (j = 0; j < table->count; j++) {
176 if (vvalue == table->entries[j].value) {
177 found = true;
178 break;
179 }
180 }
181
182 if (!found) {
183 table->entries[table->count].value = vvalue;
184 table->entries[table->count].smio_low =
185 vol_table->entries[i].smio_low;
186 table->count++;
187 }
188 }
189
190 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
191 kfree(table);
192 table = NULL;
193 return 0;
194}
195
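The trim is a stable de-duplication: each voltage value survives only the first time it is seen, keeping its smio_low, and the result is copied back over the input. The same idea, simplified to plain values and done in place without the scratch allocation the kernel helper uses:

#include <stdint.h>
#include <stdio.h>

/* stable in-place dedup; returns the new count */
static size_t dedup(uint16_t *v, size_t n)
{
	size_t out = 0, i, j;

	for (i = 0; i < n; i++) {
		int seen = 0;

		for (j = 0; j < out; j++)
			if (v[j] == v[i]) { seen = 1; break; }
		if (!seen)
			v[out++] = v[i];	/* first occurrence wins */
	}
	return out;
}

int main(void)
{
	uint16_t volts[] = { 800, 900, 800, 1000, 900 };
	size_t i, n = dedup(volts, 5);

	for (i = 0; i < n; i++)
		printf("%u ", volts[i]);	/* 800 900 1000 */
	printf("\n");
	return 0;
}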
196int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
197 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
198{
199 uint32_t i;
200 int result;
201
202 PP_ASSERT_WITH_CODE((0 != dep_table->count),
203 "Voltage Dependency Table empty.", return -EINVAL);
204
205 PP_ASSERT_WITH_CODE((NULL != vol_table),
206 "vol_table empty.", return -EINVAL);
207
208 vol_table->mask_low = 0;
209 vol_table->phase_delay = 0;
210 vol_table->count = dep_table->count;
211
212 for (i = 0; i < dep_table->count; i++) {
213 vol_table->entries[i].value = dep_table->entries[i].mvdd;
214 vol_table->entries[i].smio_low = 0;
215 }
216
217 result = phm_trim_voltage_table(vol_table);
218 PP_ASSERT_WITH_CODE((0 == result),
219 "Failed to trim MVDD table.", return result);
220
221 return 0;
222}
223
224int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
225 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
226{
227 uint32_t i;
228 int result;
229
230 PP_ASSERT_WITH_CODE((0 != dep_table->count),
231 "Voltage Dependency Table empty.", return -EINVAL);
232
233 PP_ASSERT_WITH_CODE((NULL != vol_table),
234 "vol_table empty.", return -EINVAL);
235
236 vol_table->mask_low = 0;
237 vol_table->phase_delay = 0;
238 vol_table->count = dep_table->count;
239
240 for (i = 0; i < dep_table->count; i++) {
241 vol_table->entries[i].value = dep_table->entries[i].vddci;
242 vol_table->entries[i].smio_low = 0;
243 }
244
245 result = phm_trim_voltage_table(vol_table);
246 PP_ASSERT_WITH_CODE((0 == result),
247 "Failed to trim VDDCI table.", return result);
248
249 return 0;
250}
251
252int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
253 phm_ppt_v1_voltage_lookup_table *lookup_table)
254{
255 int i = 0;
256
257 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
258 "Voltage Lookup Table empty.", return -EINVAL);
259
260 PP_ASSERT_WITH_CODE((NULL != vol_table),
261 "vol_table empty.", return -EINVAL);
262
263 vol_table->mask_low = 0;
264 vol_table->phase_delay = 0;
265
266 vol_table->count = lookup_table->count;
267
268 for (i = 0; i < vol_table->count; i++) {
269 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
270 vol_table->entries[i].smio_low = 0;
271 }
272
273 return 0;
274}
275
276void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
277 struct pp_atomctrl_voltage_table *vol_table)
278{
279 unsigned int i, diff;
280
281 if (vol_table->count <= max_vol_steps)
282 return;
283
284 diff = vol_table->count - max_vol_steps;
285
286 for (i = 0; i < max_vol_steps; i++)
287 vol_table->entries[i] = vol_table->entries[i + diff];
288
289 vol_table->count = max_vol_steps;
290
291 return;
292}
293
294int phm_reset_single_dpm_table(void *table,
295 uint32_t count, int max)
296{
297 int i;
298
299 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
300
301 dpm_table->count = count > max ? max : count;
302
303 for (i = 0; i < dpm_table->count; i++)
304 dpm_table->dpm_level[i].enabled = false;
305
306 return 0;
307}
308
309void phm_setup_pcie_table_entry(
310 void *table,
311 uint32_t index, uint32_t pcie_gen,
312 uint32_t pcie_lanes)
313{
314 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
315 dpm_table->dpm_level[index].value = pcie_gen;
316 dpm_table->dpm_level[index].param1 = pcie_lanes;
317 dpm_table->dpm_level[index].enabled = 1;
318}
319
320int32_t phm_get_dpm_level_enable_mask_value(void *table)
321{
322 int32_t i;
323 int32_t mask = 0;
324 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
325
326 for (i = dpm_table->count; i > 0; i--) {
327 mask = mask << 1;
328 if (dpm_table->dpm_level[i - 1].enabled)
329 mask |= 0x1;
330 else
331 mask &= 0xFFFFFFFE;
332 }
333
334 return mask;
335}
336
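The mask builder walks the levels from the top down, shifting left and OR-ing in a bit per enabled level, so bit i of the result corresponds to dpm_level[i]. A compact check of that bit layout:

#include <stdint.h>
#include <stdio.h>

struct level { int enabled; };

static int32_t enable_mask(const struct level *lv, int count)
{
	int32_t mask = 0;
	int i;

	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (lv[i - 1].enabled)
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	struct level lv[] = { {1}, {0}, {1} };	/* levels 0 and 2 on */

	printf("0x%x\n", enable_mask(lv, 3));	/* prints 0x5 */
	return 0;
}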
337uint8_t phm_get_voltage_index(
338 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
339{
340	uint8_t count;
341	uint8_t i;
342
343	PP_ASSERT_WITH_CODE((NULL != lookup_table),
344			"Lookup Table empty.", return 0);
345	count = (uint8_t) (lookup_table->count);
346	PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0);
347
348 for (i = 0; i < lookup_table->count; i++) {
349 /* find first voltage equal or bigger than requested */
350 if (lookup_table->entries[i].us_vdd >= voltage)
351 return i;
352 }
353 /* voltage is bigger than max voltage in the table */
354 return i - 1;
355}
356
357uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
358 uint32_t voltage)
359{
360	uint8_t count;
361	uint8_t i = 0;
362
363	PP_ASSERT_WITH_CODE((NULL != voltage_table),
364			"Voltage Table empty.", return 0);
365	count = (uint8_t) (voltage_table->count);
366	PP_ASSERT_WITH_CODE((0 != count), "Voltage Table empty.", return 0);
367
368 for (i = 0; i < count; i++) {
369 /* find first voltage bigger than requested */
370 if (voltage_table->entries[i].value >= voltage)
371 return i;
372 }
373
374 /* voltage is bigger than max voltage in the table */
375 return i - 1;
376}
377
378uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
379{
380 uint32_t i;
381
382 for (i = 0; i < vddci_table->count; i++) {
383 if (vddci_table->entries[i].value >= vddci)
384 return vddci_table->entries[i].value;
385 }
386
387 pr_debug("vddci is larger than max value in vddci_table\n");
388 return vddci_table->entries[i-1].value;
389}
390
391int phm_find_boot_level(void *table,
392 uint32_t value, uint32_t *boot_level)
393{
394 int result = -EINVAL;
395 uint32_t i;
396 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
397
398 for (i = 0; i < dpm_table->count; i++) {
399 if (value == dpm_table->dpm_level[i].value) {
400 *boot_level = i;
401 result = 0;
402 }
403 }
404
405 return result;
406}
407
408int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
409 phm_ppt_v1_voltage_lookup_table *lookup_table,
410 uint16_t virtual_voltage_id, int32_t *sclk)
411{
412 uint8_t entry_id;
413 uint8_t voltage_id;
414 struct phm_ppt_v1_information *table_info =
415 (struct phm_ppt_v1_information *)(hwmgr->pptable);
416
417 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
418
419	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
420 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
421 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
422 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
423 break;
424 }
425
426 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
427 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
428 return -EINVAL;
429 }
430
431 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
432
433 return 0;
434}
435
436/**
437 * Initialize Dynamic State Adjustment Rule Settings
438 *
439 * @param hwmgr the address of the powerplay hardware manager.
440 */
441int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
442{
443 uint32_t table_size;
444 struct phm_clock_voltage_dependency_table *table_clk_vlt;
445 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
446
447 /* initialize vddc_dep_on_dal_pwrl table */
448 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
449 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
450
451 if (NULL == table_clk_vlt) {
452		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
453 return -ENOMEM;
454 } else {
455 table_clk_vlt->count = 4;
456 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
457 table_clk_vlt->entries[0].v = 0;
458 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
459 table_clk_vlt->entries[1].v = 720;
460 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
461 table_clk_vlt->entries[2].v = 810;
462 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
463 table_clk_vlt->entries[3].v = 900;
464 if (pptable_info != NULL)
465 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
466 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
467 }
468
469 return 0;
470}
471
472uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
473{
474 uint32_t level = 0;
475
476 while (0 == (mask & (1 << level)))
477 level++;
478
479 return level;
480}
481
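This is find-first-set over the enable mask, and like the loop it replaces nothing here guards against a zero mask, which the callers must rule out. The equivalent via the GCC/Clang builtin, which carries the same caveat:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x18;	/* levels 3 and 4 enabled */

	/* __builtin_ctz is undefined for 0, same caveat as the loop */
	printf("lowest level: %d\n", __builtin_ctz(mask));	/* 3 */
	return 0;
}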
482void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
483{
484 struct phm_ppt_v1_information *table_info =
485 (struct phm_ppt_v1_information *)hwmgr->pptable;
486 struct phm_clock_voltage_dependency_table *table =
487 table_info->vddc_dep_on_dal_pwrl;
488 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
489 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
490 uint32_t req_vddc = 0, req_volt, i;
491
492 if (!table || table->count <= 0
493 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
494 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
495 return;
496
497 for (i = 0; i < table->count; i++) {
498 if (dal_power_level == table->entries[i].clk) {
499 req_vddc = table->entries[i].v;
500 break;
501 }
502 }
503
504 vddc_table = table_info->vdd_dep_on_sclk;
505 for (i = 0; i < vddc_table->count; i++) {
506 if (req_vddc <= vddc_table->entries[i].vddc) {
507 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
508 smum_send_msg_to_smc_with_parameter(hwmgr,
509 PPSMC_MSG_VddC_Request, req_volt);
510 return;
511 }
512 }
513	pr_err("Cannot find an available voltage in the VDDC DPM"
514		" table for the DAL requested level\n");
515}
516
517int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
518 uint32_t sclk, uint16_t id, uint16_t *voltage)
519{
520 uint32_t vol;
521 int ret = 0;
522
523 if (hwmgr->chip_id < CHIP_TONGA) {
524 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
525 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
526 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
527 if (*voltage >= 2000 || *voltage == 0)
528 *voltage = 1150;
529 } else {
530 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
531 *voltage = (uint16_t)(vol/100);
532 }
533 return ret;
534}
535
536
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
new file mode 100644
index 000000000000..a1a491300348
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _SMU_HELPER_H_
24#define _SMU_HELPER_H_
25
26struct pp_atomctrl_voltage_table;
27struct pp_hwmgr;
28struct phm_ppt_v1_voltage_lookup_table;
29
30extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
31 uint32_t index,
32 uint32_t value, uint32_t mask);
33extern int phm_wait_for_indirect_register_unequal(
34 struct pp_hwmgr *hwmgr,
35 uint32_t indirect_port, uint32_t index,
36 uint32_t value, uint32_t mask);
37
38
39extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
40extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
41extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
42
43extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
44extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
45extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
46extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
47extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
48extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
49extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
50extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
51extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
52 uint32_t voltage);
53extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
54extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
55extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
56extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
57 uint16_t virtual_voltage_id, int32_t *sclk);
58extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
59extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
60extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
61
62extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
63 uint32_t sclk, uint16_t id, uint16_t *voltage);
64
65extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
66
67extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
68 uint32_t value, uint32_t mask);
69
70extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
71 uint32_t indirect_port,
72 uint32_t index,
73 uint32_t value,
74 uint32_t mask);
75
76#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
77#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
78
79#define PHM_SET_FIELD(origval, reg, field, fieldval) \
80 (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
81 (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
82
83#define PHM_GET_FIELD(value, reg, field) \
84 (((value) & PHM_FIELD_MASK(reg, field)) >> \
85 PHM_FIELD_SHIFT(reg, field))
86
87
88/* Operations on named fields. */
89
90#define PHM_READ_FIELD(device, reg, field) \
91 PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
92
93#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
94 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
95 reg, field)
96
97#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
98 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
99 reg, field)
100
101#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
102 cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
103 cgs_read_register(device, mm##reg), reg, field, fieldval))
104
105#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
106 cgs_write_ind_register(device, port, ix##reg, \
107 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
108 reg, field, fieldval))
109
110#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
111 cgs_write_ind_register(device, port, ix##reg, \
112 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
113 reg, field, fieldval))
114
115#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
116 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
117
118
119#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
120 PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
121
122#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
123 PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
124 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
125
126#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
127 phm_wait_for_indirect_register_unequal(hwmgr, \
128 mm##port##_INDEX, index, value, mask)
129
130#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
131 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
132
133#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
134 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
135 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
136		PHM_FIELD_MASK(reg, field))
137
138
139#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
140 port, index, value, mask) \
141 phm_wait_for_indirect_register_unequal(hwmgr, \
142 mm##port##_INDEX_11, index, value, mask)
143
144#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
145 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
146
147#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
148 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
149 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
150 PHM_FIELD_MASK(reg, field))
151
152
153#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
154 port, index, value, mask) \
155 phm_wait_on_indirect_register(hwmgr, \
156 mm##port##_INDEX_11, index, value, mask)
157
158#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
159 PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
160
161#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
162 PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
163 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
164 PHM_FIELD_MASK(reg, field))
165
166#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
167 index, value, mask) \
168 phm_wait_for_register_unequal(hwmgr, \
169 index, value, mask)
170
171#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
172 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
173 mm##reg, value, mask)
174
175#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
176 PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
177 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
178 PHM_FIELD_MASK(reg, field))
179
180#endif /* _SMU_HELPER_H_ */
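All of the PHM_* macros above lean on the generated register headers defining, for each register FOO and field BAR, FOO__BAR__SHIFT and FOO__BAR_MASK constants (plus mmFOO/ixFOO address constants); the ## pasting assembles those names at preprocessing time. A self-contained demo of the construction with made-up register definitions:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for what the generated register headers provide */
#define DEMO_REG__MODE__SHIFT 4
#define DEMO_REG__MODE_MASK   0x00000070

#define FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define FIELD_MASK(reg, field)  reg##__##field##_MASK

#define SET_FIELD(orig, reg, field, val) \
	(((orig) & ~FIELD_MASK(reg, field)) | \
	 (FIELD_MASK(reg, field) & ((val) << FIELD_SHIFT(reg, field))))

#define GET_FIELD(v, reg, field) \
	(((v) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))

int main(void)
{
	uint32_t reg = 0xffffffff;

	reg = SET_FIELD(reg, DEMO_REG, MODE, 2);
	printf("reg=0x%08x mode=%u\n", (unsigned int)reg,
	       (unsigned int)GET_FIELD(reg, DEMO_REG, MODE));
	return 0;
}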
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f23861f2c685..2fcbb17b794d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4874,12 +4874,12 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4874 hwmgr->thermal_controller.ucType == 4874 hwmgr->thermal_controller.ucType ==
4875 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { 4875 ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
4876 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4876 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
4877 0xf, /* AMDGPU_IH_CLIENTID_THM */ 4877 SOC15_IH_CLIENTID_THM,
4878 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr), 4878 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
4879 "Failed to register high thermal interrupt!", 4879 "Failed to register high thermal interrupt!",
4880 return -EINVAL); 4880 return -EINVAL);
4881 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4881 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
4882 0xf, /* AMDGPU_IH_CLIENTID_THM */ 4882 SOC15_IH_CLIENTID_THM,
4883 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr), 4883 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
4884 "Failed to register low thermal interrupt!", 4884 "Failed to register low thermal interrupt!",
4885 return -EINVAL); 4885 return -EINVAL);
@@ -4887,7 +4887,7 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4887 4887
4888 /* Register CTF(GPIO_19) interrupt */ 4888 /* Register CTF(GPIO_19) interrupt */
4889 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, 4889 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
4890 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */ 4890 SOC15_IH_CLIENTID_ROM_SMUIO,
4891 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr), 4891 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
4892 "Failed to register CTF thermal interrupt!", 4892 "Failed to register CTF thermal interrupt!",
4893 return -EINVAL); 4893 return -EINVAL);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b151ad85666a..85b46ad68546 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -25,16 +25,14 @@
25 25
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include "amd_powerplay.h" 27#include "amd_powerplay.h"
28#include "pp_instance.h"
29#include "hardwaremanager.h" 28#include "hardwaremanager.h"
30#include "pp_power_source.h" 29#include "pp_power_source.h"
31#include "hwmgr_ppt.h" 30#include "hwmgr_ppt.h"
32#include "ppatomctrl.h" 31#include "ppatomctrl.h"
33#include "hwmgr_ppt.h" 32#include "hwmgr_ppt.h"
34#include "power_state.h" 33#include "power_state.h"
35#include "cgs_linux.h" 34#include "smu_helper.h"
36 35
37struct pp_instance;
38struct pp_hwmgr; 36struct pp_hwmgr;
39struct phm_fan_speed_info; 37struct phm_fan_speed_info;
40struct pp_atomctrl_voltage_table; 38struct pp_atomctrl_voltage_table;
@@ -237,6 +235,7 @@ struct pp_smumgr_func {
237 bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); 235 bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
238 bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); 236 bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
239 int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting); 237 int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
238	int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /* rw: true for read, false for write */
240}; 239};
241 240
242struct pp_hwmgr_func { 241struct pp_hwmgr_func {
@@ -702,6 +701,8 @@ struct pp_hwmgr {
702 uint32_t chip_family; 701 uint32_t chip_family;
703 uint32_t chip_id; 702 uint32_t chip_id;
704 uint32_t smu_version; 703 uint32_t smu_version;
704 bool pm_en;
705 struct mutex smu_lock;
705 706
706 uint32_t pp_table_version; 707 uint32_t pp_table_version;
707 void *device; 708 void *device;
@@ -748,7 +749,7 @@ struct pp_hwmgr {
748 struct pp_power_state *uvd_ps; 749 struct pp_power_state *uvd_ps;
749 struct amd_pp_display_configuration display_config; 750 struct amd_pp_display_configuration display_config;
750 uint32_t feature_mask; 751 uint32_t feature_mask;
751 752 bool avfs_supported;
752 /* UMD Pstate */ 753 /* UMD Pstate */
753 bool en_umd_pstate; 754 bool en_umd_pstate;
754 uint32_t power_profile_mode; 755 uint32_t power_profile_mode;
@@ -768,168 +769,17 @@ struct cgs_irq_src_funcs {
768 cgs_irq_handler_func_t handler; 769 cgs_irq_handler_func_t handler;
769}; 770};
770 771
771extern int hwmgr_early_init(struct pp_instance *handle); 772extern int hwmgr_early_init(struct pp_hwmgr *hwmgr);
772extern int hwmgr_hw_init(struct pp_instance *handle); 773extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
773extern int hwmgr_hw_fini(struct pp_instance *handle); 774extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
774extern int hwmgr_hw_suspend(struct pp_instance *handle); 775extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
775extern int hwmgr_hw_resume(struct pp_instance *handle); 776extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
776extern int hwmgr_handle_task(struct pp_instance *handle, 777extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
777 enum amd_pp_task task_id, 778 enum amd_pp_task task_id,
778 enum amd_pm_state_type *user_state); 779 enum amd_pm_state_type *user_state);
779extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
780 uint32_t value, uint32_t mask);
781
782extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
783 uint32_t indirect_port,
784 uint32_t index,
785 uint32_t value,
786 uint32_t mask);
787
788extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
789 uint32_t index,
790 uint32_t value, uint32_t mask);
791extern int phm_wait_for_indirect_register_unequal(
792 struct pp_hwmgr *hwmgr,
793 uint32_t indirect_port, uint32_t index,
794 uint32_t value, uint32_t mask);
795
796
797extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
798extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
799extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
800
801extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
802extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
803extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
804extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
805extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
806extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
807extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
808extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
809extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
810 uint32_t voltage);
811extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
812extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
813extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
814extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
815 uint16_t virtual_voltage_id, int32_t *sclk);
816extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
817extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
818extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
819
820extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
821extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
822extern int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
823
824extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
825 uint32_t sclk, uint16_t id, uint16_t *voltage);
826
827extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
828
829#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
830
831#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
832#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
833
834#define PHM_SET_FIELD(origval, reg, field, fieldval) \
835 (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
836 (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
837
838#define PHM_GET_FIELD(value, reg, field) \
839 (((value) & PHM_FIELD_MASK(reg, field)) >> \
840 PHM_FIELD_SHIFT(reg, field))
841
842
843/* Operations on named fields. */
844
845#define PHM_READ_FIELD(device, reg, field) \
846 PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
847
848#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
849 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
850 reg, field)
851
852#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
853 PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
854 reg, field)
855
856#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
857 cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
858 cgs_read_register(device, mm##reg), reg, field, fieldval))
859
860#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
861 cgs_write_ind_register(device, port, ix##reg, \
862 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
863 reg, field, fieldval))
864
865#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
866 cgs_write_ind_register(device, port, ix##reg, \
867 PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
868 reg, field, fieldval))
869
870#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
871 phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
872 780
873 781
874#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ 782#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
875 PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
876
877#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
878 PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
879 << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
880
881#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
882 phm_wait_for_indirect_register_unequal(hwmgr, \
883 mm##port##_INDEX, index, value, mask)
884
885#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
886 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
887
888#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
889 PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
890 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
891 PHM_FIELD_MASK(reg, field) )
892
893
894#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
895 port, index, value, mask) \
896 phm_wait_for_indirect_register_unequal(hwmgr, \
897 mm##port##_INDEX_11, index, value, mask)
898
899#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
900 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
901
902#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
903 PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
904 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
905 PHM_FIELD_MASK(reg, field))
906
907
908#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
909 port, index, value, mask) \
910 phm_wait_on_indirect_register(hwmgr, \
911 mm##port##_INDEX_11, index, value, mask)
912
913#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
914 PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
915
916#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
917 PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
918 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
919 PHM_FIELD_MASK(reg, field))
920
921#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
922 index, value, mask) \
923 phm_wait_for_register_unequal(hwmgr, \
924 index, value, mask)
925
926#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
927 PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
928 mm##reg, value, mask)
929 783
930#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
931 PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
932 (fieldval) << PHM_FIELD_SHIFT(reg, field), \
933 PHM_FIELD_MASK(reg, field))
934 784
935#endif /* _HWMGR_H_ */ 785#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
deleted file mode 100644
index 6c2fa33bd63a..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _PP_INSTANCE_H_
24#define _PP_INSTANCE_H_
25
26struct pp_hwmgr;
27
28struct pp_instance {
29 void *parent; /* e.g. amdgpu_device */
30 void *device; /* e.g. cgs_device */
31 bool pm_en;
32 struct pp_hwmgr *hwmgr;
33 struct mutex pp_lock;
34};
35
36#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 9bba0a069ed6..fc3a2a533586 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -26,27 +26,6 @@
26#include "amd_powerplay.h" 26#include "amd_powerplay.h"
27#include "hwmgr.h" 27#include "hwmgr.h"
28 28
29enum AVFS_BTC_STATUS {
30 AVFS_BTC_BOOT = 0,
31 AVFS_BTC_BOOT_STARTEDSMU,
32 AVFS_LOAD_VIRUS,
33 AVFS_BTC_VIRUS_LOADED,
34 AVFS_BTC_VIRUS_FAIL,
35 AVFS_BTC_COMPLETED_PREVIOUSLY,
36 AVFS_BTC_ENABLEAVFS,
37 AVFS_BTC_STARTED,
38 AVFS_BTC_FAILED,
39 AVFS_BTC_RESTOREVFT_FAILED,
40 AVFS_BTC_SAVEVFT_FAILED,
41 AVFS_BTC_DPMTABLESETUP_FAILED,
42 AVFS_BTC_COMPLETED_UNSAVED,
43 AVFS_BTC_COMPLETED_SAVED,
44 AVFS_BTC_COMPLETED_RESTORED,
45 AVFS_BTC_DISABLED,
46 AVFS_BTC_NOTSUPPORTED,
47 AVFS_BTC_SMUMSG_ERROR
48};
49
50enum SMU_TABLE { 29enum SMU_TABLE {
51 SMU_UVD_TABLE = 0, 30 SMU_UVD_TABLE = 0,
52 SMU_VCE_TABLE, 31 SMU_VCE_TABLE,
@@ -90,6 +69,11 @@ enum SMU_MAC_DEFINITION {
90 SMU_UVD_MCLK_HANDSHAKE_DISABLE, 69 SMU_UVD_MCLK_HANDSHAKE_DISABLE,
91}; 70};
92 71
72enum SMU10_TABLE_ID {
73 SMU10_WMTABLE = 0,
74 SMU10_CLOCKTABLE,
75};
76
93extern int smum_get_argument(struct pp_hwmgr *hwmgr); 77extern int smum_get_argument(struct pp_hwmgr *hwmgr);
94 78
95extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); 79extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
@@ -121,4 +105,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
121 105
122extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting); 106extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting);
123 107
108extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
109
124#endif 110#endif
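The new smc_table_manager hook folds table upload and download into a single entry point, with rw selecting direction per the comment above. A stubbed sketch of the calling convention (the memcpy backend and fixed size are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stub standing in for the SMU-side copy of one table */
static uint8_t smu_copy[16] = { 0xaa };

/* same shape as the new hook: rw true = read from SMU, false = write */
static int smc_table_manager(uint8_t *table, uint16_t table_id, bool rw)
{
	(void)table_id;		/* a real backend maps this to an address */
	if (rw)
		memcpy(table, smu_copy, sizeof(smu_copy));
	else
		memcpy(smu_copy, table, sizeof(smu_copy));
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 0 };

	smc_table_manager(buf, 0 /* e.g. SMU10_WMTABLE */, true);
	printf("first byte read back: 0x%x\n", buf[0]);
	return 0;
}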
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e4f553..735c38624ce1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -23,9 +23,9 @@
23# Makefile for the 'smu manager' sub-component of powerplay. 23# Makefile for the 'smu manager' sub-component of powerplay.
24# It provides the smu management services for the driver. 24# It provides the smu management services for the driver.
25 25
26SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ 26SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
27 polaris10_smumgr.o iceland_smumgr.o \ 27 polaris10_smumgr.o iceland_smumgr.o \
28 smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o 28 smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o
29 29
30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 30AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
31 31
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
deleted file mode 100644
index 957739aa6db9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ /dev/null
@@ -1,883 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/gfp.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu/smu_8_0_sh_mask.h"
33#include "smu8.h"
34#include "smu8_fusion.h"
35#include "cz_smumgr.h"
36#include "cz_ppsmc.h"
37#include "smu_ucode_xfer_cz.h"
38#include "gca/gfx_8_0_d.h"
39#include "gca/gfx_8_0_sh_mask.h"
40#include "smumgr.h"
41
42#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
43
44static const enum cz_scratch_entry firmware_list[] = {
45 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
46 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
47 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
48 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
49 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
50 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
51 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
52 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
53};
54
55static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
56{
57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return -EINVAL;
59
60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62}
63
64static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
65{
66 int result = 0;
67
68 if (hwmgr == NULL || hwmgr->device == NULL)
69 return -EINVAL;
70
71 result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
72 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
73 if (result != 0) {
74 pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
75 return result;
76 }
77
78 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
79 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
80
81 return 0;
82}
83
84/* Send a message to the SMC, and wait for its response.*/
85static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 int result = 0;
88
89 result = cz_send_msg_to_smc_async(hwmgr, msg);
90 if (result != 0)
91 return result;
92
93 return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
94 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
95}
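/*
 * A minimal caller sketch of the mailbox handshake above; PPSMC_MSG_InitJobs
 * is used in exactly this way later in this file:
 *
 *	int arg = -1;
 *
 *	if (cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs) == 0)
 *		arg = cz_smum_get_argument(hwmgr);
 *
 * The async variant only queues the message; the blocking variant re-polls
 * SMU_MP1_SRBM2P_RESP_0 until the SMU posts a non-zero response.
 */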
96
97static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
98 uint32_t smc_address, uint32_t limit)
99{
100 if (hwmgr == NULL || hwmgr->device == NULL)
101 return -EINVAL;
102
103 if (0 != (3 & smc_address)) {
104 pr_err("SMC address must be 4 byte aligned\n");
105 return -EINVAL;
106 }
107
108 if (limit <= (smc_address + 3)) {
109 pr_err("SMC address beyond the SMC RAM area\n");
110 return -EINVAL;
111 }
112
113 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
114 SMN_MP1_SRAM_START_ADDR + smc_address);
115
116 return 0;
117}
118
119static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
120 uint32_t smc_address, uint32_t value, uint32_t limit)
121{
122 int result;
123
124 if (hwmgr == NULL || hwmgr->device == NULL)
125 return -EINVAL;
126
127 result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
128 if (!result)
129 cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
130
131 return result;
132}
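/*
 * The write path above uses the classic INDEX/DATA indirection: program
 * mmMP0PUB_IND_INDEX_0 with the SRAM address, then touch the data port.
 * A hypothetical read-side counterpart (not part of this file) would be
 * symmetric:
 *
 *	static int cz_read_smc_sram_dword(struct pp_hwmgr *hwmgr,
 *			uint32_t smc_address, uint32_t *value, uint32_t limit)
 *	{
 *		int result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
 *
 *		if (!result)
 *			*value = cgs_read_register(hwmgr->device,
 *					mmMP0PUB_IND_DATA_0);
 *		return result;
 *	}
 */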
133
134static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 if (hwmgr == NULL || hwmgr->device == NULL)
138 return -EINVAL;
139
140 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
141
142 return cz_send_msg_to_smc(hwmgr, msg);
143}
144
145static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
146 uint32_t firmware)
147{
148 int i;
149 uint32_t index = SMN_MP1_SRAM_START_ADDR +
150 SMU8_FIRMWARE_HEADER_LOCATION +
151 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
152
153 if (hwmgr == NULL || hwmgr->device == NULL)
154 return -EINVAL;
155
156 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
157
158 for (i = 0; i < hwmgr->usec_timeout; i++) {
159 if (firmware ==
160 (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
161 break;
162 udelay(1);
163 }
164
165 if (i >= hwmgr->usec_timeout) {
166 pr_err("SMU check loaded firmware failed.\n");
167 return -EINVAL;
168 }
169
170 return 0;
171}
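/*
 * UcodeLoadStatus is a bitfield: the SMU sets one bit per firmware it has
 * loaded, so callers poll with an OR-ed mask, e.g. (as cz_start_smu() does
 * below):
 *
 *	cz_check_fw_load_finish(hwmgr,
 *			UCODE_ID_RLC_G_MASK | UCODE_ID_SDMA0_MASK);
 */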
172
173static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
174{
175 uint32_t reg_data;
176 uint32_t tmp;
177 int ret = 0;
178 struct cgs_firmware_info info = {0};
179 struct cz_smumgr *cz_smu;
180
181 if (hwmgr == NULL || hwmgr->device == NULL)
182 return -EINVAL;
183
184 cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
185 ret = cgs_get_firmware_info(hwmgr->device,
186 CGS_UCODE_ID_CP_MEC, &info);
187
188 if (ret)
189 return -EINVAL;
190
191 /* Disable MEC parsing/prefetching */
192 tmp = cgs_read_register(hwmgr->device,
193 mmCP_MEC_CNTL);
194 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
196 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
197
198 tmp = cgs_read_register(hwmgr->device,
199 mmCP_CPC_IC_BASE_CNTL);
200
201 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
205 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
206
207 reg_data = lower_32_bits(info.mc_addr) &
208 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
209 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
210
211 reg_data = upper_32_bits(info.mc_addr) &
212 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
213 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
214
215 return 0;
216}
217
218static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
219 enum cz_scratch_entry firmware_enum)
220{
221 uint8_t ret = 0;
222
223 switch (firmware_enum) {
224 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
225 ret = UCODE_ID_SDMA0;
226 break;
227 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
228 if (hwmgr->chip_id == CHIP_STONEY)
229 ret = UCODE_ID_SDMA0;
230 else
231 ret = UCODE_ID_SDMA1;
232 break;
233 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
234 ret = UCODE_ID_CP_CE;
235 break;
236 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
237 ret = UCODE_ID_CP_PFP;
238 break;
239 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
240 ret = UCODE_ID_CP_ME;
241 break;
242 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
243 ret = UCODE_ID_CP_MEC_JT1;
244 break;
245 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
246 if (hwmgr->chip_id == CHIP_STONEY)
247 ret = UCODE_ID_CP_MEC_JT1;
248 else
249 ret = UCODE_ID_CP_MEC_JT2;
250 break;
251 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
252 ret = UCODE_ID_GMCON_RENG;
253 break;
254 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
255 ret = UCODE_ID_RLC_G;
256 break;
257 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
258 ret = UCODE_ID_RLC_SCRATCH;
259 break;
260 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
261 ret = UCODE_ID_RLC_SRM_ARAM;
262 break;
263 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
264 ret = UCODE_ID_RLC_SRM_DRAM;
265 break;
266 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
267 ret = UCODE_ID_DMCU_ERAM;
268 break;
269 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
270 ret = UCODE_ID_DMCU_IRAM;
271 break;
272 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
273 ret = TASK_ARG_INIT_MM_PWR_LOG;
274 break;
275 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
276 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
277 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
278 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
279 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
280 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
281 ret = TASK_ARG_REG_MMIO;
282 break;
283 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
284 ret = TASK_ARG_INIT_CLK_TABLE;
285 break;
286 }
287
288 return ret;
289}
290
291static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
292{
293 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
294
295 switch (fw_type) {
296 case UCODE_ID_SDMA0:
297 result = CGS_UCODE_ID_SDMA0;
298 break;
299 case UCODE_ID_SDMA1:
300 result = CGS_UCODE_ID_SDMA1;
301 break;
302 case UCODE_ID_CP_CE:
303 result = CGS_UCODE_ID_CP_CE;
304 break;
305 case UCODE_ID_CP_PFP:
306 result = CGS_UCODE_ID_CP_PFP;
307 break;
308 case UCODE_ID_CP_ME:
309 result = CGS_UCODE_ID_CP_ME;
310 break;
311 case UCODE_ID_CP_MEC_JT1:
312 result = CGS_UCODE_ID_CP_MEC_JT1;
313 break;
314 case UCODE_ID_CP_MEC_JT2:
315 result = CGS_UCODE_ID_CP_MEC_JT2;
316 break;
317 case UCODE_ID_RLC_G:
318 result = CGS_UCODE_ID_RLC_G;
319 break;
320 default:
321 break;
322 }
323
324 return result;
325}
326
327static int cz_smu_populate_single_scratch_task(
328 struct pp_hwmgr *hwmgr,
329 enum cz_scratch_entry fw_enum,
330 uint8_t type, bool is_last)
331{
332 uint8_t i;
333 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
334 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
335 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
336
337 task->type = type;
338 task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
339 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
340
341 for (i = 0; i < cz_smu->scratch_buffer_length; i++)
342 if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
343 break;
344
345 if (i >= cz_smu->scratch_buffer_length) {
346 pr_err("Invalid Firmware Type\n");
347 return -EINVAL;
348 }
349
350 task->addr.low = lower_32_bits(cz_smu->scratch_buffer[i].mc_addr);
351 task->addr.high = upper_32_bits(cz_smu->scratch_buffer[i].mc_addr);
352 task->size_bytes = cz_smu->scratch_buffer[i].data_size;
353
354 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
355 struct cz_ih_meta_data *pIHReg_restore =
356 (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
357 pIHReg_restore->command =
358 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
359 }
360
361 return 0;
362}
363
364static int cz_smu_populate_single_ucode_load_task(
365 struct pp_hwmgr *hwmgr,
366 enum cz_scratch_entry fw_enum,
367 bool is_last)
368{
369 uint8_t i;
370 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
371 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
372 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
373
374 task->type = TASK_TYPE_UCODE_LOAD;
375 task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
376 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
377
378 for (i = 0; i < cz_smu->driver_buffer_length; i++)
379 if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
380 break;
381
382 if (i >= cz_smu->driver_buffer_length) {
383 pr_err("Invalid Firmware Type\n");
384 return -EINVAL;
385 }
386
387 task->addr.low = lower_32_bits(cz_smu->driver_buffer[i].mc_addr);
388 task->addr.high = upper_32_bits(cz_smu->driver_buffer[i].mc_addr);
389 task->size_bytes = cz_smu->driver_buffer[i].data_size;
390
391 return 0;
392}
393
394static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
395{
396 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
397
398 cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
399 cz_smu_populate_single_scratch_task(hwmgr,
400 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
401 TASK_TYPE_UCODE_SAVE, true);
402
403 return 0;
404}
405
406static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
407{
408 int i;
409 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
410 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
411
412 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
413 toc->JobList[i] = (uint8_t)IGNORE_JOB;
414
415 return 0;
416}
417
418static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
419{
420 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
421 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
422
423 toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
424 cz_smu_populate_single_scratch_task(hwmgr,
425 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
426 TASK_TYPE_UCODE_SAVE, false);
427
428 cz_smu_populate_single_scratch_task(hwmgr,
429 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
430 TASK_TYPE_UCODE_SAVE, true);
431
432 return 0;
433}
434
435
436static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
437{
438 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
439 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
440
441 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
442
443 cz_smu_populate_single_ucode_load_task(hwmgr,
444 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
445 cz_smu_populate_single_ucode_load_task(hwmgr,
446 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
447 cz_smu_populate_single_ucode_load_task(hwmgr,
448 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
449 cz_smu_populate_single_ucode_load_task(hwmgr,
450 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
451
452 if (hwmgr->chip_id == CHIP_STONEY)
453 cz_smu_populate_single_ucode_load_task(hwmgr,
454 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
455 else
456 cz_smu_populate_single_ucode_load_task(hwmgr,
457 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
458
459 cz_smu_populate_single_ucode_load_task(hwmgr,
460 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
461
462 /* populate scratch */
463 cz_smu_populate_single_scratch_task(hwmgr,
464 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
465 TASK_TYPE_UCODE_LOAD, false);
466
467 cz_smu_populate_single_scratch_task(hwmgr,
468 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
469 TASK_TYPE_UCODE_LOAD, false);
470
471 cz_smu_populate_single_scratch_task(hwmgr,
472 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
473 TASK_TYPE_UCODE_LOAD, true);
474
475 return 0;
476}
477
478static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
479{
480 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
481
482 cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
483
484 cz_smu_populate_single_scratch_task(hwmgr,
485 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
486 TASK_TYPE_INITIALIZE, true);
487 return 0;
488}
489
490static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
491{
492 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
493
494 cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
495
496 cz_smu_populate_single_ucode_load_task(hwmgr,
497 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
498 if (hwmgr->chip_id != CHIP_STONEY)
499 cz_smu_populate_single_ucode_load_task(hwmgr,
500 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
501 cz_smu_populate_single_ucode_load_task(hwmgr,
502 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
503 cz_smu_populate_single_ucode_load_task(hwmgr,
504 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
505 cz_smu_populate_single_ucode_load_task(hwmgr,
506 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
507 cz_smu_populate_single_ucode_load_task(hwmgr,
508 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
509 if (hwmgr->chip_id != CHIP_STONEY)
510 cz_smu_populate_single_ucode_load_task(hwmgr,
511 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
512 cz_smu_populate_single_ucode_load_task(hwmgr,
513 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
514
515 return 0;
516}
517
518static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
519{
520 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
521
522 cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
523
524 cz_smu_populate_single_scratch_task(hwmgr,
525 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
526 TASK_TYPE_INITIALIZE, true);
527
528 return 0;
529}
530
531static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
532{
533 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
534
535 cz_smu->toc_entry_used_count = 0;
536 cz_smu_initialize_toc_empty_job_list(hwmgr);
537 cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
538 cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
539 cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
540 cz_smu_construct_toc_for_power_profiling(hwmgr);
541 cz_smu_construct_toc_for_bootup(hwmgr);
542 cz_smu_construct_toc_for_clock_table(hwmgr);
543
544 return 0;
545}
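/*
 * The TOC built here is a flat array of SMU_Task entries: each helper
 * records the index of its first task (toc_entry_aram,
 * toc_entry_initialize_index, ...) and chains tasks through ->next until
 * END_OF_TASK_LIST, so a whole section can later be run with one message,
 * e.g.:
 *
 *	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
 *			cz_smu->toc_entry_aram);
 */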
546
547static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
548{
549 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
550 uint32_t firmware_type;
551 uint32_t i;
552 int ret;
553 enum cgs_ucode_id ucode_id;
554 struct cgs_firmware_info info = {0};
555
556 cz_smu->driver_buffer_length = 0;
557
558 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
559
560 firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
561 firmware_list[i]);
562
563 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
564
565 ret = cgs_get_firmware_info(hwmgr->device,
566 ucode_id, &info);
567
568 if (ret == 0) {
569 cz_smu->driver_buffer[i].mc_addr = info.mc_addr;
570
571 cz_smu->driver_buffer[i].data_size = info.image_size;
572
573 cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
574 cz_smu->driver_buffer_length++;
575 }
576 }
577
578 return 0;
579}
580
581static int cz_smu_populate_single_scratch_entry(
582 struct pp_hwmgr *hwmgr,
583 enum cz_scratch_entry scratch_type,
584 uint32_t ulsize_byte,
585 struct cz_buffer_entry *entry)
586{
587 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
588 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
589
590 entry->data_size = ulsize_byte;
591 entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
592 cz_smu->smu_buffer_used_bytes;
593 entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes;
594 entry->firmware_ID = scratch_type;
595
596 cz_smu->smu_buffer_used_bytes += ulsize_aligned;
597
598 return 0;
599}
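/*
 * Scratch entries are carved out of the pre-sized smu_buffer by a simple
 * bump allocator: the entry records its requested size, but the cursor
 * advances by the 32-byte-aligned size (see SIZE_ALIGN_32 above), so every
 * entry starts on a 32-byte boundary.
 */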
600
601static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
602{
603 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
604 unsigned long i;
605
606 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
607 if (cz_smu->scratch_buffer[i].firmware_ID
608 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
609 break;
610 }
611
612 *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
613
614 cz_send_msg_to_smc_with_parameter(hwmgr,
615 PPSMC_MSG_SetClkTableAddrHi,
616 upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
617
618 cz_send_msg_to_smc_with_parameter(hwmgr,
619 PPSMC_MSG_SetClkTableAddrLo,
620 lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
621
622 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
623 cz_smu->toc_entry_clock_table);
624
625 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
626
627 return 0;
628}
629
630static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
631{
632 struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
633 unsigned long i;
634
635 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
636 if (cz_smu->scratch_buffer[i].firmware_ID
637 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
638 break;
639 }
640
641 cz_send_msg_to_smc_with_parameter(hwmgr,
642 PPSMC_MSG_SetClkTableAddrHi,
643 upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
644
645 cz_send_msg_to_smc_with_parameter(hwmgr,
646 PPSMC_MSG_SetClkTableAddrLo,
647 lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
648
649 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
650 cz_smu->toc_entry_clock_table);
651
652 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
653
654 return 0;
655}
656
657static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
658{
659 struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
660 uint32_t smc_address;
661
662 if (!hwmgr->reload_fw) {
663 pr_info("skip reloading...\n");
664 return 0;
665 }
666
667 cz_smu_populate_firmware_entries(hwmgr);
668
669 cz_smu_construct_toc(hwmgr);
670
671 smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
672 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
673
674 cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
675
676 cz_send_msg_to_smc_with_parameter(hwmgr,
677 PPSMC_MSG_DriverDramAddrHi,
678 upper_32_bits(cz_smu->toc_buffer.mc_addr));
679
680 cz_send_msg_to_smc_with_parameter(hwmgr,
681 PPSMC_MSG_DriverDramAddrLo,
682 lower_32_bits(cz_smu->toc_buffer.mc_addr));
683
684 cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
685
686 cz_send_msg_to_smc_with_parameter(hwmgr,
687 PPSMC_MSG_ExecuteJob,
688 cz_smu->toc_entry_aram);
689 cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
690 cz_smu->toc_entry_power_profiling_index);
691
692 return cz_send_msg_to_smc_with_parameter(hwmgr,
693 PPSMC_MSG_ExecuteJob,
694 cz_smu->toc_entry_initialize_index);
695}
696
697static int cz_start_smu(struct pp_hwmgr *hwmgr)
698{
699 int ret = 0;
700 uint32_t fw_to_check = 0;
701 struct cgs_firmware_info info = {0};
702 uint32_t index = SMN_MP1_SRAM_START_ADDR +
703 SMU8_FIRMWARE_HEADER_LOCATION +
704 offsetof(struct SMU8_Firmware_Header, Version);
705
706
707 if (hwmgr == NULL || hwmgr->device == NULL)
708 return -EINVAL;
709
710 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
711 hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
712 info.version = hwmgr->smu_version >> 8;
713 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
714
715 fw_to_check = UCODE_ID_RLC_G_MASK |
716 UCODE_ID_SDMA0_MASK |
717 UCODE_ID_SDMA1_MASK |
718 UCODE_ID_CP_CE_MASK |
719 UCODE_ID_CP_ME_MASK |
720 UCODE_ID_CP_PFP_MASK |
721 UCODE_ID_CP_MEC_JT1_MASK |
722 UCODE_ID_CP_MEC_JT2_MASK;
723
724 if (hwmgr->chip_id == CHIP_STONEY)
725 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
726
727 ret = cz_request_smu_load_fw(hwmgr);
728 if (ret)
729 pr_err("SMU firmware load failed\n");
730
731 cz_check_fw_load_finish(hwmgr, fw_to_check);
732
733 ret = cz_load_mec_firmware(hwmgr);
734 if (ret)
735 pr_err("Mec Firmware load failed\n");
736
737 return ret;
738}
739
740static int cz_smu_init(struct pp_hwmgr *hwmgr)
741{
742 int ret = 0;
743 struct cz_smumgr *cz_smu;
744
745 cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
746 if (cz_smu == NULL)
747 return -ENOMEM;
748
749 hwmgr->smu_backend = cz_smu;
750
751 cz_smu->toc_buffer.data_size = 4096;
752 cz_smu->smu_buffer.data_size =
753 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
754 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
756 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
757 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
758
759 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
760 cz_smu->toc_buffer.data_size,
761 PAGE_SIZE,
762 AMDGPU_GEM_DOMAIN_VRAM,
763 &cz_smu->toc_buffer.handle,
764 &cz_smu->toc_buffer.mc_addr,
765 &cz_smu->toc_buffer.kaddr);
766 if (ret)
767 return -EINVAL;
768
769 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
770 cz_smu->smu_buffer.data_size,
771 PAGE_SIZE,
772 AMDGPU_GEM_DOMAIN_VRAM,
773 &cz_smu->smu_buffer.handle,
774 &cz_smu->smu_buffer.mc_addr,
775 &cz_smu->smu_buffer.kaddr);
776 if (ret) {
777 amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
778 &cz_smu->toc_buffer.mc_addr,
779 &cz_smu->toc_buffer.kaddr);
780 return -EINVAL;
781 }
782
783 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
784 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
785 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
786 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
787 pr_err("Error when Populate Firmware Entry.\n");
788 return -1;
789 }
790
791 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
792 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
793 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
794 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
795 pr_err("Error when Populate Firmware Entry.\n");
796 return -1;
797 }
798 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
799 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
800 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
801 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
802 pr_err("Error when Populate Firmware Entry.\n");
803 return -1;
804 }
805
806 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
807 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
808 sizeof(struct SMU8_MultimediaPowerLogData),
809 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
810 pr_err("Error when Populate Firmware Entry.\n");
811 return -1;
812 }
813
814 if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
815 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
816 sizeof(struct SMU8_Fusion_ClkTable),
817 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
818 pr_err("Error when Populate Firmware Entry.\n");
819 return -1;
820 }
821
822 return 0;
823}
824
825static int cz_smu_fini(struct pp_hwmgr *hwmgr)
826{
827 struct cz_smumgr *cz_smu;
828
829 if (hwmgr == NULL || hwmgr->device == NULL)
830 return -EINVAL;
831
832 cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
833 if (cz_smu) {
834 amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
835 &cz_smu->toc_buffer.mc_addr,
836 &cz_smu->toc_buffer.kaddr);
837 amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
838 &cz_smu->smu_buffer.mc_addr,
839 &cz_smu->smu_buffer.kaddr);
840 kfree(cz_smu);
841 }
842
843 return 0;
844}
845
846static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
847 unsigned long check_feature)
848{
849 int result;
850 unsigned long features;
851
852 result = cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
853 if (result == 0) {
854 features = smum_get_argument(hwmgr);
855 if (features & check_feature)
856 return true;
857 }
858
859 return false;
860}
861
862static bool cz_is_dpm_running(struct pp_hwmgr *hwmgr)
863{
864 if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
865 return true;
866 return false;
867}
868
869const struct pp_smumgr_func cz_smu_funcs = {
870 .smu_init = cz_smu_init,
871 .smu_fini = cz_smu_fini,
872 .start_smu = cz_start_smu,
873 .check_fw_load_finish = cz_check_fw_load_finish,
874 .request_smu_load_fw = NULL,
875 .request_smu_load_specific_fw = NULL,
876 .get_argument = cz_smum_get_argument,
877 .send_msg_to_smc = cz_send_msg_to_smc,
878 .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
879 .download_pptable_settings = cz_download_pptable_settings,
880 .upload_pptable_settings = cz_upload_pptable_settings,
881 .is_dpm_running = cz_is_dpm_running,
882};
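/*
 * These entries back the generic smum_* wrappers: a call such as
 * smum_get_argument(hwmgr) (used by cz_dpm_check_smu_features() above)
 * dispatches through this table to cz_smum_get_argument().
 */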
883
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 0b2b5d155e5e..95fcda37f890 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -205,9 +205,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
 	int result = 0;
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
-	if (0 != smu_data->avfs.avfs_btc_param) {
+	if (0 != smu_data->avfs_btc_param) {
 		if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+				PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
 			pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
 			result = -EINVAL;
 		}
@@ -261,43 +261,21 @@ static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr)
 {
-	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
-	switch (smu_data->avfs.avfs_btc_status) {
-	case AVFS_BTC_COMPLETED_PREVIOUSLY:
-		break;
+	PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
+			"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
+			" table over to SMU",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+			"[AVFS][fiji_avfs_event_mgr] Could not setup "
+			"Pwr Virus for AVFS ",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
+			"[AVFS][fiji_avfs_event_mgr] Failure at "
+			"fiji_start_avfs_btc. AVFS Disabled",
+			return -EINVAL);
 
-	case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
-		if (!smu_started)
-			break;
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
-		PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
-			"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
-			" table over to SMU",
-			return -EINVAL;);
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-		PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
-			"[AVFS][fiji_avfs_event_mgr] Could not setup "
-			"Pwr Virus for AVFS ",
-			return -EINVAL;);
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
-		PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
-			"[AVFS][fiji_avfs_event_mgr] Failure at "
-			"fiji_start_avfs_btc. AVFS Disabled",
-			return -EINVAL;);
-
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
-		break;
-	case AVFS_BTC_DISABLED: /* Do nothing */
-	case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
-	case AVFS_BTC_ENABLEAVFS:
-		break;
-	default:
-		pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
-		break;
-	}
 	return 0;
 }
 
@@ -309,8 +287,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 	/* Only start SMC if SMC RAM is not running */
 	if (!(smu7_is_smc_ram_running(hwmgr)
 		|| cgs_is_virtualization_enabled(hwmgr->device))) {
-		fiji_avfs_event_mgr(hwmgr, false);
-
 		/* Check if SMU is running in protected mode */
 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
 				CGS_IND_REG__SMC,
@@ -323,7 +299,8 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 			if (result)
 				return result;
 		}
-		fiji_avfs_event_mgr(hwmgr, true);
+		if (fiji_avfs_event_mgr(hwmgr))
+			hwmgr->avfs_supported = false;
 	}
 
 	/* To initialize all clock gating before RLC loaded and running.*/
@@ -377,8 +354,10 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
 
 	hwmgr->smu_backend = fiji_priv;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(fiji_priv);
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -2315,19 +2294,12 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 
 static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 {
-	int ret;
-	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
-	if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+	if (!hwmgr->avfs_supported)
 		return 0;
 
-	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
-	if (!ret)
-		/* If this param is not changed, this function could fire unnecessarily */
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-	return ret;
+	return 0;
 }
 
 static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 6255edf58721..4e2f62e659ef 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -271,8 +271,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
 
 	hwmgr->smu_backend = iceland_priv;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(iceland_priv);
 		return -EINVAL;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 632d1ca2f69c..03ec1e59876b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,13 +99,13 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
 	int result = 0;
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
-	if (0 != smu_data->avfs.avfs_btc_param) {
-		if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+	if (0 != smu_data->avfs_btc_param) {
+		if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
 			pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
 			result = -1;
 		}
 	}
-	if (smu_data->avfs.avfs_btc_param > 1) {
+	if (smu_data->avfs_btc_param > 1) {
 		/* Soft-Reset to reset the engine before loading uCode */
 		/* halt */
 		cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
@@ -173,46 +173,25 @@ static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
 
 
 static int
-polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
-	switch (smu_data->avfs.avfs_btc_status) {
-	case AVFS_BTC_COMPLETED_PREVIOUSLY:
-		break;
-
-	case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
-
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
-		PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
-			"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
-			return -EINVAL);
-
-		if (smu_data->avfs.avfs_btc_param > 1) {
-			pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
-			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-			PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
-				"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
-				return -EINVAL);
-		}
+	PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
+			"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+			return -EINVAL);
 
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
-		PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
-			"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
-			return -EINVAL);
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
-		break;
-
-	case AVFS_BTC_DISABLED:
-	case AVFS_BTC_ENABLEAVFS:
-	case AVFS_BTC_NOTSUPPORTED:
-		break;
-
-	default:
-		pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
-		break;
+	if (smu_data->avfs_btc_param > 1) {
+		pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
+		PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+				"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
+				return -EINVAL);
 	}
 
+	PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
+			"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
+			return -EINVAL);
+
 	return 0;
 }
 
@@ -312,11 +291,10 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-	bool SMU_VFT_INTACT;
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!smu7_is_smc_ram_running(hwmgr)) {
-		SMU_VFT_INTACT = false;
+	if (!(smu7_is_smc_ram_running(hwmgr)
+		|| cgs_is_virtualization_enabled(hwmgr->device))) {
 		smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
 		smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
@@ -337,11 +315,9 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
 		if (result != 0)
 			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
 
-		polaris10_avfs_event_mgr(hwmgr, true);
-	} else
-		SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
+		polaris10_avfs_event_mgr(hwmgr);
+	}
 
-	polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
 	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
 	smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
 			&(smu_data->smu7_data.soft_regs_start), 0x40000);
@@ -373,8 +349,10 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
 
 	hwmgr->smu_backend = smu_data;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(smu_data);
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1732,8 +1710,8 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 			table_info->vdd_dep_on_sclk;
 
 
-	if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-		return result;
+	if (!hwmgr->avfs_supported)
+		return 0;
 
 	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
 
@@ -2070,24 +2048,17 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
 
 int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 {
-	int ret;
-	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+	if (!hwmgr->avfs_supported)
 		return 0;
 
-	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+	smum_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
 
-	ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
-			0 : -1;
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
-	if (!ret)
-		/* If this param is not changed, this function could fire unnecessarily */
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-	return ret;
+	return 0;
 }
 
 static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
deleted file mode 100644
index e2ee23ade5c5..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ /dev/null
@@ -1,399 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "rv_inc.h"
26#include "pp_soc15.h"
27#include "rv_smumgr.h"
28#include "ppatomctrl.h"
29#include "rv_ppsmc.h"
30#include "smu10_driver_if.h"
31#include "smu10.h"
32#include "ppatomctrl.h"
33#include "pp_debug.h"
34#include "smu_ucode_xfer_vi.h"
35#include "smu7_smumgr.h"
36
37#define VOLTAGE_SCALE 4
38
39#define BUFFER_SIZE 80000
40#define MAX_STRING_SIZE 15
41#define BUFFER_SIZETWO 131072
42
43#define MP0_Public 0x03800000
44#define MP0_SRAM 0x03900000
45#define MP1_Public 0x03b00000
46#define MP1_SRAM 0x03c00004
47
48#define smnMP1_FIRMWARE_FLAGS 0x3010028
49
50
51bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
52{
53 uint32_t mp1_fw_flags, reg;
54
55 reg = soc15_get_register_offset(NBIF_HWID, 0,
56 mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
57
58 cgs_write_register(hwmgr->device, reg,
59 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
60
61 reg = soc15_get_register_offset(NBIF_HWID, 0,
62 mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
63
64 mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
65
66 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
67 return true;
68
69 return false;
70}
71
72static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
73{
74 uint32_t reg;
75
76 if (!rv_is_smc_ram_running(hwmgr))
77 return -EINVAL;
78
79 reg = soc15_get_register_offset(MP1_HWID, 0,
80 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
81
82 phm_wait_for_register_unequal(hwmgr, reg,
83 0, MP1_C2PMSG_90__CONTENT_MASK);
84
85 return cgs_read_register(hwmgr->device, reg);
86}
87
88int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
89 uint16_t msg)
90{
91 uint32_t reg;
92
93 if (!rv_is_smc_ram_running(hwmgr))
94 return -EINVAL;
95
96 reg = soc15_get_register_offset(MP1_HWID, 0,
97 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
98 cgs_write_register(hwmgr->device, reg, msg);
99
100 return 0;
101}
102
103int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
104{
105 uint32_t reg;
106
107 reg = soc15_get_register_offset(MP1_HWID, 0,
108 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
109
110 *arg = cgs_read_register(hwmgr->device, reg);
111
112 return 0;
113}
114
115int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
116{
117 uint32_t reg;
118
119 rv_wait_for_response(hwmgr);
120
121 reg = soc15_get_register_offset(MP1_HWID, 0,
122 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
123 cgs_write_register(hwmgr->device, reg, 0);
124
125 rv_send_msg_to_smc_without_waiting(hwmgr, msg);
126
127 if (rv_wait_for_response(hwmgr) == 0)
128 printk("Failed to send Message %x.\n", msg);
129
130 return 0;
131}
132
133
134int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 uint32_t reg;
138
139 rv_wait_for_response(hwmgr);
140
141 reg = soc15_get_register_offset(MP1_HWID, 0,
142 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
143 cgs_write_register(hwmgr->device, reg, 0);
144
145 reg = soc15_get_register_offset(MP1_HWID, 0,
146 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
147 cgs_write_register(hwmgr->device, reg, parameter);
148
149 rv_send_msg_to_smc_without_waiting(hwmgr, msg);
150
151
152 if (rv_wait_for_response(hwmgr) == 0)
153 printk("Failed to send Message %x.\n", msg);
154
155 return 0;
156}
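/*
 * Message traffic on this ASIC goes through the MP1 C2PMSG mailbox:
 * C2PMSG_90 is the response/handshake register, C2PMSG_66 carries the
 * message ID and C2PMSG_82 the 32-bit argument.  A minimal round trip,
 * as rv_start_smu() below performs it:
 *
 *	rv_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
 *	rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
 */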
157
158int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
159 uint8_t *table, int16_t table_id)
160{
161 struct rv_smumgr *priv =
162 (struct rv_smumgr *)(hwmgr->smu_backend);
163
164 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
165 "Invalid SMU Table ID!", return -EINVAL;);
166 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
167 "Invalid SMU Table version!", return -EINVAL;);
168 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
169 "Invalid SMU Table Length!", return -EINVAL;);
170 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
171 PPSMC_MSG_SetDriverDramAddrHigh,
172 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
173 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
174 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
175 PPSMC_MSG_SetDriverDramAddrLow,
176 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
177 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
178 return -EINVAL;);
179 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
180 PPSMC_MSG_TransferTableSmu2Dram,
181 priv->smu_tables.entry[table_id].table_id) == 0,
182 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
183 return -EINVAL;);
184
185 memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
186 priv->smu_tables.entry[table_id].size);
187
188 return 0;
189}
190
191int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
192 uint8_t *table, int16_t table_id)
193{
194 struct rv_smumgr *priv =
195 (struct rv_smumgr *)(hwmgr->smu_backend);
196
197 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
198 "Invalid SMU Table ID!", return -EINVAL;);
199 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
200 "Invalid SMU Table version!", return -EINVAL;);
201 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
202 "Invalid SMU Table Length!", return -EINVAL;);
203
204 memcpy(priv->smu_tables.entry[table_id].table, table,
205 priv->smu_tables.entry[table_id].size);
206
207 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
208 PPSMC_MSG_SetDriverDramAddrHigh,
209 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
210 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
211 return -EINVAL;);
212 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
213 PPSMC_MSG_SetDriverDramAddrLow,
214 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
215 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
216 return -EINVAL;);
217 PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
218 PPSMC_MSG_TransferTableDram2Smu,
219 priv->smu_tables.entry[table_id].table_id) == 0,
220 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
221 return -EINVAL;);
222
223 return 0;
224}
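/*
 * Both copy helpers share one protocol: publish the table's DMA address
 * with PPSMC_MSG_SetDriverDramAddrHigh/Low, then trigger the move with a
 * direction message (TransferTableSmu2Dram vs. TransferTableDram2Smu);
 * the memcpy against the CPU mapping happens on the driver side.
 */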
225
226static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
227{
228 uint32_t smc_driver_if_version;
229
230 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
231 PPSMC_MSG_GetDriverIfVersion),
232 "Attempt to get SMC IF Version Number Failed!",
233 return -EINVAL);
234 PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
235 &smc_driver_if_version),
236 "Attempt to read SMC IF Version Number Failed!",
237 return -EINVAL);
238
239 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
240 return -EINVAL;
241
242 return 0;
243}
244
245/* sdma is disabled by default in vbios, need to re-enable in driver */
246static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
247{
248 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
249 PPSMC_MSG_PowerUpSdma),
250 "Attempt to power up sdma Failed!",
251 return -EINVAL);
252
253 return 0;
254}
255
256static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
257{
258 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
259 PPSMC_MSG_PowerDownSdma),
260 "Attempt to power down sdma Failed!",
261 return -EINVAL);
262
263 return 0;
264}
265
266/* vcn is disabled by default in vbios, need to re-enable in driver */
267static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
268{
269 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
270 PPSMC_MSG_PowerUpVcn, 0),
271 "Attempt to power up vcn Failed!",
272 return -EINVAL);
273
274 return 0;
275}
276
277static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
278{
279 PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
280 PPSMC_MSG_PowerDownVcn, 0),
281 "Attempt to power down vcn Failed!",
282 return -EINVAL);
283
284 return 0;
285}
286
287static int rv_smu_fini(struct pp_hwmgr *hwmgr)
288{
289 struct rv_smumgr *priv =
290 (struct rv_smumgr *)(hwmgr->smu_backend);
291
292 if (priv) {
293 rv_smc_disable_sdma(hwmgr);
294 rv_smc_disable_vcn(hwmgr);
295 amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
296 &priv->smu_tables.entry[WMTABLE].mc_addr,
297 priv->smu_tables.entry[WMTABLE].table);
298 amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
299 &priv->smu_tables.entry[CLOCKTABLE].mc_addr,
300 priv->smu_tables.entry[CLOCKTABLE].table);
301 kfree(hwmgr->smu_backend);
302 hwmgr->smu_backend = NULL;
303 }
304
305 return 0;
306}
307
308static int rv_start_smu(struct pp_hwmgr *hwmgr)
309{
310 struct cgs_firmware_info info = {0};
311
312 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
313 rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
314 info.version = hwmgr->smu_version >> 8;
315
316 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
317
318 if (rv_verify_smc_interface(hwmgr))
319 return -EINVAL;
320 if (rv_smc_enable_sdma(hwmgr))
321 return -EINVAL;
322 if (rv_smc_enable_vcn(hwmgr))
323 return -EINVAL;
324
325 return 0;
326}
327
328static int rv_smu_init(struct pp_hwmgr *hwmgr)
329{
330 struct amdgpu_bo *handle = NULL;
331 struct rv_smumgr *priv;
332 uint64_t mc_addr;
333 void *kaddr = NULL;
334 int r;
335
336 priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
337
338 if (!priv)
339 return -ENOMEM;
340
341 hwmgr->smu_backend = priv;
342
343 /* allocate space for watermarks table */
344 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
345 sizeof(Watermarks_t),
346 PAGE_SIZE,
347 AMDGPU_GEM_DOMAIN_VRAM,
348 &handle,
349 &mc_addr,
350 &kaddr);
351
352 if (r)
353 return -EINVAL;
354
355 priv->smu_tables.entry[WMTABLE].version = 0x01;
356 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
357 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
358 priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
359 priv->smu_tables.entry[WMTABLE].table = kaddr;
360 priv->smu_tables.entry[WMTABLE].handle = handle;
361
362	/* allocate space for DPM clocks table */
363 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
364 sizeof(DpmClocks_t),
365 PAGE_SIZE,
366 AMDGPU_GEM_DOMAIN_VRAM,
367 &handle,
368 &mc_addr,
369 &kaddr);
370
371 if (r) {
372 amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
373 &priv->smu_tables.entry[WMTABLE].mc_addr,
374 &priv->smu_tables.entry[WMTABLE].table);
375 return -EINVAL;
376 }
377
378 priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
379 priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
380 priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
381 priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr;
382 priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
383 priv->smu_tables.entry[CLOCKTABLE].handle = handle;
384
385 return 0;
386}
387
388const struct pp_smumgr_func rv_smu_funcs = {
389 .smu_init = &rv_smu_init,
390 .smu_fini = &rv_smu_fini,
391 .start_smu = &rv_start_smu,
392 .request_smu_load_specific_fw = NULL,
393 .send_msg_to_smc = &rv_send_msg_to_smc,
394 .send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
395 .download_pptable_settings = NULL,
396 .upload_pptable_settings = NULL,
397};
398
399
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
new file mode 100644
index 000000000000..bc53f2beda30
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -0,0 +1,344 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "smumgr.h"
25#include "smu10_inc.h"
26#include "pp_soc15.h"
27#include "smu10_smumgr.h"
28#include "ppatomctrl.h"
29#include "rv_ppsmc.h"
30#include "smu10_driver_if.h"
31#include "smu10.h"
32#include "ppatomctrl.h"
33#include "pp_debug.h"
34
35
36#define VOLTAGE_SCALE 4
37
38#define BUFFER_SIZE 80000
39#define MAX_STRING_SIZE 15
40#define BUFFER_SIZETWO 131072
41
42#define MP0_Public 0x03800000
43#define MP0_SRAM 0x03900000
44#define MP1_Public 0x03b00000
45#define MP1_SRAM 0x03c00004
46
47#define smnMP1_FIRMWARE_FLAGS 0x3010028
48
49
50static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
51{
52 uint32_t reg;
53
54 reg = soc15_get_register_offset(MP1_HWID, 0,
55 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
56
57 phm_wait_for_register_unequal(hwmgr, reg,
58 0, MP1_C2PMSG_90__CONTENT_MASK);
59
60 return cgs_read_register(hwmgr->device, reg);
61}
62
63static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
64 uint16_t msg)
65{
66 uint32_t reg;
67
68 reg = soc15_get_register_offset(MP1_HWID, 0,
69 mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
70 cgs_write_register(hwmgr->device, reg, msg);
71
72 return 0;
73}
74
75static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
76{
77 uint32_t reg;
78
79 reg = soc15_get_register_offset(MP1_HWID, 0,
80 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
81
82 return cgs_read_register(hwmgr->device, reg);
83}
84
85static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 uint32_t reg;
88
89 smu10_wait_for_response(hwmgr);
90
91 reg = soc15_get_register_offset(MP1_HWID, 0,
92 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
93 cgs_write_register(hwmgr->device, reg, 0);
94
95 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
96
97 if (smu10_wait_for_response(hwmgr) == 0)
98 printk("Failed to send Message %x.\n", msg);
99
100 return 0;
101}
102
103
104static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
105 uint16_t msg, uint32_t parameter)
106{
107 uint32_t reg;
108
109 smu10_wait_for_response(hwmgr);
110
111 reg = soc15_get_register_offset(MP1_HWID, 0,
112 mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
113 cgs_write_register(hwmgr->device, reg, 0);
114
115 reg = soc15_get_register_offset(MP1_HWID, 0,
116 mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
117 cgs_write_register(hwmgr->device, reg, parameter);
118
119 smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
120
121
122 if (smu10_wait_for_response(hwmgr) == 0)
123 printk("Failed to send Message %x.\n", msg);
124
125 return 0;
126}
127
128static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
129 uint8_t *table, int16_t table_id)
130{
131 struct smu10_smumgr *priv =
132 (struct smu10_smumgr *)(hwmgr->smu_backend);
133
134 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
135 "Invalid SMU Table ID!", return -EINVAL;);
136 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
137 "Invalid SMU Table version!", return -EINVAL;);
138 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
139 "Invalid SMU Table Length!", return -EINVAL;);
140 smu10_send_msg_to_smc_with_parameter(hwmgr,
141 PPSMC_MSG_SetDriverDramAddrHigh,
142 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
143 smu10_send_msg_to_smc_with_parameter(hwmgr,
144 PPSMC_MSG_SetDriverDramAddrLow,
145 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
146 smu10_send_msg_to_smc_with_parameter(hwmgr,
147 PPSMC_MSG_TransferTableSmu2Dram,
148 priv->smu_tables.entry[table_id].table_id);
149
150 memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
151 priv->smu_tables.entry[table_id].size);
152
153 return 0;
154}
155
156static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
157 uint8_t *table, int16_t table_id)
158{
159 struct smu10_smumgr *priv =
160 (struct smu10_smumgr *)(hwmgr->smu_backend);
161
162 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
163 "Invalid SMU Table ID!", return -EINVAL;);
164 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
165 "Invalid SMU Table version!", return -EINVAL;);
166 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
167 "Invalid SMU Table Length!", return -EINVAL;);
168
169 memcpy(priv->smu_tables.entry[table_id].table, table,
170 priv->smu_tables.entry[table_id].size);
171
172 smu10_send_msg_to_smc_with_parameter(hwmgr,
173 PPSMC_MSG_SetDriverDramAddrHigh,
174 upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
175 smu10_send_msg_to_smc_with_parameter(hwmgr,
176 PPSMC_MSG_SetDriverDramAddrLow,
177 lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
178 smu10_send_msg_to_smc_with_parameter(hwmgr,
179 PPSMC_MSG_TransferTableDram2Smu,
180 priv->smu_tables.entry[table_id].table_id);
181
182 return 0;
183}
184
185static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
186{
187 uint32_t smc_driver_if_version;
188
189 smu10_send_msg_to_smc(hwmgr,
190 PPSMC_MSG_GetDriverIfVersion);
191 smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
192
193 if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
194		pr_err("SMC driver interface version mismatch (0x%x != 0x%x)\n", smc_driver_if_version, SMU10_DRIVER_IF_VERSION);
195 return -EINVAL;
196 }
197
198 return 0;
199}
200
201/* SDMA is disabled by default in the vbios, so the driver must re-enable it */
202static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
203{
204 smu10_send_msg_to_smc(hwmgr,
205 PPSMC_MSG_PowerUpSdma);
206}
207
208static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
209{
210 smu10_send_msg_to_smc(hwmgr,
211 PPSMC_MSG_PowerDownSdma);
212}
213
214/* VCN is disabled by default in the vbios, so the driver must re-enable it */
215static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
216{
217 smu10_send_msg_to_smc_with_parameter(hwmgr,
218 PPSMC_MSG_PowerUpVcn, 0);
219}
220
221static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
222{
223 smu10_send_msg_to_smc_with_parameter(hwmgr,
224 PPSMC_MSG_PowerDownVcn, 0);
225}
226
227static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
228{
229 struct smu10_smumgr *priv =
230 (struct smu10_smumgr *)(hwmgr->smu_backend);
231
232 if (priv) {
233 smu10_smc_disable_sdma(hwmgr);
234 smu10_smc_disable_vcn(hwmgr);
235 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
236 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
237 &priv->smu_tables.entry[SMU10_WMTABLE].table);
238 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
239 &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
240 &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
241 kfree(hwmgr->smu_backend);
242 hwmgr->smu_backend = NULL;
243 }
244
245 return 0;
246}
247
248static int smu10_start_smu(struct pp_hwmgr *hwmgr)
249{
250 struct amdgpu_device *adev = hwmgr->adev;
251
252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
253 hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
254 adev->pm.fw_version = hwmgr->smu_version >> 8;
255
256 if (smu10_verify_smc_interface(hwmgr))
257 return -EINVAL;
258 smu10_smc_enable_sdma(hwmgr);
259 smu10_smc_enable_vcn(hwmgr);
260 return 0;
261}
262
263static int smu10_smu_init(struct pp_hwmgr *hwmgr)
264{
265 struct smu10_smumgr *priv;
266 int r;
267
268 priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);
269
270 if (!priv)
271 return -ENOMEM;
272
273 hwmgr->smu_backend = priv;
274
275 /* allocate space for watermarks table */
276 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
277 sizeof(Watermarks_t),
278 PAGE_SIZE,
279 AMDGPU_GEM_DOMAIN_VRAM,
280 &priv->smu_tables.entry[SMU10_WMTABLE].handle,
281 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
282 &priv->smu_tables.entry[SMU10_WMTABLE].table);
283
284 if (r)
285 goto err0;
286
287 priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
288 priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
289 priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;
290
291	/* allocate space for DPM clocks table */
292 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
293 sizeof(DpmClocks_t),
294 PAGE_SIZE,
295 AMDGPU_GEM_DOMAIN_VRAM,
296 &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
297 &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
298 &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
299
300 if (r)
301 goto err1;
302
303 priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
304 priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
305 priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
306
307 return 0;
308
309err1:
310 amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
311 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
312 &priv->smu_tables.entry[SMU10_WMTABLE].table);
313err0:
314 kfree(priv);
315 return -EINVAL;
316}
317
318static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
319{
320 int ret;
321
322 if (rw)
323 ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
324 else
325 ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
326
327 return ret;
328}
329
330
331const struct pp_smumgr_func smu10_smu_funcs = {
332 .smu_init = &smu10_smu_init,
333 .smu_fini = &smu10_smu_fini,
334 .start_smu = &smu10_start_smu,
335 .request_smu_load_specific_fw = NULL,
336 .send_msg_to_smc = &smu10_send_msg_to_smc,
337 .send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
338 .download_pptable_settings = NULL,
339 .upload_pptable_settings = NULL,
340 .get_argument = smu10_read_arg_from_smc,
341 .smc_table_manager = smu10_smc_table_manager,
342};
343
344
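The smu10 message path above is a simple mailbox handshake: clear the response register (C2PMSG_90), write any argument to C2PMSG_82, write the message ID to C2PMSG_66, then poll C2PMSG_90 until it goes non-zero (zero from smu10_wait_for_response() means no acknowledgement). The table copy helpers layer the same primitive: they program the table's DRAM address via SetDriverDramAddrHigh/Low, trigger TransferTableSmu2Dram or TransferTableDram2Smu, then memcpy through the CPU mapping. A condensed sketch of the handshake, assuming only the helpers shown in this file; smu10_mailbox_call itself is hypothetical:

	/* Hedged sketch of the SMU10 mailbox sequence; not code from this patch. */
	static int smu10_mailbox_call(struct pp_hwmgr *hwmgr, uint16_t msg,
				      uint32_t arg, uint32_t *out)
	{
		uint32_t resp_reg = soc15_get_register_offset(MP1_HWID, 0,
				mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
		uint32_t arg_reg = soc15_get_register_offset(MP1_HWID, 0,
				mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
		uint32_t msg_reg = soc15_get_register_offset(MP1_HWID, 0,
				mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);

		smu10_wait_for_response(hwmgr);			/* drain any previous call */
		cgs_write_register(hwmgr->device, resp_reg, 0);	/* arm the handshake */
		cgs_write_register(hwmgr->device, arg_reg, arg);
		cgs_write_register(hwmgr->device, msg_reg, msg);	/* ring the doorbell */

		if (smu10_wait_for_response(hwmgr) == 0)	/* 0 == no ack from SMU */
			return -ETIME;
		if (out)
			*out = cgs_read_register(hwmgr->device, arg_reg);
		return 0;
	}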
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
index 0ff4ac5838f7..9c2be74a2b2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -21,17 +21,13 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef PP_RAVEN_SMUMANAGER_H 24#ifndef PP_SMU10_SMUMANAGER_H
25#define PP_RAVEN_SMUMANAGER_H 25#define PP_SMU10_SMUMANAGER_H
26 26
27#include "rv_ppsmc.h" 27#include "rv_ppsmc.h"
28#include "smu10_driver_if.h" 28#include "smu10_driver_if.h"
29 29
30enum SMU_TABLE_ID { 30#define MAX_SMU_TABLE 2
31 WMTABLE = 0,
32 CLOCKTABLE,
33 MAX_SMU_TABLE,
34};
35 31
36struct smu_table_entry { 32struct smu_table_entry {
37 uint32_t version; 33 uint32_t version;
@@ -46,16 +42,9 @@ struct smu_table_array {
46 struct smu_table_entry entry[MAX_SMU_TABLE]; 42 struct smu_table_entry entry[MAX_SMU_TABLE];
47}; 43};
48 44
49struct rv_smumgr { 45struct smu10_smumgr {
50 struct smu_table_array smu_tables; 46 struct smu_table_array smu_tables;
51}; 47};
52 48
53int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
54bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
55int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
56 uint8_t *table, int16_t table_id);
57int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
58 uint8_t *table, int16_t table_id);
59
60 49
61#endif 50#endif
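With the rename, the header no longer exports a generic SMU_TABLE_ID enum; only the MAX_SMU_TABLE bound survives, and the slots are addressed through asic-prefixed indices (SMU10_WMTABLE, SMU10_CLOCKTABLE in the .c file above). A minimal sketch of the assumed layout — the concrete values are defined outside this hunk:

	/* Assumed slot numbering; the real defines live outside this diff. */
	#define SMU10_WMTABLE		0	/* Watermarks_t */
	#define SMU10_CLOCKTABLE	1	/* DpmClocks_t */
	/* MAX_SMU_TABLE (2) bounds smu_table_array.entry[] as shown above. */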
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 7394bb46b8b2..0399c10d2be0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -585,7 +585,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
585int smu7_init(struct pp_hwmgr *hwmgr) 585int smu7_init(struct pp_hwmgr *hwmgr)
586{ 586{
587 struct smu7_smumgr *smu_data; 587 struct smu7_smumgr *smu_data;
588 uint8_t *internal_buf;
589 uint64_t mc_addr = 0; 588 uint64_t mc_addr = 0;
590 int r; 589 int r;
591 /* Allocate memory for backend private data */ 590 /* Allocate memory for backend private data */
@@ -627,13 +626,10 @@ int smu7_init(struct pp_hwmgr *hwmgr)
627 &smu_data->header_buffer.kaddr); 626 &smu_data->header_buffer.kaddr);
628 return -EINVAL; 627 return -EINVAL;
629 } 628 }
630 internal_buf = smu_data->smu_buffer.kaddr;
631 smu_data->smu_buffer.mc_addr = mc_addr; 629 smu_data->smu_buffer.mc_addr = mc_addr;
632 630
633 if (smum_is_hw_avfs_present(hwmgr)) 631 if (smum_is_hw_avfs_present(hwmgr))
634 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; 632 hwmgr->avfs_supported = true;
635 else
636 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
637 633
638 return 0; 634 return 0;
639} 635}
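The smu7 init path also sheds its private three-state AVFS bookkeeping: instead of parking an AVFS_BTC_* status in the backend, it now records plain hardware support on the hwmgr. A hedged sketch of what a consumer check looks like after this change; smu7_enable_avfs() is named for illustration only:

	/* Sketch only: callers test the flag instead of the old state machine. */
	if (hwmgr->avfs_supported)
		ret = smu7_enable_avfs(hwmgr);	/* hypothetical consumer */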
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 64334a82b77b..126d300259ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -36,11 +36,6 @@ struct smu7_buffer_entry {
36 struct amdgpu_bo *handle; 36 struct amdgpu_bo *handle;
37}; 37};
38 38
39struct smu7_avfs {
40 enum AVFS_BTC_STATUS avfs_btc_status;
41 uint32_t avfs_btc_param;
42};
43
44struct smu7_smumgr { 39struct smu7_smumgr {
45 uint8_t *header; 40 uint8_t *header;
46 uint8_t *mec_image; 41 uint8_t *mec_image;
@@ -55,7 +50,7 @@ struct smu7_smumgr {
55 uint32_t ulv_setting_starts; 50 uint32_t ulv_setting_starts;
56 uint8_t security_hard_key; 51 uint8_t security_hard_key;
57 uint32_t acpi_optimization; 52 uint32_t acpi_optimization;
58 struct smu7_avfs avfs; 53 uint32_t avfs_btc_param;
59}; 54};
60 55
61 56
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
new file mode 100644
index 000000000000..8c49704b81af
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -0,0 +1,891 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/gfp.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu/smu_8_0_sh_mask.h"
33#include "smu8.h"
34#include "smu8_fusion.h"
35#include "smu8_smumgr.h"
36#include "cz_ppsmc.h"
37#include "smu_ucode_xfer_cz.h"
38#include "gca/gfx_8_0_d.h"
39#include "gca/gfx_8_0_sh_mask.h"
40#include "smumgr.h"
41
42#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
43
44static const enum smu8_scratch_entry firmware_list[] = {
45 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
46 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
47 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
48 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
49 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
50 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
51 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
52 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
53};
54
55static int smu8_smum_get_argument(struct pp_hwmgr *hwmgr)
56{
57 if (hwmgr == NULL || hwmgr->device == NULL)
58 return -EINVAL;
59
60 return cgs_read_register(hwmgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62}
63
64static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
65{
66 int result = 0;
67
68 if (hwmgr == NULL || hwmgr->device == NULL)
69 return -EINVAL;
70
71 result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
72 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
73 if (result != 0) {
74 pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
75 return result;
76 }
77
78 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
79 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
80
81 return 0;
82}
83
84/* Send a message to the SMC, and wait for its response.*/
85static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
86{
87 int result = 0;
88
89 result = smu8_send_msg_to_smc_async(hwmgr, msg);
90 if (result != 0)
91 return result;
92
93 return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
94 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
95}
96
97static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
98 uint32_t smc_address, uint32_t limit)
99{
100 if (hwmgr == NULL || hwmgr->device == NULL)
101 return -EINVAL;
102
103 if (0 != (3 & smc_address)) {
104 pr_err("SMC address must be 4 byte aligned\n");
105 return -EINVAL;
106 }
107
108 if (limit <= (smc_address + 3)) {
109 pr_err("SMC address beyond the SMC RAM area\n");
110 return -EINVAL;
111 }
112
113 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
114 SMN_MP1_SRAM_START_ADDR + smc_address);
115
116 return 0;
117}
118
119static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
120 uint32_t smc_address, uint32_t value, uint32_t limit)
121{
122 int result;
123
124 if (hwmgr == NULL || hwmgr->device == NULL)
125 return -EINVAL;
126
127 result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
128 if (!result)
129 cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
130
131 return result;
132}
133
134static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
135 uint16_t msg, uint32_t parameter)
136{
137 if (hwmgr == NULL || hwmgr->device == NULL)
138 return -EINVAL;
139
140 cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
141
142 return smu8_send_msg_to_smc(hwmgr, msg);
143}
144
145static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
146 uint32_t firmware)
147{
148 int i;
149 uint32_t index = SMN_MP1_SRAM_START_ADDR +
150 SMU8_FIRMWARE_HEADER_LOCATION +
151 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
152
153 if (hwmgr == NULL || hwmgr->device == NULL)
154 return -EINVAL;
155
156 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
157
158 for (i = 0; i < hwmgr->usec_timeout; i++) {
159 if (firmware ==
160 (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
161 break;
162 udelay(1);
163 }
164
165 if (i >= hwmgr->usec_timeout) {
166		pr_err("SMU firmware load check failed.\n");
167 return -EINVAL;
168 }
169
170 return 0;
171}
172
173static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
174{
175 uint32_t reg_data;
176 uint32_t tmp;
177 int ret = 0;
178 struct cgs_firmware_info info = {0};
179 struct smu8_smumgr *smu8_smu;
180
181 if (hwmgr == NULL || hwmgr->device == NULL)
182 return -EINVAL;
183
184 smu8_smu = hwmgr->smu_backend;
185 ret = cgs_get_firmware_info(hwmgr->device,
186 CGS_UCODE_ID_CP_MEC, &info);
187
188 if (ret)
189 return -EINVAL;
190
191 /* Disable MEC parsing/prefetching */
192 tmp = cgs_read_register(hwmgr->device,
193 mmCP_MEC_CNTL);
194 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
195 tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
196 cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
197
198 tmp = cgs_read_register(hwmgr->device,
199 mmCP_CPC_IC_BASE_CNTL);
200
201 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
202 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
203 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
204 tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
205 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
206
207 reg_data = lower_32_bits(info.mc_addr) &
208 PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
209 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
210
211 reg_data = upper_32_bits(info.mc_addr) &
212 PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
213 cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
214
215 return 0;
216}
217
218static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
219 enum smu8_scratch_entry firmware_enum)
220{
221 uint8_t ret = 0;
222
223 switch (firmware_enum) {
224 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
225 ret = UCODE_ID_SDMA0;
226 break;
227 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
228 if (hwmgr->chip_id == CHIP_STONEY)
229 ret = UCODE_ID_SDMA0;
230 else
231 ret = UCODE_ID_SDMA1;
232 break;
233 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
234 ret = UCODE_ID_CP_CE;
235 break;
236 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
237 ret = UCODE_ID_CP_PFP;
238 break;
239 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
240 ret = UCODE_ID_CP_ME;
241 break;
242 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
243 ret = UCODE_ID_CP_MEC_JT1;
244 break;
245 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
246 if (hwmgr->chip_id == CHIP_STONEY)
247 ret = UCODE_ID_CP_MEC_JT1;
248 else
249 ret = UCODE_ID_CP_MEC_JT2;
250 break;
251 case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
252 ret = UCODE_ID_GMCON_RENG;
253 break;
254 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
255 ret = UCODE_ID_RLC_G;
256 break;
257 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
258 ret = UCODE_ID_RLC_SCRATCH;
259 break;
260 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
261 ret = UCODE_ID_RLC_SRM_ARAM;
262 break;
263 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
264 ret = UCODE_ID_RLC_SRM_DRAM;
265 break;
266 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
267 ret = UCODE_ID_DMCU_ERAM;
268 break;
269 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
270 ret = UCODE_ID_DMCU_IRAM;
271 break;
272 case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
273 ret = TASK_ARG_INIT_MM_PWR_LOG;
274 break;
275 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
276 case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
277 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
278 case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
279 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
280 case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
281 ret = TASK_ARG_REG_MMIO;
282 break;
283 case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
284 ret = TASK_ARG_INIT_CLK_TABLE;
285 break;
286 }
287
288 return ret;
289}
290
291static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
292{
293 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
294
295 switch (fw_type) {
296 case UCODE_ID_SDMA0:
297 result = CGS_UCODE_ID_SDMA0;
298 break;
299 case UCODE_ID_SDMA1:
300 result = CGS_UCODE_ID_SDMA1;
301 break;
302 case UCODE_ID_CP_CE:
303 result = CGS_UCODE_ID_CP_CE;
304 break;
305 case UCODE_ID_CP_PFP:
306 result = CGS_UCODE_ID_CP_PFP;
307 break;
308 case UCODE_ID_CP_ME:
309 result = CGS_UCODE_ID_CP_ME;
310 break;
311 case UCODE_ID_CP_MEC_JT1:
312 result = CGS_UCODE_ID_CP_MEC_JT1;
313 break;
314 case UCODE_ID_CP_MEC_JT2:
315 result = CGS_UCODE_ID_CP_MEC_JT2;
316 break;
317 case UCODE_ID_RLC_G:
318 result = CGS_UCODE_ID_RLC_G;
319 break;
320 default:
321 break;
322 }
323
324 return result;
325}
326
327static int smu8_smu_populate_single_scratch_task(
328 struct pp_hwmgr *hwmgr,
329 enum smu8_scratch_entry fw_enum,
330 uint8_t type, bool is_last)
331{
332 uint8_t i;
333 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
334 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
335 struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
336
337 task->type = type;
338 task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
339 task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
340
341 for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
342 if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
343 break;
344
345 if (i >= smu8_smu->scratch_buffer_length) {
346 pr_err("Invalid Firmware Type\n");
347 return -EINVAL;
348 }
349
350 task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
351 task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
352 task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
353
354 if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
355 struct smu8_ih_meta_data *pIHReg_restore =
356 (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
357 pIHReg_restore->command =
358 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
359 }
360
361 return 0;
362}
363
364static int smu8_smu_populate_single_ucode_load_task(
365 struct pp_hwmgr *hwmgr,
366 enum smu8_scratch_entry fw_enum,
367 bool is_last)
368{
369 uint8_t i;
370 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
371 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
372 struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
373
374 task->type = TASK_TYPE_UCODE_LOAD;
375 task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
376 task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
377
378 for (i = 0; i < smu8_smu->driver_buffer_length; i++)
379 if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
380 break;
381
382 if (i >= smu8_smu->driver_buffer_length) {
383 pr_err("Invalid Firmware Type\n");
384 return -EINVAL;
385 }
386
387 task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
388 task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
389 task->size_bytes = smu8_smu->driver_buffer[i].data_size;
390
391 return 0;
392}
393
394static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
395{
396 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
397
398 smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
399 smu8_smu_populate_single_scratch_task(hwmgr,
400 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
401 TASK_TYPE_UCODE_SAVE, true);
402
403 return 0;
404}
405
406static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
407{
408 int i;
409 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
410 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
411
412 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
413 toc->JobList[i] = (uint8_t)IGNORE_JOB;
414
415 return 0;
416}
417
418static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
419{
420 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
421 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
422
423 toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
424 smu8_smu_populate_single_scratch_task(hwmgr,
425 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
426 TASK_TYPE_UCODE_SAVE, false);
427
428 smu8_smu_populate_single_scratch_task(hwmgr,
429 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
430 TASK_TYPE_UCODE_SAVE, true);
431
432 return 0;
433}
434
435
436static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
437{
438 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
439 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
440
441 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;
442
443 smu8_smu_populate_single_ucode_load_task(hwmgr,
444 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
445 smu8_smu_populate_single_ucode_load_task(hwmgr,
446 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
447 smu8_smu_populate_single_ucode_load_task(hwmgr,
448 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
449 smu8_smu_populate_single_ucode_load_task(hwmgr,
450 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
451
452 if (hwmgr->chip_id == CHIP_STONEY)
453 smu8_smu_populate_single_ucode_load_task(hwmgr,
454 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
455 else
456 smu8_smu_populate_single_ucode_load_task(hwmgr,
457 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
458
459 smu8_smu_populate_single_ucode_load_task(hwmgr,
460 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
461
462 /* populate scratch */
463 smu8_smu_populate_single_scratch_task(hwmgr,
464 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
465 TASK_TYPE_UCODE_LOAD, false);
466
467 smu8_smu_populate_single_scratch_task(hwmgr,
468 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
469 TASK_TYPE_UCODE_LOAD, false);
470
471 smu8_smu_populate_single_scratch_task(hwmgr,
472 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
473 TASK_TYPE_UCODE_LOAD, true);
474
475 return 0;
476}
477
478static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
479{
480 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
481
482 smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
483
484 smu8_smu_populate_single_scratch_task(hwmgr,
485 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
486 TASK_TYPE_INITIALIZE, true);
487 return 0;
488}
489
490static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
491{
492 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
493
494 smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;
495
496 smu8_smu_populate_single_ucode_load_task(hwmgr,
497 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
498 if (hwmgr->chip_id != CHIP_STONEY)
499 smu8_smu_populate_single_ucode_load_task(hwmgr,
500 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
501 smu8_smu_populate_single_ucode_load_task(hwmgr,
502 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
503 smu8_smu_populate_single_ucode_load_task(hwmgr,
504 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
505 smu8_smu_populate_single_ucode_load_task(hwmgr,
506 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
507 smu8_smu_populate_single_ucode_load_task(hwmgr,
508 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
509 if (hwmgr->chip_id != CHIP_STONEY)
510 smu8_smu_populate_single_ucode_load_task(hwmgr,
511 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
512 smu8_smu_populate_single_ucode_load_task(hwmgr,
513 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
514
515 return 0;
516}
517
518static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
519{
520 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
521
522 smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
523
524 smu8_smu_populate_single_scratch_task(hwmgr,
525 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
526 TASK_TYPE_INITIALIZE, true);
527
528 return 0;
529}
530
531static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
532{
533 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
534
535 smu8_smu->toc_entry_used_count = 0;
536 smu8_smu_initialize_toc_empty_job_list(hwmgr);
537 smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
538 smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
539 smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
540 smu8_smu_construct_toc_for_power_profiling(hwmgr);
541 smu8_smu_construct_toc_for_bootup(hwmgr);
542 smu8_smu_construct_toc_for_clock_table(hwmgr);
543
544 return 0;
545}
546
547static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
548{
549 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
550 uint32_t firmware_type;
551 uint32_t i;
552 int ret;
553 enum cgs_ucode_id ucode_id;
554 struct cgs_firmware_info info = {0};
555
556 smu8_smu->driver_buffer_length = 0;
557
558 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
559
560 firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
561 firmware_list[i]);
562
563 ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
564
565 ret = cgs_get_firmware_info(hwmgr->device,
566 ucode_id, &info);
567
568 if (ret == 0) {
569 smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
570
571 smu8_smu->driver_buffer[i].data_size = info.image_size;
572
573 smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
574 smu8_smu->driver_buffer_length++;
575 }
576 }
577
578 return 0;
579}
580
581static int smu8_smu_populate_single_scratch_entry(
582 struct pp_hwmgr *hwmgr,
583 enum smu8_scratch_entry scratch_type,
584 uint32_t ulsize_byte,
585 struct smu8_buffer_entry *entry)
586{
587 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
588 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
589
590 entry->data_size = ulsize_byte;
591 entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
592 smu8_smu->smu_buffer_used_bytes;
593 entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
594 entry->firmware_ID = scratch_type;
595
596 smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
597
598 return 0;
599}
600
601static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
602{
603 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
604 unsigned long i;
605
606 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
607 if (smu8_smu->scratch_buffer[i].firmware_ID
608 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
609 break;
610 }
611
612 *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
613
614 smu8_send_msg_to_smc_with_parameter(hwmgr,
615 PPSMC_MSG_SetClkTableAddrHi,
616 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
617
618 smu8_send_msg_to_smc_with_parameter(hwmgr,
619 PPSMC_MSG_SetClkTableAddrLo,
620 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
621
622 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
623 smu8_smu->toc_entry_clock_table);
624
625 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
626
627 return 0;
628}
629
630static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
631{
632 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
633 unsigned long i;
634
635 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
636 if (smu8_smu->scratch_buffer[i].firmware_ID
637 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
638 break;
639 }
640
641 smu8_send_msg_to_smc_with_parameter(hwmgr,
642 PPSMC_MSG_SetClkTableAddrHi,
643 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
644
645 smu8_send_msg_to_smc_with_parameter(hwmgr,
646 PPSMC_MSG_SetClkTableAddrLo,
647 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
648
649 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
650 smu8_smu->toc_entry_clock_table);
651
652 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
653
654 return 0;
655}
656
657static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
658{
659 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
660 uint32_t smc_address;
661
662 if (!hwmgr->reload_fw) {
663 pr_info("skip reloading...\n");
664 return 0;
665 }
666
667 smu8_smu_populate_firmware_entries(hwmgr);
668
669 smu8_smu_construct_toc(hwmgr);
670
671 smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
672 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
673
674 smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
675
676 smu8_send_msg_to_smc_with_parameter(hwmgr,
677 PPSMC_MSG_DriverDramAddrHi,
678 upper_32_bits(smu8_smu->toc_buffer.mc_addr));
679
680 smu8_send_msg_to_smc_with_parameter(hwmgr,
681 PPSMC_MSG_DriverDramAddrLo,
682 lower_32_bits(smu8_smu->toc_buffer.mc_addr));
683
684 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
685
686 smu8_send_msg_to_smc_with_parameter(hwmgr,
687 PPSMC_MSG_ExecuteJob,
688 smu8_smu->toc_entry_aram);
689 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
690 smu8_smu->toc_entry_power_profiling_index);
691
692 return smu8_send_msg_to_smc_with_parameter(hwmgr,
693 PPSMC_MSG_ExecuteJob,
694 smu8_smu->toc_entry_initialize_index);
695}
696
697static int smu8_start_smu(struct pp_hwmgr *hwmgr)
698{
699 int ret = 0;
700 uint32_t fw_to_check = 0;
701 struct amdgpu_device *adev = hwmgr->adev;
702
703 uint32_t index = SMN_MP1_SRAM_START_ADDR +
704 SMU8_FIRMWARE_HEADER_LOCATION +
705 offsetof(struct SMU8_Firmware_Header, Version);
706
707
708 if (hwmgr == NULL || hwmgr->device == NULL)
709 return -EINVAL;
710
711 cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
712 hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
713 adev->pm.fw_version = hwmgr->smu_version >> 8;
714
715 fw_to_check = UCODE_ID_RLC_G_MASK |
716 UCODE_ID_SDMA0_MASK |
717 UCODE_ID_SDMA1_MASK |
718 UCODE_ID_CP_CE_MASK |
719 UCODE_ID_CP_ME_MASK |
720 UCODE_ID_CP_PFP_MASK |
721 UCODE_ID_CP_MEC_JT1_MASK |
722 UCODE_ID_CP_MEC_JT2_MASK;
723
724 if (hwmgr->chip_id == CHIP_STONEY)
725 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
726
727 ret = smu8_request_smu_load_fw(hwmgr);
728 if (ret)
729 pr_err("SMU firmware load failed\n");
730
731 smu8_check_fw_load_finish(hwmgr, fw_to_check);
732
733 ret = smu8_load_mec_firmware(hwmgr);
734 if (ret)
735		pr_err("MEC firmware load failed\n");
736
737 return ret;
738}
739
740static int smu8_smu_init(struct pp_hwmgr *hwmgr)
741{
742 int ret = 0;
743 struct smu8_smumgr *smu8_smu;
744
745 smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
746 if (smu8_smu == NULL)
747 return -ENOMEM;
748
749 hwmgr->smu_backend = smu8_smu;
750
751 smu8_smu->toc_buffer.data_size = 4096;
752 smu8_smu->smu_buffer.data_size =
753 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
754 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
756 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
757 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
758
759 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
760 smu8_smu->toc_buffer.data_size,
761 PAGE_SIZE,
762 AMDGPU_GEM_DOMAIN_VRAM,
763 &smu8_smu->toc_buffer.handle,
764 &smu8_smu->toc_buffer.mc_addr,
765 &smu8_smu->toc_buffer.kaddr);
766 if (ret)
767 goto err2;
768
769 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
770 smu8_smu->smu_buffer.data_size,
771 PAGE_SIZE,
772 AMDGPU_GEM_DOMAIN_VRAM,
773 &smu8_smu->smu_buffer.handle,
774 &smu8_smu->smu_buffer.mc_addr,
775 &smu8_smu->smu_buffer.kaddr);
776 if (ret)
777 goto err1;
778
779 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
780 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
781 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
782 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
783		pr_err("Failed to populate firmware entry.\n");
784 goto err0;
785 }
786
787 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
788 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
789 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
790 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
791		pr_err("Failed to populate firmware entry.\n");
792 goto err0;
793 }
794 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
795 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
796 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
797 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
798		pr_err("Failed to populate firmware entry.\n");
799 goto err0;
800 }
801
802 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
803 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
804 sizeof(struct SMU8_MultimediaPowerLogData),
805 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
806		pr_err("Failed to populate firmware entry.\n");
807 goto err0;
808 }
809
810 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
811 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
812 sizeof(struct SMU8_Fusion_ClkTable),
813 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
814		pr_err("Failed to populate firmware entry.\n");
815 goto err0;
816 }
817
818 return 0;
819
820err0:
821 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
822 &smu8_smu->smu_buffer.mc_addr,
823 &smu8_smu->smu_buffer.kaddr);
824err1:
825 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
826 &smu8_smu->toc_buffer.mc_addr,
827 &smu8_smu->toc_buffer.kaddr);
828err2:
829 kfree(smu8_smu);
830 return -EINVAL;
831}
832
833static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
834{
835 struct smu8_smumgr *smu8_smu;
836
837 if (hwmgr == NULL || hwmgr->device == NULL)
838 return -EINVAL;
839
840 smu8_smu = hwmgr->smu_backend;
841 if (smu8_smu) {
842 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
843 &smu8_smu->toc_buffer.mc_addr,
844 &smu8_smu->toc_buffer.kaddr);
845 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
846 &smu8_smu->smu_buffer.mc_addr,
847 &smu8_smu->smu_buffer.kaddr);
848 kfree(smu8_smu);
849 }
850
851 return 0;
852}
853
854static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
855 unsigned long check_feature)
856{
857 int result;
858 unsigned long features;
859
860 result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
861 if (result == 0) {
862 features = smum_get_argument(hwmgr);
863 if (features & check_feature)
864 return true;
865 }
866
867 return false;
868}
869
870static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
871{
872 if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
873 return true;
874 return false;
875}
876
877const struct pp_smumgr_func smu8_smu_funcs = {
878 .smu_init = smu8_smu_init,
879 .smu_fini = smu8_smu_fini,
880 .start_smu = smu8_start_smu,
881 .check_fw_load_finish = smu8_check_fw_load_finish,
882 .request_smu_load_fw = NULL,
883 .request_smu_load_specific_fw = NULL,
884 .get_argument = smu8_smum_get_argument,
885 .send_msg_to_smc = smu8_send_msg_to_smc,
886 .send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
887 .download_pptable_settings = smu8_download_pptable_settings,
888 .upload_pptable_settings = smu8_upload_pptable_settings,
889 .is_dpm_running = smu8_is_dpm_running,
890};
891
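The new smu8 file keeps the Carrizo firmware-loading scheme intact: the driver builds a table of contents in a VRAM buffer, where JobList[] maps a job ID to the index of its first SMU_Task and each task chains to the next via ->next until END_OF_TASK_LIST; the SMU firmware then walks those chains when PPSMC_MSG_ExecuteJob names a starting entry. A sketch of the traversal the tasks encode — walk_job() is hypothetical, since the real walker runs inside the SMU:

	/* Conceptual walk of one TOC job chain; executed by the SMU, not the driver. */
	static void walk_job(struct TOC *toc, uint8_t job)
	{
		uint16_t idx = toc->JobList[job];

		if (idx == (uint8_t)IGNORE_JOB)		/* empty slot, see init above */
			return;

		while (idx != END_OF_TASK_LIST) {
			struct SMU_Task *task = &toc->tasks[idx];

			/* task->type picks ucode load vs. scratch save/load;
			 * task->addr/size_bytes locate the payload in VRAM. */
			idx = task->next;
		}
	}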
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
index c13ab8377e26..c7b61222d258 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
@@ -20,63 +20,63 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef _CZ_SMUMGR_H_ 23#ifndef _SMU8_SMUMGR_H_
24#define _CZ_SMUMGR_H_ 24#define _SMU8_SMUMGR_H_
25 25
26 26
27#define MAX_NUM_FIRMWARE 8 27#define MAX_NUM_FIRMWARE 8
28#define MAX_NUM_SCRATCH 11 28#define MAX_NUM_SCRATCH 11
29#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 29#define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
30#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 30#define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
31#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 31#define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024
32#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) 32#define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4)
33 33
34#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 34#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
35 35
36enum cz_scratch_entry { 36enum smu8_scratch_entry {
37 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, 37 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
38 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, 38 SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
39 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, 39 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
40 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, 40 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
41 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, 41 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
42 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, 42 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
43 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, 43 SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
44 CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, 44 SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
45 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, 45 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
46 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, 46 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
47 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, 47 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
48 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, 48 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
49 CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, 49 SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
50 CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, 50 SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
51 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, 51 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
52 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, 52 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
53 CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, 53 SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
54 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, 54 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
55 CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, 55 SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
56 CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, 56 SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START,
57 CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, 57 SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
58 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE 58 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
59}; 59};
60 60
61struct cz_buffer_entry { 61struct smu8_buffer_entry {
62 uint32_t data_size; 62 uint32_t data_size;
63 uint64_t mc_addr; 63 uint64_t mc_addr;
64 void *kaddr; 64 void *kaddr;
65 enum cz_scratch_entry firmware_ID; 65 enum smu8_scratch_entry firmware_ID;
66 struct amdgpu_bo *handle; /* as bo handle used when release bo */ 66 struct amdgpu_bo *handle; /* as bo handle used when release bo */
67}; 67};
68 68
69struct cz_register_index_data_pair { 69struct smu8_register_index_data_pair {
70 uint32_t offset; 70 uint32_t offset;
71 uint32_t value; 71 uint32_t value;
72}; 72};
73 73
74struct cz_ih_meta_data { 74struct smu8_ih_meta_data {
75 uint32_t command; 75 uint32_t command;
76 struct cz_register_index_data_pair register_index_value_pair[1]; 76 struct smu8_register_index_data_pair register_index_value_pair[1];
77}; 77};
78 78
79struct cz_smumgr { 79struct smu8_smumgr {
80 uint8_t driver_buffer_length; 80 uint8_t driver_buffer_length;
81 uint8_t scratch_buffer_length; 81 uint8_t scratch_buffer_length;
82 uint16_t toc_entry_used_count; 82 uint16_t toc_entry_used_count;
@@ -88,12 +88,12 @@ struct cz_smumgr {
88 uint16_t ih_register_restore_task_size; 88 uint16_t ih_register_restore_task_size;
89 uint16_t smu_buffer_used_bytes; 89 uint16_t smu_buffer_used_bytes;
90 90
91 struct cz_buffer_entry toc_buffer; 91 struct smu8_buffer_entry toc_buffer;
92 struct cz_buffer_entry smu_buffer; 92 struct smu8_buffer_entry smu_buffer;
93 struct cz_buffer_entry firmware_buffer; 93 struct smu8_buffer_entry firmware_buffer;
94 struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; 94 struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
95 struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE]; 95 struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
96 struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; 96 struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
97}; 97};
98 98
99#endif 99#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3645127c8ee2..04c45c236a73 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <drm/amdgpu_drm.h> 29#include <drm/amdgpu_drm.h>
30#include "smumgr.h" 30#include "smumgr.h"
31#include "cgs_common.h"
32 31
33MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); 32MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
34MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); 33MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
@@ -200,3 +199,11 @@ int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting)
200 199
201 return -EINVAL; 200 return -EINVAL;
202} 201}
202
203int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
204{
205 if (hwmgr->smumgr_funcs->smc_table_manager)
206 return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw);
207
208 return -EINVAL;
209}
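The new smum_smc_table_manager() wrapper forwards to the backend hook added above; per the smu10 implementation, rw == true copies the table from the SMC into the caller's buffer and rw == false pushes the buffer back. A hedged usage sketch — the round-trip helper is illustrative, not part of the patch:

	/* Illustrative read-modify-write of the watermarks table via the wrapper. */
	static int example_wm_roundtrip(struct pp_hwmgr *hwmgr)
	{
		Watermarks_t wm;
		int ret;

		ret = smum_smc_table_manager(hwmgr, (uint8_t *)&wm,
					     SMU10_WMTABLE, true);	/* SMC -> driver */
		if (ret)
			return ret;
		/* ... adjust watermarks ... */
		return smum_smc_table_manager(hwmgr, (uint8_t *)&wm,
					      SMU10_WMTABLE, false);	/* driver -> SMC */
	}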
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 39d6f4ef96ce..26cca8cce8f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -229,8 +229,10 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
229 229
230 hwmgr->smu_backend = tonga_priv; 230 hwmgr->smu_backend = tonga_priv;
231 231
232 if (smu7_init(hwmgr)) 232 if (smu7_init(hwmgr)) {
233 kfree(tonga_priv);
233 return -EINVAL; 234 return -EINVAL;
235 }
234 236
235 return 0; 237 return 0;
236} 238}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 15e1afa28018..e08a6116ac05 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -27,11 +27,9 @@
27#include "vega10_smumgr.h" 27#include "vega10_smumgr.h"
28#include "vega10_ppsmc.h" 28#include "vega10_ppsmc.h"
29#include "smu9_driver_if.h" 29#include "smu9_driver_if.h"
30
31#include "ppatomctrl.h" 30#include "ppatomctrl.h"
32#include "pp_debug.h" 31#include "pp_debug.h"
33#include "smu_ucode_xfer_vi.h" 32
34#include "smu7_smumgr.h"
35 33
36#define AVFS_EN_MSB 1568 34#define AVFS_EN_MSB 1568
37#define AVFS_EN_LSB 1568 35#define AVFS_EN_LSB 1568
@@ -377,16 +375,13 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
377 375
378static int vega10_smu_init(struct pp_hwmgr *hwmgr) 376static int vega10_smu_init(struct pp_hwmgr *hwmgr)
379{ 377{
380 struct amdgpu_bo *handle = NULL;
381 struct vega10_smumgr *priv; 378 struct vega10_smumgr *priv;
382 uint64_t mc_addr;
383 void *kaddr = NULL;
384 unsigned long tools_size; 379 unsigned long tools_size;
385 int ret; 380 int ret;
386 struct cgs_firmware_info info = {0}; 381 struct cgs_firmware_info info = {0};
387 382
388 ret = cgs_get_firmware_info(hwmgr->device, 383 ret = cgs_get_firmware_info(hwmgr->device,
389 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), 384 CGS_UCODE_ID_SMU,
390 &info); 385 &info);
391 if (ret || !info.kptr) 386 if (ret || !info.kptr)
392 return -EINVAL; 387 return -EINVAL;
@@ -403,28 +398,24 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
403 sizeof(PPTable_t), 398 sizeof(PPTable_t),
404 PAGE_SIZE, 399 PAGE_SIZE,
405 AMDGPU_GEM_DOMAIN_VRAM, 400 AMDGPU_GEM_DOMAIN_VRAM,
406 &handle, 401 &priv->smu_tables.entry[PPTABLE].handle,
407 &mc_addr, 402 &priv->smu_tables.entry[PPTABLE].mc_addr,
408 &kaddr); 403 &priv->smu_tables.entry[PPTABLE].table);
409
410 if (ret) 404 if (ret)
411 return -EINVAL; 405 goto free_backend;
412 406
413 priv->smu_tables.entry[PPTABLE].version = 0x01; 407 priv->smu_tables.entry[PPTABLE].version = 0x01;
414 priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); 408 priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
415 priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; 409 priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE;
416 priv->smu_tables.entry[PPTABLE].mc_addr = mc_addr;
417 priv->smu_tables.entry[PPTABLE].table = kaddr;
418 priv->smu_tables.entry[PPTABLE].handle = handle;
419 410
420 /* allocate space for watermarks table */ 411 /* allocate space for watermarks table */
421 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, 412 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
422 sizeof(Watermarks_t), 413 sizeof(Watermarks_t),
423 PAGE_SIZE, 414 PAGE_SIZE,
424 AMDGPU_GEM_DOMAIN_VRAM, 415 AMDGPU_GEM_DOMAIN_VRAM,
425 &handle, 416 &priv->smu_tables.entry[WMTABLE].handle,
426 &mc_addr, 417 &priv->smu_tables.entry[WMTABLE].mc_addr,
427 &kaddr); 418 &priv->smu_tables.entry[WMTABLE].table);
428 419
429 if (ret) 420 if (ret)
430 goto err0; 421 goto err0;
@@ -432,18 +423,15 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
432 priv->smu_tables.entry[WMTABLE].version = 0x01; 423 priv->smu_tables.entry[WMTABLE].version = 0x01;
433 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); 424 priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
434 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; 425 priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
435 priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
436 priv->smu_tables.entry[WMTABLE].table = kaddr;
437 priv->smu_tables.entry[WMTABLE].handle = handle;
438 426
439 /* allocate space for AVFS table */ 427 /* allocate space for AVFS table */
440 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, 428 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
441 sizeof(AvfsTable_t), 429 sizeof(AvfsTable_t),
442 PAGE_SIZE, 430 PAGE_SIZE,
443 AMDGPU_GEM_DOMAIN_VRAM, 431 AMDGPU_GEM_DOMAIN_VRAM,
444 &handle, 432 &priv->smu_tables.entry[AVFSTABLE].handle,
445 &mc_addr, 433 &priv->smu_tables.entry[AVFSTABLE].mc_addr,
446 &kaddr); 434 &priv->smu_tables.entry[AVFSTABLE].table);
447 435
448 if (ret) 436 if (ret)
449 goto err1; 437 goto err1;
@@ -451,9 +439,6 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
451 priv->smu_tables.entry[AVFSTABLE].version = 0x01; 439 priv->smu_tables.entry[AVFSTABLE].version = 0x01;
452 priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); 440 priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
453 priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; 441 priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS;
454 priv->smu_tables.entry[AVFSTABLE].mc_addr = mc_addr;
455 priv->smu_tables.entry[AVFSTABLE].table = kaddr;
456 priv->smu_tables.entry[AVFSTABLE].handle = handle;
457 442
458 tools_size = 0x19000; 443 tools_size = 0x19000;
459 if (tools_size) { 444 if (tools_size) {
@@ -461,17 +446,14 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
461 tools_size, 446 tools_size,
462 PAGE_SIZE, 447 PAGE_SIZE,
463 AMDGPU_GEM_DOMAIN_VRAM, 448 AMDGPU_GEM_DOMAIN_VRAM,
464 &handle, 449 &priv->smu_tables.entry[TOOLSTABLE].handle,
465 &mc_addr, 450 &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
466 &kaddr); 451 &priv->smu_tables.entry[TOOLSTABLE].table);
467 if (ret) 452 if (ret)
468 goto err2; 453 goto err2;
469 priv->smu_tables.entry[TOOLSTABLE].version = 0x01; 454 priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
470 priv->smu_tables.entry[TOOLSTABLE].size = tools_size; 455 priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
471 priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG; 456 priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
472 priv->smu_tables.entry[TOOLSTABLE].mc_addr = mc_addr;
473 priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
474 priv->smu_tables.entry[TOOLSTABLE].handle = handle;
475 } 457 }
476 458
477 /* allocate space for AVFS Fuse table */ 459 /* allocate space for AVFS Fuse table */
@@ -479,18 +461,16 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
479 sizeof(AvfsFuseOverride_t), 461 sizeof(AvfsFuseOverride_t),
480 PAGE_SIZE, 462 PAGE_SIZE,
481 AMDGPU_GEM_DOMAIN_VRAM, 463 AMDGPU_GEM_DOMAIN_VRAM,
482 &handle, 464 &priv->smu_tables.entry[AVFSFUSETABLE].handle,
483 &mc_addr, 465 &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
484 &kaddr); 466 &priv->smu_tables.entry[AVFSFUSETABLE].table);
485 if (ret) 467 if (ret)
486 goto err3; 468 goto err3;
487 469
488 priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01; 470 priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
489 priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t); 471 priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
490 priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE; 472 priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
491 priv->smu_tables.entry[AVFSFUSETABLE].mc_addr = mc_addr; 473
492 priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
493 priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
494 474
495 return 0; 475 return 0;
496 476
@@ -511,6 +491,9 @@ err0:
511 amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle, 491 amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
512 &priv->smu_tables.entry[PPTABLE].mc_addr, 492 &priv->smu_tables.entry[PPTABLE].mc_addr,
513 &priv->smu_tables.entry[PPTABLE].table); 493 &priv->smu_tables.entry[PPTABLE].table);
494free_backend:
495 kfree(hwmgr->smu_backend);
496
514 return -EINVAL; 497 return -EINVAL;
515} 498}
516 499
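The vega10 hunks follow the same bookkeeping fix as the other smumgrs: each table entry now records its own handle/mc_addr/table triple at allocation time instead of funneling everything through one local handle, so the unwind path can free exactly the buffers that were created, and the new free_backend label plugs the leak of priv on the first failure. The pattern the change enables, sketched from the err0 ladder visible above (err1 through err3 sit outside this hunk):

	/* Unwind in reverse allocation order; each entry frees only itself. */
	err0:
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
				      &priv->smu_tables.entry[PPTABLE].mc_addr,
				      &priv->smu_tables.entry[PPTABLE].table);
	free_backend:
		kfree(hwmgr->smu_backend);
		return -EINVAL;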
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 211224f6bdd3..fe354ebf374d 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func ast_tt_backend_func = {
199}; 199};
200 200
201 201
202static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, 202static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo,
203 unsigned long size, uint32_t page_flags) 203 uint32_t page_flags)
204{ 204{
205 struct ttm_tt *tt; 205 struct ttm_tt *tt;
206 206
@@ -208,7 +208,7 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
208 if (tt == NULL) 208 if (tt == NULL)
209 return NULL; 209 return NULL;
210 tt->func = &ast_tt_backend_func; 210 tt->func = &ast_tt_backend_func;
211 if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 if (ttm_tt_init(tt, bo, page_flags)) {
212 kfree(tt); 212 kfree(tt);
213 return NULL; 213 return NULL;
214 } 214 }
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 18b95329f631..39cd08416773 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -176,8 +176,7 @@ static struct ttm_backend_func bochs_tt_backend_func = {
176 .destroy = &bochs_ttm_backend_destroy, 176 .destroy = &bochs_ttm_backend_destroy,
177}; 177};
178 178
179static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, 179static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo,
180 unsigned long size,
181 uint32_t page_flags) 180 uint32_t page_flags)
182{ 181{
183 struct ttm_tt *tt; 182 struct ttm_tt *tt;
@@ -186,7 +185,7 @@ static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
186 if (tt == NULL) 185 if (tt == NULL)
187 return NULL; 186 return NULL;
188 tt->func = &bochs_tt_backend_func; 187 tt->func = &bochs_tt_backend_func;
189 if (ttm_tt_init(tt, bdev, size, page_flags)) { 188 if (ttm_tt_init(tt, bo, page_flags)) {
190 kfree(tt); 189 kfree(tt);
191 return NULL; 190 return NULL;
192 } 191 }
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 6cd0233b3bf8..f21953243790 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
199}; 199};
200 200
201 201
202static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev, 202static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo,
203 unsigned long size, uint32_t page_flags) 203 uint32_t page_flags)
204{ 204{
205 struct ttm_tt *tt; 205 struct ttm_tt *tt;
206 206
@@ -208,7 +208,7 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
208 if (tt == NULL) 208 if (tt == NULL)
209 return NULL; 209 return NULL;
210 tt->func = &cirrus_tt_backend_func; 210 tt->func = &cirrus_tt_backend_func;
211 if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 if (ttm_tt_init(tt, bo, page_flags)) {
212 kfree(tt); 212 kfree(tt);
213 return NULL; 213 return NULL;
214 } 214 }
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 8dfffdbb6b07..4871025f7573 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -200,8 +200,7 @@ static struct ttm_backend_func hibmc_tt_backend_func = {
200 .destroy = &hibmc_ttm_backend_destroy, 200 .destroy = &hibmc_ttm_backend_destroy,
201}; 201};
202 202
203static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev, 203static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
204 unsigned long size,
205 u32 page_flags) 204 u32 page_flags)
206{ 205{
207 struct ttm_tt *tt; 206 struct ttm_tt *tt;
@@ -213,7 +212,7 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
213 return NULL; 212 return NULL;
214 } 213 }
215 tt->func = &hibmc_tt_backend_func; 214 tt->func = &hibmc_tt_backend_func;
216 ret = ttm_tt_init(tt, bdev, size, page_flags); 215 ret = ttm_tt_init(tt, bo, page_flags);
217 if (ret) { 216 if (ret) {
218 DRM_ERROR("failed to initialize ttm_tt: %d\n", ret); 217 DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
219 kfree(tt); 218 kfree(tt);
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 69beb2046008..05570f0de4d7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
199}; 199};
200 200
201 201
202static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev, 202static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
203 unsigned long size, uint32_t page_flags) 203 uint32_t page_flags)
204{ 204{
205 struct ttm_tt *tt; 205 struct ttm_tt *tt;
206 206
@@ -208,7 +208,7 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
208 if (tt == NULL) 208 if (tt == NULL)
209 return NULL; 209 return NULL;
210 tt->func = &mgag200_tt_backend_func; 210 tt->func = &mgag200_tt_backend_func;
211 if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 if (ttm_tt_init(tt, bo, page_flags)) {
212 kfree(tt); 212 kfree(tt);
213 return NULL; 213 return NULL;
214 } 214 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 49cc8dfcb141..6f402c4f2bdd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -604,19 +604,17 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 }

 static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-		      uint32_t page_flags)
+nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 {
 #if IS_ENABLED(CONFIG_AGP)
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

 	if (drm->agp.bridge) {
-		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
-					 page_flags);
+		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
 	}
 #endif

-	return nouveau_sgdma_create_ttm(bdev, size, page_flags);
+	return nouveau_sgdma_create_ttm(bo, page_flags);
 }

 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 87b030437f4d..8ebdc74cc0ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -82,10 +82,9 @@ static struct ttm_backend_func nv50_sgdma_backend = {
 };

 struct ttm_tt *
-nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
-			 unsigned long size, uint32_t page_flags)
+nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_sgdma_be *nvbe;

 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -97,7 +96,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
 	else
 		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

-	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags))
+	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
 		/*
 		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
 		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 64e484ee5ef1..89929ad8c7cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -12,8 +12,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
 extern const struct ttm_mem_type_manager_func nv04_gart_manager;

-struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
-					unsigned long size, u32 page_flags);
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
+					u32 page_flags);

 int nouveau_ttm_init(struct nouveau_drm *drm);
 void nouveau_ttm_fini(struct nouveau_drm *drm);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 2ad70eb96207..ee2340e31f06 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -291,19 +291,19 @@ static struct ttm_backend_func qxl_backend_func = {
 	.destroy = &qxl_ttm_backend_destroy,
 };

-static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
-					unsigned long size, uint32_t page_flags)
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
+					uint32_t page_flags)
 {
 	struct qxl_device *qdev;
 	struct qxl_ttm_tt *gtt;

-	qdev = qxl_get_qdev(bdev);
+	qdev = qxl_get_qdev(bo->bdev);
 	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL)
 		return NULL;
 	gtt->ttm.ttm.func = &qxl_backend_func;
 	gtt->qdev = qdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5012f5e47a1e..b108eaabb6df 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (radeon_dp_needs_link_train(radeon_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (!radeon_dp_getdpcd(radeon_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+		    radeon_dp_needs_link_train(radeon_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (!radeon_dp_getdpcd(radeon_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 38431f682ed0..edbb4cd519fd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 009f55a2bbf9..8689fcca051c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -686,17 +686,17 @@ static struct ttm_backend_func radeon_backend_func = {
 	.destroy = &radeon_ttm_backend_destroy,
 };

-static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
-					   unsigned long size, uint32_t page_flags)
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
+					   uint32_t page_flags)
 {
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt;

-	rdev = radeon_get_rdev(bdev);
+	rdev = radeon_get_rdev(bo->bdev);
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
-					 size, page_flags);
+		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
+					 page_flags);
 	}
 #endif

@@ -706,7 +706,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 	}
 	gtt->ttm.ttm.func = &radeon_backend_func;
 	gtt->rdev = rdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f7c2aefbec7c..7c2485fe88d8 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -110,9 +110,9 @@ static struct ttm_backend_func ttm_agp_func = {
 	.destroy = ttm_agp_destroy,
 };

-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
 				 struct agp_bridge_data *bridge,
-				 unsigned long size, uint32_t page_flags)
+				 uint32_t page_flags)
 {
 	struct ttm_agp_backend *agp_be;

@@ -124,7 +124,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 	agp_be->bridge = bridge;
 	agp_be->ttm.func = &ttm_agp_func;

-	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags)) {
+	if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
 		kfree(agp_be);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ad142a92eb80..98e06f8bf23b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -622,14 +622,23 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,

 	reservation_object_assert_held(bo->resv);

+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+
+	if (!placement.num_placement && !placement.num_busy_placement) {
+		ret = ttm_bo_pipeline_gutting(bo);
+		if (ret)
+			return ret;
+
+		return ttm_tt_create(bo, false);
+	}
+
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 	evict_mem.bus.io_reserved_vm = false;
 	evict_mem.bus.io_reserved_count = 0;

-	placement.num_placement = 0;
-	placement.num_busy_placement = 0;
-	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
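
The reordering above is the core of the "allow ttm to drop its backing store" feature: evict_flags() is now consulted before a destination is chosen, and if the driver reports no placements at all, ttm_bo_evict() pipelines the gutting of the BO (see ttm_bo_pipeline_gutting() in the next file) and recreates an empty, unpopulated ttm instead of migrating the contents. A hedged sketch of how a driver could opt in, with hypothetical foo_* helpers:

static void foo_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	/* Hypothetical predicate: contents may be discarded on eviction */
	if (foo_bo_is_discardable(bo)) {
		/* Zero placements tells ttm_bo_evict() to drop the
		 * backing store via ttm_bo_pipeline_gutting(). */
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}
	*placement = foo_system_placement;	/* hypothetical fallback */
}
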
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6d6a3f46143b..1f730b3f18e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -801,3 +801,27 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_pipeline_move);
+
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
+{
+	struct ttm_buffer_object *ghost;
+	int ret;
+
+	ret = ttm_buffer_object_transfer(bo, &ghost);
+	if (ret)
+		return ret;
+
+	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+	/* Last resort, wait for the BO to be idle when we are OOM */
+	if (ret)
+		ttm_bo_wait(bo, false, false);
+
+	memset(&bo->mem, 0, sizeof(bo->mem));
+	bo->mem.mem_type = TTM_PL_SYSTEM;
+	bo->ttm = NULL;
+
+	ttm_bo_unreserve(ghost);
+	ttm_bo_unref(&ghost);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0ee3b8f11605..7e672be987b5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,17 +31,11 @@
 #define pr_fmt(fmt) "[TTM] " fmt

 #include <linux/sched.h>
-#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/export.h>
 #include <drm/drm_cache.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -79,14 +73,10 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 		return -EINVAL;
 	}

-	bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					      page_flags);
+	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
 	if (unlikely(bo->ttm == NULL))
 		return -ENOMEM;

-	if (bo->type == ttm_bo_type_sg)
-		bo->ttm->sg = bo->sg;
-
 	return 0;
 }

@@ -114,6 +104,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }

+static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+					  sizeof(*ttm->dma_address),
+					  GFP_KERNEL | __GFP_ZERO);
+	if (!ttm->dma_address)
+		return -ENOMEM;
+	return 0;
+}
+
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_old,
@@ -233,15 +233,22 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	ttm->func->destroy(ttm);
 }

-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags)
+void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+			uint32_t page_flags)
 {
-	ttm->bdev = bdev;
-	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->bdev = bo->bdev;
+	ttm->num_pages = bo->num_pages;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
 	ttm->state = tt_unpopulated;
 	ttm->swap_storage = NULL;
+	ttm->sg = bo->sg;
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+		uint32_t page_flags)
+{
+	ttm_tt_init_fields(ttm, bo, page_flags);

 	if (ttm_tt_alloc_page_directory(ttm)) {
 		ttm_tt_destroy(ttm);
@@ -259,17 +266,12 @@ void ttm_tt_fini(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_tt_fini);

-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
-		    unsigned long size, uint32_t page_flags)
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+		    uint32_t page_flags)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;

-	ttm->bdev = bdev;
-	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	ttm->caching_state = tt_cached;
-	ttm->page_flags = page_flags;
-	ttm->state = tt_unpopulated;
-	ttm->swap_storage = NULL;
+	ttm_tt_init_fields(ttm, bo, page_flags);

 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
@@ -281,11 +283,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);

+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+		   uint32_t page_flags)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	int ret;
+
+	ttm_tt_init_fields(ttm, bo, page_flags);
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	if (page_flags & TTM_PAGE_FLAG_SG)
+		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+	else
+		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (ret) {
+		ttm_tt_destroy(ttm);
+		pr_err("Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_sg_tt_init);
+
 void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;

-	kvfree(ttm->pages);
+	if (ttm->pages)
+		kvfree(ttm->pages);
+	else
+		kvfree(ttm_dma->dma_address);
 	ttm->pages = NULL;
 	ttm_dma->dma_address = NULL;
 }
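
Taken together, the ttm_tt.c changes factor the common field setup into ttm_tt_init_fields() (which now also caches bo->sg, replacing the special case removed from ttm_tt_create()) and add ttm_sg_tt_init(), which for TTM_PAGE_FLAG_SG allocates only the dma_address array and no page array; that is why ttm_dma_tt_fini() now frees whichever of the two exists. A usage sketch under those assumptions, with a hypothetical foo_ttm_tt wrapper in the style of the qxl and virtio backends:

struct foo_ttm_tt {
	struct ttm_dma_tt ttm;		/* hypothetical driver wrapper */
};

static struct ttm_tt *foo_sg_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct foo_ttm_tt *gtt;

	gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &foo_backend_func;	/* hypothetical backend ops */
	/* allocates only dma_address when TTM_PAGE_FLAG_SG is set */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
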
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index fd5d9450878e..11f8ae5b5332 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -322,20 +322,19 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
 	.destroy = &virtio_gpu_ttm_backend_destroy,
 };

-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
-					       unsigned long size,
+static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
 					       uint32_t page_flags)
 {
 	struct virtio_gpu_device *vgdev;
 	struct virtio_gpu_ttm_tt *gtt;

-	vgdev = virtio_gpu_get_vgdev(bdev);
+	vgdev = virtio_gpu_get_vgdev(bo->bdev);
 	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL)
 		return NULL;
 	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
 	gtt->vgdev = vgdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index fead3f2dbb46..7177eecb8c9f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -693,8 +693,8 @@ static struct ttm_backend_func vmw_ttm_func = {
 	.destroy = vmw_ttm_destroy,
 };

-static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
-					unsigned long size, uint32_t page_flags)
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+					uint32_t page_flags)
 {
 	struct vmw_ttm_tt *vmw_be;
 	int ret;
@@ -704,13 +704,13 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 		return NULL;

 	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
-	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
 	vmw_be->mob = NULL;

 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags);
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
 	else
-		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags);
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
 	if (unlikely(ret != 0))
 		goto out_no_init;

diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 2c7daa3d0f24..548edb7c494b 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -193,8 +193,7 @@ static struct ttm_backend_func vbox_tt_backend_func = {
 	.destroy = &vbox_ttm_backend_destroy,
 };

-static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
-					 unsigned long size,
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
 					 u32 page_flags)
 {
 	struct ttm_tt *tt;
@@ -204,7 +203,7 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
 		return NULL;

 	tt->func = &vbox_tt_backend_func;
-	if (ttm_tt_init(tt, bdev, size, page_flags)) {
+	if (ttm_tt_init(tt, bo, page_flags)) {
 		kfree(tt);
 		return NULL;
 	}
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4312b5326f0b..3234cc322e70 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,111 +42,10 @@
 #include "ttm_memory.h"
 #include "ttm_module.h"
 #include "ttm_placement.h"
+#include "ttm_tt.h"

 #define TTM_MAX_BO_PRIORITY	4U

-struct ttm_backend_func {
-	/**
-	 * struct ttm_backend_func member bind
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
-	 * memory type and location for binding.
-	 *
-	 * Bind the backend pages into the aperture in the location
-	 * indicated by @bo_mem. This function should be able to handle
-	 * differences between aperture and system page sizes.
-	 */
-	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
-	/**
-	 * struct ttm_backend_func member unbind
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 *
-	 * Unbind previously bound backend pages. This function should be
-	 * able to handle differences between aperture and system page sizes.
-	 */
-	int (*unbind) (struct ttm_tt *ttm);
-
-	/**
-	 * struct ttm_backend_func member destroy
-	 *
-	 * @ttm: Pointer to a struct ttm_tt.
-	 *
-	 * Destroy the backend. This will be call back from ttm_tt_destroy so
-	 * don't call ttm_tt_destroy from the callback or infinite loop.
-	 */
-	void (*destroy) (struct ttm_tt *ttm);
-};
-
-#define TTM_PAGE_FLAG_WRITE           (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
-#define TTM_PAGE_FLAG_DMA32           (1 << 7)
-#define TTM_PAGE_FLAG_SG              (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY        (1 << 9)
-
-enum ttm_caching_state {
-	tt_uncached,
-	tt_wc,
-	tt_cached
-};
-
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-
-struct ttm_tt {
-	struct ttm_bo_device *bdev;
-	struct ttm_backend_func *func;
-	struct page **pages;
-	uint32_t page_flags;
-	unsigned long num_pages;
-	struct sg_table *sg; /* for SG objects via dma-buf */
-	struct file *swap_storage;
-	enum ttm_caching_state caching_state;
-	enum {
-		tt_bound,
-		tt_unbound,
-		tt_unpopulated,
-	} state;
-};
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
-	struct ttm_tt ttm;
-	dma_addr_t *dma_address;
-	struct list_head pages_list;
-};
-
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
@@ -326,8 +225,7 @@ struct ttm_bo_driver {
 	/**
 	 * ttm_tt_create
 	 *
-	 * @bdev: pointer to a struct ttm_bo_device:
-	 * @size: Size of the data needed backing.
+	 * @bo: The buffer object to create the ttm for.
 	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 	 *
 	 * Create a struct ttm_tt to back data with system memory pages.
@@ -335,8 +233,7 @@ struct ttm_bo_driver {
 	 * Returns:
 	 * NULL: Out of memory.
 	 */
-	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
-					unsigned long size,
+	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
 					uint32_t page_flags);

 	/**
@@ -610,117 +507,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 	return *old;
 }

-/**
- * ttm_tt_create
- *
- * @bo: pointer to a struct ttm_buffer_object
- * @zero_alloc: true if allocated pages needs to be zeroed
- *
- * Make sure we have a TTM structure allocated for the given BO.
- * No pages are actually allocated.
- */
-int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
-
-/**
- * ttm_tt_init
- *
- * @ttm: The struct ttm_tt.
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
-		    unsigned long size, uint32_t page_flags);
-
-/**
- * ttm_tt_fini
- *
- * @ttm: the ttm_tt structure.
- *
- * Free memory of ttm_tt structure
- */
-void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
-		struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_ttm_destroy:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind, unpopulate and destroy common struct ttm_tt.
- */
-void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-void ttm_tt_unbind(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_swapin:
- *
- * @ttm: The struct ttm_tt.
- *
- * Swap in a previously swap out ttm_tt.
- */
-int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
-
-/**
- * ttm_tt_populate - allocate pages for a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to allocate pages for a ttm
- */
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_tt_unpopulate - free pages from a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to free all pages from a ttm
- */
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
-
 /*
  * ttm_bo.c
  */
@@ -1062,6 +848,15 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 			  struct ttm_mem_reg *new_mem);

 /**
+ * ttm_bo_pipeline_gutting.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Pipelined gutting a BO of it's backing store.
+ */
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+
+/**
  * ttm_io_prot
  *
  * @c_state: Caching state.
@@ -1074,27 +869,4 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

-#if IS_ENABLED(CONFIG_AGP)
-#include <linux/agp_backend.h>
-
-/**
- * ttm_agp_tt_create
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @bridge: The agp bridge this device is sitting on.
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- *
- * Create a TTM backend that uses the indicated AGP bridge as an aperture
- * for TT memory. This function uses the linux agpgart interface to
- * bind and unbind memory backing a ttm_tt.
- */
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
-				 struct agp_bridge_data *bridge,
-				 unsigned long size, uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
-#endif
-
 #endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
new file mode 100644
index 000000000000..c0e928abf592
--- /dev/null
+++ b/include/drm/ttm/ttm_tt.h
@@ -0,0 +1,272 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _TTM_TT_H_
+#define _TTM_TT_H_
+
+#include <linux/types.h>
+
+struct ttm_tt;
+struct ttm_mem_reg;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+#define TTM_PAGE_FLAG_WRITE           (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
+#define TTM_PAGE_FLAG_DMA32           (1 << 7)
+#define TTM_PAGE_FLAG_SG              (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY        (1 << 9)
+
+enum ttm_caching_state {
+	tt_uncached,
+	tt_wc,
+	tt_cached
+};
+
+struct ttm_backend_func {
+	/**
+	 * struct ttm_backend_func member bind
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+	 * memory type and location for binding.
+	 *
+	 * Bind the backend pages into the aperture in the location
+	 * indicated by @bo_mem. This function should be able to handle
+	 * differences between aperture and system page sizes.
+	 */
+	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+	/**
+	 * struct ttm_backend_func member unbind
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Unbind previously bound backend pages. This function should be
+	 * able to handle differences between aperture and system page sizes.
+	 */
+	int (*unbind) (struct ttm_tt *ttm);
+
+	/**
+	 * struct ttm_backend_func member destroy
+	 *
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Destroy the backend. This will be call back from ttm_tt_destroy so
+	 * don't call ttm_tt_destroy from the callback or infinite loop.
+	 */
+	void (*destroy) (struct ttm_tt *ttm);
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @num_pages: Number of pages in the page array.
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @be: Pointer to the ttm backend.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+	struct ttm_bo_device *bdev;
+	struct ttm_backend_func *func;
+	struct page **pages;
+	uint32_t page_flags;
+	unsigned long num_pages;
+	struct sg_table *sg; /* for SG objects via dma-buf */
+	struct file *swap_storage;
+	enum ttm_caching_state caching_state;
+	enum {
+		tt_bound,
+		tt_unbound,
+		tt_unpopulated,
+	} state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+	struct ttm_tt ttm;
+	dma_addr_t *dma_address;
+	struct list_head pages_list;
+};
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages needs to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+		uint32_t page_flags);
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+		    uint32_t page_flags);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+		   uint32_t page_flags);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_ttm_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+		struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_ttm_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_ttm_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swap out ttm_tt.
+ */
+int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined,
+ * all CPU caches will first be flushed to make sure the data of the pages
+ * hit RAM. This function may be very costly as it involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_populate - allocate pages for a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to allocate pages for a ttm
+ */
+int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
+#if IS_ENABLED(CONFIG_AGP)
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bo: Buffer object we allocate the ttm for.
+ * @bridge: The agp bridge this device is sitting on.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
+				 struct agp_bridge_data *bridge,
+				 uint32_t page_flags);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+#endif
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1816bd8200d1..528f6d041e90 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -806,6 +806,7 @@ struct drm_amdgpu_info_firmware {
 #define AMDGPU_VRAM_TYPE_GDDR5 5
 #define AMDGPU_VRAM_TYPE_HBM   6
 #define AMDGPU_VRAM_TYPE_DDR3  7
+#define AMDGPU_VRAM_TYPE_DDR4  8

 struct drm_amdgpu_info_device {
 	/** PCI Device ID */
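
The new define only extends the VRAM type enumeration that the INFO ioctl reports in drm_amdgpu_info_device; existing userspace keeps working. An illustrative helper (not part of this patch) that a tool might use to print the field:

/* Assumes the amdgpu_drm.h uapi header shown above is included. */
static const char *amdgpu_vram_type_name(__u32 vram_type)
{
	switch (vram_type) {
	case AMDGPU_VRAM_TYPE_GDDR5: return "GDDR5";
	case AMDGPU_VRAM_TYPE_HBM:   return "HBM";
	case AMDGPU_VRAM_TYPE_DDR3:  return "DDR3";
	case AMDGPU_VRAM_TYPE_DDR4:  return "DDR4";
	default:                     return "unknown";
	}
}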