aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-09-27 21:27:05 -0400
committerDave Airlie <airlied@redhat.com>2016-09-27 21:27:05 -0400
commit9f4ef05bcdcfdf911b056b471dd3c6a4f331b644 (patch)
treeba8dfba87b4fe5295598f5438881822b6d3395f0
parent81c5d6aa3983662b6b48b504fe3a0a4c640f6a84 (diff)
parentbeb86f29c9c7f2d04f9a42c4c61cc469c3689779 (diff)
Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last set of radeon and amdgpu changes for 4.9. This is mostly just the powerplay cleanup for dGPUs. Beyond that, just misc code cleanups and bug fixes. * 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (49 commits) drm/amd/amdgpu: Clean up afmt allocation in DCEv6. (v2) drm/amd/amdgpu: Remove division from vblank_wait drm/radeon/atif: Send a hotplug event when we get dgpu display request drm/radeon/atpx: check for ATIF dGPU wake for display events support drm/amdgpu/atif: Send a hotplug event when we get dgpu display request drm/amdgpu/atpx: check for ATIF dGPU wake for display events support drm/amdgpu: bump version for new vce packet support drm/amdgpu/vce: allow the clock table packet drm/amdgpu:cleanup virt related define drm/amdgpu: use powerplay module for dgpu in Vi. drm/amdgpu: set gfx clock gating for tonga/polaris. drm/amdgpu: set system clock gating for tonga/polaris. drm/amd/powerplay: export function to help to set cg by smu. drm/amdgpu: avoid out of bounds access on array interrupt_status_offsets drm/amdgpu: mark symbols static where possible drm/amdgpu: remove unused functions drm/amd/powerplay: Replace per-asic print_performance with generic drm/radeon: narrow asic_init for virtualization drm/amdgpu:add fw version entry to info drm/amdgpu:determine if vPost is needed indeed ...
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h)45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_dpm.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c863
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c245
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_dpm.c200
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c677
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_dpm.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c862
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/si/sid.h35
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/include/cgs_common.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c120
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c175
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c121
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h105
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c5600
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h350
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c610
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h81
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c126
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h38
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h41
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c5684
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h424
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c490
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h74
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c595
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h58
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c5289
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c716
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c)160
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h)25
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h55
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c4359
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h)241
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c)985
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h)43
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c)258
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h58
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c350
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h107
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c6370
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h402
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c495
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c590
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h61
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h19
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smum.h)43
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h412
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h70
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c2374
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/fiji_smum.h)41
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c612
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c2576
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h)26
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c613
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h63
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c2287
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/tonga_smum.h)32
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c703
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h41
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c589
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h87
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c101
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c3207
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h)56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c672
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
106 files changed, 18603 insertions, 35882 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index dc6df075bafc..d15e9b080ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,10 +52,7 @@ amdgpu-y += \
52amdgpu-y += \ 52amdgpu-y += \
53 amdgpu_dpm.o \ 53 amdgpu_dpm.o \
54 amdgpu_powerplay.o \ 54 amdgpu_powerplay.o \
55 cz_smc.o cz_dpm.o \ 55 cz_smc.o cz_dpm.o
56 tonga_smc.o tonga_dpm.o \
57 fiji_smc.o fiji_dpm.o \
58 iceland_smc.o iceland_dpm.o
59 56
60# add DCE block 57# add DCE block
61amdgpu-y += \ 58amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ee45d9f7f3dc..9d79e4ba0213 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -57,6 +57,7 @@
57#include "amdgpu_acp.h" 57#include "amdgpu_acp.h"
58 58
59#include "gpu_scheduler.h" 59#include "gpu_scheduler.h"
60#include "amdgpu_virt.h"
60 61
61/* 62/*
62 * Modules parameters. 63 * Modules parameters.
@@ -1827,6 +1828,7 @@ struct amdgpu_asic_funcs {
1827 bool (*read_disabled_bios)(struct amdgpu_device *adev); 1828 bool (*read_disabled_bios)(struct amdgpu_device *adev);
1828 bool (*read_bios_from_rom)(struct amdgpu_device *adev, 1829 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
1829 u8 *bios, u32 length_bytes); 1830 u8 *bios, u32 length_bytes);
1831 void (*detect_hw_virtualization) (struct amdgpu_device *adev);
1830 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 1832 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1831 u32 sh_num, u32 reg_offset, u32 *value); 1833 u32 sh_num, u32 reg_offset, u32 *value);
1832 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 1834 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1836,8 +1838,6 @@ struct amdgpu_asic_funcs {
1836 /* MM block clocks */ 1838 /* MM block clocks */
1837 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1839 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1838 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1840 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1839 /* query virtual capabilities */
1840 u32 (*get_virtual_caps)(struct amdgpu_device *adev);
1841 /* static power management */ 1841 /* static power management */
1842 int (*get_pcie_lanes)(struct amdgpu_device *adev); 1842 int (*get_pcie_lanes)(struct amdgpu_device *adev);
1843 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); 1843 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
@@ -1933,16 +1933,6 @@ struct amdgpu_atcs {
1933struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 1933struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1934void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); 1934void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
1935 1935
1936
1937/* GPU virtualization */
1938#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
1939#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
1940struct amdgpu_virtualization {
1941 bool supports_sr_iov;
1942 bool is_virtual;
1943 u32 caps;
1944};
1945
1946/* 1936/*
1947 * Core structure, functions and helpers. 1937 * Core structure, functions and helpers.
1948 */ 1938 */
@@ -2260,12 +2250,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2260#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2250#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2261#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2251#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2262#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2252#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2263#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
2264#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) 2253#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
2265#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) 2254#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
2266#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2255#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2267#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2256#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2268#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2257#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
2258#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
2269#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 2259#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2270#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) 2260#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
2271#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) 2261#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
@@ -2323,6 +2313,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2323#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) 2313#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
2324#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) 2314#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
2325 2315
2316#define amdgpu_dpm_read_sensor(adev, idx, value) \
2317 ((adev)->pp_enabled ? \
2318 (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
2319 -EINVAL)
2320
2326#define amdgpu_dpm_get_temperature(adev) \ 2321#define amdgpu_dpm_get_temperature(adev) \
2327 ((adev)->pp_enabled ? \ 2322 ((adev)->pp_enabled ? \
2328 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ 2323 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2374,11 +2369,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2374 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ 2369 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
2375 (adev)->pm.funcs->powergate_vce((adev), (g))) 2370 (adev)->pm.funcs->powergate_vce((adev), (g)))
2376 2371
2377#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
2378 ((adev)->pp_enabled ? \
2379 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
2380 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
2381
2382#define amdgpu_dpm_get_current_power_state(adev) \ 2372#define amdgpu_dpm_get_current_power_state(adev) \
2383 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) 2373 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
2384 2374
@@ -2460,11 +2450,13 @@ void amdgpu_register_atpx_handler(void);
2460void amdgpu_unregister_atpx_handler(void); 2450void amdgpu_unregister_atpx_handler(void);
2461bool amdgpu_has_atpx_dgpu_power_cntl(void); 2451bool amdgpu_has_atpx_dgpu_power_cntl(void);
2462bool amdgpu_is_atpx_hybrid(void); 2452bool amdgpu_is_atpx_hybrid(void);
2453bool amdgpu_atpx_dgpu_req_power_for_displays(void);
2463#else 2454#else
2464static inline void amdgpu_register_atpx_handler(void) {} 2455static inline void amdgpu_register_atpx_handler(void) {}
2465static inline void amdgpu_unregister_atpx_handler(void) {} 2456static inline void amdgpu_unregister_atpx_handler(void) {}
2466static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } 2457static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
2467static inline bool amdgpu_is_atpx_hybrid(void) { return false; } 2458static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
2459static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
2468#endif 2460#endif
2469 2461
2470/* 2462/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b736a9de..5796539a0bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h> 27#include <linux/power_supply.h>
28#include <linux/pm_runtime.h>
28#include <acpi/video.h> 29#include <acpi/video.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
333#endif 334#endif
334 } 335 }
335 } 336 }
337 if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
338 if ((adev->flags & AMD_IS_PX) &&
339 amdgpu_atpx_dgpu_req_power_for_displays()) {
340 pm_runtime_get_sync(adev->ddev->dev);
341 /* Just fire off a uevent and let userspace tell us what to do */
342 drm_helper_hpd_irq_event(adev->ddev);
343 pm_runtime_mark_last_busy(adev->ddev->dev);
344 pm_runtime_put_autosuspend(adev->ddev->dev);
345 }
346 }
336 /* TODO: check other events */ 347 /* TODO: check other events */
337 348
338 /* We've handled the event, stop the notifier chain. The ACPI interface 349 /* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d0807a5b..dba8a5b25e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
143 return r; 143 return r;
144} 144}
145 145
146u32 pool_to_domain(enum kgd_memory_pool p)
147{
148 switch (p) {
149 case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
150 default: return AMDGPU_GEM_DOMAIN_GTT;
151 }
152}
153
154int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 146int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
155 void **mem_obj, uint64_t *gpu_addr, 147 void **mem_obj, uint64_t *gpu_addr,
156 void **cpu_ptr) 148 void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..550c5ee704ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
29 acpi_handle handle; 29 acpi_handle handle;
30 struct amdgpu_atpx_functions functions; 30 struct amdgpu_atpx_functions functions;
31 bool is_hybrid; 31 bool is_hybrid;
32 bool dgpu_req_power_for_displays;
32}; 33};
33 34
34static struct amdgpu_atpx_priv { 35static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
73 return amdgpu_atpx_priv.atpx.is_hybrid; 74 return amdgpu_atpx_priv.atpx.is_hybrid;
74} 75}
75 76
77bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
78 return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
79}
80
76/** 81/**
77 * amdgpu_atpx_call - call an ATPX method 82 * amdgpu_atpx_call - call an ATPX method
78 * 83 *
@@ -213,6 +218,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
213 atpx->is_hybrid = true; 218 atpx->is_hybrid = true;
214 } 219 }
215 220
221 atpx->dgpu_req_power_for_displays = false;
222 if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
223 atpx->dgpu_req_power_for_displays = true;
224
216 return 0; 225 return 0;
217} 226}
218 227
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1c53a2b09c6..7a8bfa34682f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
711 return -EINVAL; 711 return -EINVAL;
712} 712}
713 713
714static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
715 enum cgs_ucode_id type)
716{
717 CGS_FUNC_ADEV;
718 uint16_t fw_version;
719
720 switch (type) {
721 case CGS_UCODE_ID_SDMA0:
722 fw_version = adev->sdma.instance[0].fw_version;
723 break;
724 case CGS_UCODE_ID_SDMA1:
725 fw_version = adev->sdma.instance[1].fw_version;
726 break;
727 case CGS_UCODE_ID_CP_CE:
728 fw_version = adev->gfx.ce_fw_version;
729 break;
730 case CGS_UCODE_ID_CP_PFP:
731 fw_version = adev->gfx.pfp_fw_version;
732 break;
733 case CGS_UCODE_ID_CP_ME:
734 fw_version = adev->gfx.me_fw_version;
735 break;
736 case CGS_UCODE_ID_CP_MEC:
737 fw_version = adev->gfx.mec_fw_version;
738 break;
739 case CGS_UCODE_ID_CP_MEC_JT1:
740 fw_version = adev->gfx.mec_fw_version;
741 break;
742 case CGS_UCODE_ID_CP_MEC_JT2:
743 fw_version = adev->gfx.mec_fw_version;
744 break;
745 case CGS_UCODE_ID_RLC_G:
746 fw_version = adev->gfx.rlc_fw_version;
747 break;
748 default:
749 DRM_ERROR("firmware type %d do not have version\n", type);
750 fw_version = 0;
751 }
752 return fw_version;
753}
754
714static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, 755static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
715 enum cgs_ucode_id type, 756 enum cgs_ucode_id type,
716 struct cgs_firmware_info *info) 757 struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
741 info->mc_addr = gpu_addr; 782 info->mc_addr = gpu_addr;
742 info->image_size = data_size; 783 info->image_size = data_size;
743 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); 784 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
785 info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
744 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); 786 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
745 } else { 787 } else {
746 char fw_name[30] = {0}; 788 char fw_name[30] = {0};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 319a5e1d9389..decbba5ad438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1545,7 +1545,8 @@ static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
1545 return MODE_OK; 1545 return MODE_OK;
1546} 1546}
1547 1547
1548int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) 1548static int
1549amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
1549{ 1550{
1550 return 0; 1551 return 0;
1551} 1552}
@@ -1557,7 +1558,8 @@ amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
1557 return connector_status_connected; 1558 return connector_status_connected;
1558} 1559}
1559 1560
1560int amdgpu_connector_virtual_set_property(struct drm_connector *connector, 1561static int
1562amdgpu_connector_virtual_set_property(struct drm_connector *connector,
1561 struct drm_property *property, 1563 struct drm_property *property,
1562 uint64_t val) 1564 uint64_t val)
1563{ 1565{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ddae5ff41bb..99a15cad6789 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -50,6 +50,7 @@
50#include "vi.h" 50#include "vi.h"
51#include "bif/bif_4_1_d.h" 51#include "bif/bif_4_1_d.h"
52#include <linux/pci.h> 52#include <linux/pci.h>
53#include <linux/firmware.h>
53 54
54static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); 55static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
55static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); 56static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -110,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
110 bool always_indirect) 111 bool always_indirect)
111{ 112{
112 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); 113 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
113 114
114 if ((reg * 4) < adev->rmmio_size && !always_indirect) 115 if ((reg * 4) < adev->rmmio_size && !always_indirect)
115 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 116 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
116 else { 117 else {
@@ -651,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
651 652
652} 653}
653 654
655static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
656{
657 if (amdgpu_sriov_vf(adev))
658 return false;
659
660 if (amdgpu_passthrough(adev)) {
661 /* for FIJI: In whole GPU pass-through virtualization case
662 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
663 * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
664 * but if we force vPost do in pass-through case, the driver reload will hang.
665 * whether doing vPost depends on amdgpu_card_posted if smc version is above
666 * 00160e00 for FIJI.
667 */
668 if (adev->asic_type == CHIP_FIJI) {
669 int err;
670 uint32_t fw_ver;
671 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
672 /* force vPost if error occured */
673 if (err)
674 return true;
675
676 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
677 if (fw_ver >= 0x00160e00)
678 return !amdgpu_card_posted(adev);
679 }
680 } else {
681 /* in bare-metal case, amdgpu_card_posted return false
682 * after system reboot/boot, and return true if driver
683 * reloaded.
684 * we shouldn't do vPost after driver reload otherwise GPU
685 * could hang.
686 */
687 if (amdgpu_card_posted(adev))
688 return false;
689 }
690
691 /* we assume vPost is neede for all other cases */
692 return true;
693}
694
654/** 695/**
655 * amdgpu_dummy_page_init - init dummy page used by the driver 696 * amdgpu_dummy_page_init - init dummy page used by the driver
656 * 697 *
@@ -1485,13 +1526,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1485 return 0; 1526 return 0;
1486} 1527}
1487 1528
1488static bool amdgpu_device_is_virtual(void) 1529static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1489{ 1530{
1490#ifdef CONFIG_X86 1531 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1491 return boot_cpu_has(X86_FEATURE_HYPERVISOR); 1532 adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1492#else
1493 return false;
1494#endif
1495} 1533}
1496 1534
1497/** 1535/**
@@ -1648,25 +1686,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1648 goto failed; 1686 goto failed;
1649 } 1687 }
1650 1688
1651 /* See if the asic supports SR-IOV */ 1689 /* detect if we are with an SRIOV vbios */
1652 adev->virtualization.supports_sr_iov = 1690 amdgpu_device_detect_sriov_bios(adev);
1653 amdgpu_atombios_has_gpu_virtualization_table(adev);
1654
1655 /* Check if we are executing in a virtualized environment */
1656 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1657 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1658 1691
1659 /* Post card if necessary */ 1692 /* Post card if necessary */
1660 if (!amdgpu_card_posted(adev) || 1693 if (amdgpu_vpost_needed(adev)) {
1661 (adev->virtualization.is_virtual &&
1662 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
1663 if (!adev->bios) { 1694 if (!adev->bios) {
1664 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1695 dev_err(adev->dev, "no vBIOS found\n");
1665 r = -EINVAL; 1696 r = -EINVAL;
1666 goto failed; 1697 goto failed;
1667 } 1698 }
1668 DRM_INFO("GPU not posted. posting now...\n"); 1699 DRM_INFO("GPU posting now...\n");
1669 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1700 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1701 if (r) {
1702 dev_err(adev->dev, "gpu post error!\n");
1703 goto failed;
1704 }
1705 } else {
1706 DRM_INFO("GPU post is not needed\n");
1670 } 1707 }
1671 1708
1672 /* Initialize clocks */ 1709 /* Initialize clocks */
@@ -1842,8 +1879,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1842 1879
1843 adev = dev->dev_private; 1880 adev = dev->dev_private;
1844 1881
1845 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || 1882 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1846 dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
1847 return 0; 1883 return 0;
1848 1884
1849 drm_kms_helper_poll_disable(dev); 1885 drm_kms_helper_poll_disable(dev);
@@ -1928,8 +1964,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
1928 struct drm_crtc *crtc; 1964 struct drm_crtc *crtc;
1929 int r; 1965 int r;
1930 1966
1931 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || 1967 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1932 dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
1933 return 0; 1968 return 0;
1934 1969
1935 if (fbcon) 1970 if (fbcon)
@@ -2043,7 +2078,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2043 return asic_hang; 2078 return asic_hang;
2044} 2079}
2045 2080
2046int amdgpu_pre_soft_reset(struct amdgpu_device *adev) 2081static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2047{ 2082{
2048 int i, r = 0; 2083 int i, r = 0;
2049 2084
@@ -2714,7 +2749,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2714 if (size & 0x3 || *pos & 0x3) 2749 if (size & 0x3 || *pos & 0x3)
2715 return -EINVAL; 2750 return -EINVAL;
2716 2751
2717 config = kmalloc(256 * sizeof(*config), GFP_KERNEL); 2752 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
2718 if (!config) 2753 if (!config)
2719 return -ENOMEM; 2754 return -ENOMEM;
2720 2755
@@ -2773,6 +2808,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2773 return result; 2808 return result;
2774} 2809}
2775 2810
2811static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
2812 size_t size, loff_t *pos)
2813{
2814 struct amdgpu_device *adev = f->f_inode->i_private;
2815 int idx, r;
2816 int32_t value;
2817
2818 if (size != 4 || *pos & 0x3)
2819 return -EINVAL;
2820
2821 /* convert offset to sensor number */
2822 idx = *pos >> 2;
2823
2824 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
2825 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
2826 else
2827 return -EINVAL;
2828
2829 if (!r)
2830 r = put_user(value, (int32_t *)buf);
2831
2832 return !r ? 4 : r;
2833}
2776 2834
2777static const struct file_operations amdgpu_debugfs_regs_fops = { 2835static const struct file_operations amdgpu_debugfs_regs_fops = {
2778 .owner = THIS_MODULE, 2836 .owner = THIS_MODULE,
@@ -2805,12 +2863,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
2805 .llseek = default_llseek 2863 .llseek = default_llseek
2806}; 2864};
2807 2865
2866static const struct file_operations amdgpu_debugfs_sensors_fops = {
2867 .owner = THIS_MODULE,
2868 .read = amdgpu_debugfs_sensor_read,
2869 .llseek = default_llseek
2870};
2871
2808static const struct file_operations *debugfs_regs[] = { 2872static const struct file_operations *debugfs_regs[] = {
2809 &amdgpu_debugfs_regs_fops, 2873 &amdgpu_debugfs_regs_fops,
2810 &amdgpu_debugfs_regs_didt_fops, 2874 &amdgpu_debugfs_regs_didt_fops,
2811 &amdgpu_debugfs_regs_pcie_fops, 2875 &amdgpu_debugfs_regs_pcie_fops,
2812 &amdgpu_debugfs_regs_smc_fops, 2876 &amdgpu_debugfs_regs_smc_fops,
2813 &amdgpu_debugfs_gca_config_fops, 2877 &amdgpu_debugfs_gca_config_fops,
2878 &amdgpu_debugfs_sensors_fops,
2814}; 2879};
2815 2880
2816static const char *debugfs_regs_names[] = { 2881static const char *debugfs_regs_names[] = {
@@ -2819,6 +2884,7 @@ static const char *debugfs_regs_names[] = {
2819 "amdgpu_regs_pcie", 2884 "amdgpu_regs_pcie",
2820 "amdgpu_regs_smc", 2885 "amdgpu_regs_smc",
2821 "amdgpu_gca_config", 2886 "amdgpu_gca_config",
2887 "amdgpu_sensors",
2822}; 2888};
2823 2889
2824static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2890static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 596362624610..7dbc7727e32b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -56,9 +56,10 @@
56 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS. 56 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
57 * - 3.5.0 - Add support for new UVD_NO_OP register. 57 * - 3.5.0 - Add support for new UVD_NO_OP register.
58 * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. 58 * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
59 * - 3.7.0 - Add support for VCE clock list packet
59 */ 60 */
60#define KMS_DRIVER_MAJOR 3 61#define KMS_DRIVER_MAJOR 3
61#define KMS_DRIVER_MINOR 6 62#define KMS_DRIVER_MINOR 7
62#define KMS_DRIVER_PATCHLEVEL 0 63#define KMS_DRIVER_PATCHLEVEL 0
63 64
64int amdgpu_vram_limit = 0; 65int amdgpu_vram_limit = 0;
@@ -485,7 +486,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
485 /* if we are running in a VM, make sure the device 486 /* if we are running in a VM, make sure the device
486 * is torn down properly on reboot/shutdown 487 * is torn down properly on reboot/shutdown
487 */ 488 */
488 if (adev->virtualization.is_virtual) 489 if (amdgpu_passthrough(adev))
489 amdgpu_pci_remove(pdev); 490 amdgpu_pci_remove(pdev);
490} 491}
491 492
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d4ec3cb187a5..accc908bdc88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1322,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1322 */ 1322 */
1323#if defined(CONFIG_DEBUG_FS) 1323#if defined(CONFIG_DEBUG_FS)
1324 1324
1325static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
1326{
1327 int32_t value;
1328
1329 /* sanity check PP is enabled */
1330 if (!(adev->powerplay.pp_funcs &&
1331 adev->powerplay.pp_funcs->read_sensor))
1332 return -EINVAL;
1333
1334 /* GPU Clocks */
1335 seq_printf(m, "GFX Clocks and Power:\n");
1336 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
1337 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
1338 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
1339 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
1340 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
1341 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
1342 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
1343 seq_printf(m, "\t%u mV (VDDNB)\n", value);
1344 seq_printf(m, "\n");
1345
1346 /* GPU Temp */
1347 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
1348 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
1349
1350 /* GPU Load */
1351 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
1352 seq_printf(m, "GPU Load: %u %%\n", value);
1353 seq_printf(m, "\n");
1354
1355 /* UVD clocks */
1356 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
1357 if (!value) {
1358 seq_printf(m, "UVD: Disabled\n");
1359 } else {
1360 seq_printf(m, "UVD: Enabled\n");
1361 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
1362 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
1363 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
1364 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
1365 }
1366 }
1367 seq_printf(m, "\n");
1368
1369 /* VCE clocks */
1370 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
1371 if (!value) {
1372 seq_printf(m, "VCE: Disabled\n");
1373 } else {
1374 seq_printf(m, "VCE: Enabled\n");
1375 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
1376 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
1377 }
1378 }
1379
1380 return 0;
1381}
1382
1325static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 1383static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
1326{ 1384{
1327 struct drm_info_node *node = (struct drm_info_node *) m->private; 1385 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1337,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
1337 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1395 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1338 seq_printf(m, "PX asic powered off\n"); 1396 seq_printf(m, "PX asic powered off\n");
1339 } else if (adev->pp_enabled) { 1397 } else if (adev->pp_enabled) {
1340 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 1398 return amdgpu_debugfs_pm_info_pp(m, adev);
1341 } else { 1399 } else {
1342 mutex_lock(&adev->pm.mutex); 1400 mutex_lock(&adev->pm.mutex);
1343 if (adev->pm.funcs->debugfs_print_current_performance_level) 1401 if (adev->pm.funcs->debugfs_print_current_performance_level)
1344 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 1402 adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
1345 else 1403 else
1346 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1404 seq_printf(m, "Debugfs support not implemented for this asic\n");
1347 mutex_unlock(&adev->pm.mutex); 1405 mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 1e7f160f23d8..68ad24101a36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -80,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
80 amd_pp->ip_funcs = &kv_dpm_ip_funcs; 80 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
81 break; 81 break;
82#endif 82#endif
83 case CHIP_TOPAZ:
84 amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
85 break;
86 case CHIP_TONGA:
87 amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
88 break;
89 case CHIP_FIJI:
90 amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
91 break;
92 case CHIP_CARRIZO: 83 case CHIP_CARRIZO:
93 case CHIP_STONEY: 84 case CHIP_STONEY:
94 amd_pp->ip_funcs = &cz_dpm_ip_funcs; 85 amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -110,11 +101,11 @@ static int amdgpu_pp_early_init(void *handle)
110 switch (adev->asic_type) { 101 switch (adev->asic_type) {
111 case CHIP_POLARIS11: 102 case CHIP_POLARIS11:
112 case CHIP_POLARIS10: 103 case CHIP_POLARIS10:
113 adev->pp_enabled = true;
114 break;
115 case CHIP_TONGA: 104 case CHIP_TONGA:
116 case CHIP_FIJI: 105 case CHIP_FIJI:
117 case CHIP_TOPAZ: 106 case CHIP_TOPAZ:
107 adev->pp_enabled = true;
108 break;
118 case CHIP_CARRIZO: 109 case CHIP_CARRIZO:
119 case CHIP_STONEY: 110 case CHIP_STONEY:
120 adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; 111 adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 2c9ea9b50f48..06b94c13c2c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -691,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
691 case 0x04000008: /* rdo */ 691 case 0x04000008: /* rdo */
692 case 0x04000009: /* vui */ 692 case 0x04000009: /* vui */
693 case 0x05000002: /* auxiliary buffer */ 693 case 0x05000002: /* auxiliary buffer */
694 case 0x05000009: /* clock table */
694 break; 695 break;
695 696
696 case 0x03000001: /* encode */ 697 case 0x03000001: /* encode */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 88d68cb6e89d..2c37a374917f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -19,22 +19,39 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Author: Monk.liu@amd.com
22 */ 23 */
24#ifndef AMDGPU_VIRT_H
25#define AMDGPU_VIRT_H
23 26
24#ifndef _POLARIS10_CLOCK_POWER_GATING_H_ 27#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
25#define _POLARIS10_CLOCK_POWER_GATING_H_ 28#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
29#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
30#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
31/* GPU virtualization */
32struct amdgpu_virtualization {
33 uint32_t virtual_caps;
34};
26 35
27#include "polaris10_hwmgr.h" 36#define amdgpu_sriov_enabled(adev) \
28#include "pp_asicblocks.h" 37((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
29 38
30int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); 39#define amdgpu_sriov_vf(adev) \
31int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); 40((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
32int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
33int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
34int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
35int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
36int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
37 const uint32_t *msg_id);
38int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
39 41
40#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */ 42#define amdgpu_sriov_bios(adev) \
43((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
44
45#define amdgpu_passthrough(adev) \
46((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
47
48static inline bool is_virtual_machine(void)
49{
50#ifdef CONFIG_X86
51 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
52#else
53 return false;
54#endif
55}
56
57#endif \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 825de800b798..a845b6a93b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -963,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
963 return true; 963 return true;
964} 964}
965 965
966static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
967{
968 /* CIK does not support SR-IOV */
969 return 0;
970}
971
972static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 966static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
973 {mmGRBM_STATUS, false}, 967 {mmGRBM_STATUS, false},
974 {mmGB_ADDR_CONFIG, false}, 968 {mmGB_ADDR_CONFIG, false},
@@ -1641,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
1641 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; 1635 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
1642} 1636}
1643 1637
1638static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
1639{
1640 if (is_virtual_machine()) /* passthrough mode */
1641 adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
1642}
1643
1644static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = 1644static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
1645{ 1645{
1646 /* ORDER MATTERS! */ 1646 /* ORDER MATTERS! */
@@ -2384,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
2384{ 2384{
2385 .read_disabled_bios = &cik_read_disabled_bios, 2385 .read_disabled_bios = &cik_read_disabled_bios,
2386 .read_bios_from_rom = &cik_read_bios_from_rom, 2386 .read_bios_from_rom = &cik_read_bios_from_rom,
2387 .detect_hw_virtualization = cik_detect_hw_virtualization,
2387 .read_register = &cik_read_register, 2388 .read_register = &cik_read_register,
2388 .reset = &cik_asic_reset, 2389 .reset = &cik_asic_reset,
2389 .set_vga_state = &cik_vga_set_state, 2390 .set_vga_state = &cik_vga_set_state,
2390 .get_xclk = &cik_get_xclk, 2391 .get_xclk = &cik_get_xclk,
2391 .set_uvd_clocks = &cik_set_uvd_clocks, 2392 .set_uvd_clocks = &cik_set_uvd_clocks,
2392 .set_vce_clocks = &cik_set_vce_clocks, 2393 .set_vce_clocks = &cik_set_vce_clocks,
2393 .get_virtual_caps = &cik_get_virtual_caps,
2394}; 2394};
2395 2395
2396static int cik_common_early_init(void *handle) 2396static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00d62bc..8659852aea9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@ enum {
562 MTYPE_NONCACHED = 3 562 MTYPE_NONCACHED = 3
563}; 563};
564 564
565/* mmPA_SC_RASTER_CONFIG mask */
566#define RB_MAP_PKR0(x) ((x) << 0)
567#define RB_MAP_PKR0_MASK (0x3 << 0)
568#define RB_MAP_PKR1(x) ((x) << 2)
569#define RB_MAP_PKR1_MASK (0x3 << 2)
570#define RB_XSEL2(x) ((x) << 4)
571#define RB_XSEL2_MASK (0x3 << 4)
572#define RB_XSEL (1 << 6)
573#define RB_YSEL (1 << 7)
574#define PKR_MAP(x) ((x) << 8)
575#define PKR_MAP_MASK (0x3 << 8)
576#define PKR_XSEL(x) ((x) << 10)
577#define PKR_XSEL_MASK (0x3 << 10)
578#define PKR_YSEL(x) ((x) << 12)
579#define PKR_YSEL_MASK (0x3 << 12)
580#define SC_MAP(x) ((x) << 16)
581#define SC_MAP_MASK (0x3 << 16)
582#define SC_XSEL(x) ((x) << 18)
583#define SC_XSEL_MASK (0x3 << 18)
584#define SC_YSEL(x) ((x) << 20)
585#define SC_YSEL_MASK (0x3 << 20)
586#define SE_MAP(x) ((x) << 24)
587#define SE_MAP_MASK (0x3 << 24)
588#define SE_XSEL(x) ((x) << 26)
589#define SE_XSEL_MASK (0x3 << 26)
590#define SE_YSEL(x) ((x) << 28)
591#define SE_YSEL_MASK (0x3 << 28)
592
593/* mmPA_SC_RASTER_CONFIG_1 mask */
594#define SE_PAIR_MAP(x) ((x) << 0)
595#define SE_PAIR_MAP_MASK (0x3 << 0)
596#define SE_PAIR_XSEL(x) ((x) << 2)
597#define SE_PAIR_XSEL_MASK (0x3 << 2)
598#define SE_PAIR_YSEL(x) ((x) << 4)
599#define SE_PAIR_YSEL_MASK (0x3 << 4)
600
565#endif 601#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index 95887e484c51..aed7033c0973 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -101,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
101 return 0; 101 return 0;
102} 102}
103 103
104int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
105 u16 msg, u32 parameter)
106{
107 WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
108 return cz_send_msg_to_smc_async(adev, msg);
109}
110
111int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, 104int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
112 u16 msg, u32 parameter) 105 u16 msg, u32 parameter)
113{ 106{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bc5bb4eb9625..9d38fe0519e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
221 */ 221 */
222static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) 222static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
223{ 223{
224 unsigned i = 0; 224 unsigned i = 100;
225 225
226 if (crtc >= adev->mode_info.num_crtc) 226 if (crtc >= adev->mode_info.num_crtc)
227 return; 227 return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
233 * wait for another frame. 233 * wait for another frame.
234 */ 234 */
235 while (dce_v10_0_is_in_vblank(adev, crtc)) { 235 while (dce_v10_0_is_in_vblank(adev, crtc)) {
236 if (i++ % 100 == 0) { 236 if (i++ == 100) {
237 i = 0;
237 if (!dce_v10_0_is_counter_moving(adev, crtc)) 238 if (!dce_v10_0_is_counter_moving(adev, crtc))
238 break; 239 break;
239 } 240 }
240 } 241 }
241 242
242 while (!dce_v10_0_is_in_vblank(adev, crtc)) { 243 while (!dce_v10_0_is_in_vblank(adev, crtc)) {
243 if (i++ % 100 == 0) { 244 if (i++ == 100) {
245 i = 0;
244 if (!dce_v10_0_is_counter_moving(adev, crtc)) 246 if (!dce_v10_0_is_counter_moving(adev, crtc))
245 break; 247 break;
246 } 248 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index d3512f381e53..eb8f96a61491 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -146,7 +146,7 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
146 */ 146 */
147static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc) 147static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
148{ 148{
149 unsigned i = 0; 149 unsigned i = 100;
150 150
151 if (crtc >= adev->mode_info.num_crtc) 151 if (crtc >= adev->mode_info.num_crtc)
152 return; 152 return;
@@ -158,14 +158,16 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
158 * wait for another frame. 158 * wait for another frame.
159 */ 159 */
160 while (dce_v6_0_is_in_vblank(adev, crtc)) { 160 while (dce_v6_0_is_in_vblank(adev, crtc)) {
161 if (i++ % 100 == 0) { 161 if (i++ == 100) {
162 i = 0;
162 if (!dce_v6_0_is_counter_moving(adev, crtc)) 163 if (!dce_v6_0_is_counter_moving(adev, crtc))
163 break; 164 break;
164 } 165 }
165 } 166 }
166 167
167 while (!dce_v6_0_is_in_vblank(adev, crtc)) { 168 while (!dce_v6_0_is_in_vblank(adev, crtc)) {
168 if (i++ % 100 == 0) { 169 if (i++ == 100) {
170 i = 0;
169 if (!dce_v6_0_is_counter_moving(adev, crtc)) 171 if (!dce_v6_0_is_counter_moving(adev, crtc))
170 break; 172 break;
171 } 173 }
@@ -185,7 +187,7 @@ static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
185 unsigned i; 187 unsigned i;
186 188
187 /* Enable pflip interrupts */ 189 /* Enable pflip interrupts */
188 for (i = 0; i <= adev->mode_info.num_crtc; i++) 190 for (i = 0; i < adev->mode_info.num_crtc; i++)
189 amdgpu_irq_get(adev, &adev->pageflip_irq, i); 191 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
190} 192}
191 193
@@ -194,7 +196,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
194 unsigned i; 196 unsigned i;
195 197
196 /* Disable pflip interrupts */ 198 /* Disable pflip interrupts */
197 for (i = 0; i <= adev->mode_info.num_crtc; i++) 199 for (i = 0; i < adev->mode_info.num_crtc; i++)
198 amdgpu_irq_put(adev, &adev->pageflip_irq, i); 200 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
199} 201}
200 202
@@ -1420,21 +1422,29 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1420 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1422 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1421} 1423}
1422 1424
1423static void dce_v6_0_afmt_init(struct amdgpu_device *adev) 1425static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1424{ 1426{
1425 int i; 1427 int i, j;
1426 1428
1427 for (i = 0; i < adev->mode_info.num_dig; i++) 1429 for (i = 0; i < adev->mode_info.num_dig; i++)
1428 adev->mode_info.afmt[i] = NULL; 1430 adev->mode_info.afmt[i] = NULL;
1429 1431
1430 /* DCE8 has audio blocks tied to DIG encoders */ 1432 /* DCE6 has audio blocks tied to DIG encoders */
1431 for (i = 0; i < adev->mode_info.num_dig; i++) { 1433 for (i = 0; i < adev->mode_info.num_dig; i++) {
1432 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1434 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1433 if (adev->mode_info.afmt[i]) { 1435 if (adev->mode_info.afmt[i]) {
1434 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1436 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1435 adev->mode_info.afmt[i]->id = i; 1437 adev->mode_info.afmt[i]->id = i;
1438 } else {
1439 for (j = 0; j < i; j++) {
1440 kfree(adev->mode_info.afmt[j]);
1441 adev->mode_info.afmt[j] = NULL;
1442 }
1443 DRM_ERROR("Out of memory allocating afmt table\n");
1444 return -ENOMEM;
1436 } 1445 }
1437 } 1446 }
1447 return 0;
1438} 1448}
1439 1449
1440static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) 1450static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
@@ -2397,7 +2407,9 @@ static int dce_v6_0_sw_init(void *handle)
2397 return -EINVAL; 2407 return -EINVAL;
2398 2408
2399 /* setup afmt */ 2409 /* setup afmt */
2400 dce_v6_0_afmt_init(adev); 2410 r = dce_v6_0_afmt_init(adev);
2411 if (r)
2412 return r;
2401 2413
2402 r = dce_v6_0_audio_init(adev); 2414 r = dce_v6_0_audio_init(adev);
2403 if (r) 2415 if (r)
@@ -2782,7 +2794,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2782 uint32_t disp_int, mask, int_control, tmp; 2794 uint32_t disp_int, mask, int_control, tmp;
2783 unsigned hpd; 2795 unsigned hpd;
2784 2796
2785 if (entry->src_data > 6) { 2797 if (entry->src_data >= adev->mode_info.num_hpd) {
2786 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 2798 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2787 return 0; 2799 return 0;
2788 } 2800 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index abd5213dfe18..a7decf977b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
170 */ 170 */
171static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) 171static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
172{ 172{
173 unsigned i = 0; 173 unsigned i = 100;
174 174
175 if (crtc >= adev->mode_info.num_crtc) 175 if (crtc >= adev->mode_info.num_crtc)
176 return; 176 return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
182 * wait for another frame. 182 * wait for another frame.
183 */ 183 */
184 while (dce_v8_0_is_in_vblank(adev, crtc)) { 184 while (dce_v8_0_is_in_vblank(adev, crtc)) {
185 if (i++ % 100 == 0) { 185 if (i++ == 100) {
186 i = 0;
186 if (!dce_v8_0_is_counter_moving(adev, crtc)) 187 if (!dce_v8_0_is_counter_moving(adev, crtc))
187 break; 188 break;
188 } 189 }
189 } 190 }
190 191
191 while (!dce_v8_0_is_in_vblank(adev, crtc)) { 192 while (!dce_v8_0_is_in_vblank(adev, crtc)) {
192 if (i++ % 100 == 0) { 193 if (i++ == 100) {
194 i = 0;
193 if (!dce_v8_0_is_counter_moving(adev, crtc)) 195 if (!dce_v8_0_is_counter_moving(adev, crtc))
194 break; 196 break;
195 } 197 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 619b604ab8ae..30badd261269 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -95,7 +95,7 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
95 return false; 95 return false;
96} 96}
97 97
98void dce_virtual_stop_mc_access(struct amdgpu_device *adev, 98static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
99 struct amdgpu_mode_mc_save *save) 99 struct amdgpu_mode_mc_save *save)
100{ 100{
101 switch (adev->asic_type) { 101 switch (adev->asic_type) {
@@ -127,13 +127,13 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
127 127
128 return; 128 return;
129} 129}
130void dce_virtual_resume_mc_access(struct amdgpu_device *adev, 130static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
131 struct amdgpu_mode_mc_save *save) 131 struct amdgpu_mode_mc_save *save)
132{ 132{
133 return; 133 return;
134} 134}
135 135
136void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, 136static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
137 bool render) 137 bool render)
138{ 138{
139 return; 139 return;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75175d4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_smum.h"
28
29MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
30
31static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int fiji_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 fiji_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/fiji_smc.bin";
45 int err;
46
47 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
48 if (err)
49 goto out;
50 err = amdgpu_ucode_validate(adev->pm.fw);
51
52out:
53 if (err) {
54 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
55 release_firmware(adev->pm.fw);
56 adev->pm.fw = NULL;
57 }
58 return err;
59}
60
61static int fiji_dpm_sw_init(void *handle)
62{
63 int ret;
64 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66 ret = fiji_dpm_init_microcode(adev);
67 if (ret)
68 return ret;
69
70 return 0;
71}
72
73static int fiji_dpm_sw_fini(void *handle)
74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
80 return 0;
81}
82
83static int fiji_dpm_hw_init(void *handle)
84{
85 int ret;
86 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
87
88 mutex_lock(&adev->pm.mutex);
89
90 ret = fiji_smu_init(adev);
91 if (ret) {
92 DRM_ERROR("SMU initialization failed\n");
93 goto fail;
94 }
95
96 ret = fiji_smu_start(adev);
97 if (ret) {
98 DRM_ERROR("SMU start failed\n");
99 goto fail;
100 }
101
102 mutex_unlock(&adev->pm.mutex);
103 return 0;
104
105fail:
106 adev->firmware.smu_load = false;
107 mutex_unlock(&adev->pm.mutex);
108 return -EINVAL;
109}
110
111static int fiji_dpm_hw_fini(void *handle)
112{
113 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
114 mutex_lock(&adev->pm.mutex);
115 fiji_smu_fini(adev);
116 mutex_unlock(&adev->pm.mutex);
117 return 0;
118}
119
120static int fiji_dpm_suspend(void *handle)
121{
122 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
123
124 fiji_dpm_hw_fini(adev);
125
126 return 0;
127}
128
129static int fiji_dpm_resume(void *handle)
130{
131 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
132
133 fiji_dpm_hw_init(adev);
134
135 return 0;
136}
137
138static int fiji_dpm_set_clockgating_state(void *handle,
139 enum amd_clockgating_state state)
140{
141 return 0;
142}
143
144static int fiji_dpm_set_powergating_state(void *handle,
145 enum amd_powergating_state state)
146{
147 return 0;
148}
149
150const struct amd_ip_funcs fiji_dpm_ip_funcs = {
151 .name = "fiji_dpm",
152 .early_init = fiji_dpm_early_init,
153 .late_init = NULL,
154 .sw_init = fiji_dpm_sw_init,
155 .sw_fini = fiji_dpm_sw_fini,
156 .hw_init = fiji_dpm_hw_init,
157 .hw_fini = fiji_dpm_hw_fini,
158 .suspend = fiji_dpm_suspend,
159 .resume = fiji_dpm_resume,
160 .is_idle = NULL,
161 .wait_for_idle = NULL,
162 .soft_reset = NULL,
163 .set_clockgating_state = fiji_dpm_set_clockgating_state,
164 .set_powergating_state = fiji_dpm_set_powergating_state,
165};
166
167static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
168 .get_temperature = NULL,
169 .pre_set_power_state = NULL,
170 .set_power_state = NULL,
171 .post_set_power_state = NULL,
172 .display_configuration_changed = NULL,
173 .get_sclk = NULL,
174 .get_mclk = NULL,
175 .print_power_state = NULL,
176 .debugfs_print_current_performance_level = NULL,
177 .force_performance_level = NULL,
178 .vblank_too_short = NULL,
179 .powergate_uvd = NULL,
180};
181
182static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
183{
184 if (NULL == adev->pm.funcs)
185 adev->pm.funcs = &fiji_dpm_funcs;
186}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba4c57f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "fiji_ppsmc.h"
28#include "fiji_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_3_d.h"
33#include "smu/smu_7_1_3_sh_mask.h"
34
35#define FIJI_SMC_SIZE 0x20000
36
37static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
38{
39 uint32_t val;
40
41 if (smc_address & 3)
42 return -EINVAL;
43
44 if ((smc_address + 3) > limit)
45 return -EINVAL;
46
47 WREG32(mmSMC_IND_INDEX_0, smc_address);
48
49 val = RREG32(mmSMC_IND_ACCESS_CNTL);
50 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
51 WREG32(mmSMC_IND_ACCESS_CNTL, val);
52
53 return 0;
54}
55
/*
 * Copy @byte_count bytes from @src into SMC SRAM at @smc_start_address via
 * the indirect index/data register pair, holding smc_idx_lock for the whole
 * transfer.  Whole words are written MSB-first; a trailing partial word is
 * handled with a read-modify-write so bytes past the end are preserved.
 * Returns 0 on success or -EINVAL on a misaligned start / range overflow.
 */
static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	uint32_t addr;
	uint32_t data, orig_data;
	int result = 0;
	uint32_t extra_shift;
	unsigned long flags;

	if (smc_start_address & 3)
		return -EINVAL;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		result = fiji_set_smc_sram_address(adev, addr, limit);

		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Now write odd bytes left, do a read modify write cycle */
		data = 0;

		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		/* read the word that contains the tail so untouched bytes survive */
		orig_data = RREG32(mmSMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		/* accumulate the remaining source bytes MSB-first */
		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		/* align new bytes to the top, merge the preserved low bytes back in */
		data <<= extra_shift;
		data |= (orig_data & ~((~0UL) << extra_shift));

		/* re-select the address; the read above used the same index */
		result = fiji_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);
	}

out:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}
119
/*
 * Write a 4-byte instruction sequence at SMC address 0 so execution jumps
 * to the loaded firmware when the SMC is released from reset.
 * (NOTE(review): byte values presumably encode that jump — confirm against
 * the SMC ISA docs.)
 *
 * Fix: the original discarded the result of fiji_copy_bytes_to_smc() and
 * unconditionally returned 0; propagate the copy status instead.  The
 * opcode table is also made const since it is never modified.
 */
static int fiji_program_jump_on_start(struct amdgpu_device *adev)
{
	static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };

	/*
	 * limit is sizeof(data) + 1 so the range check in
	 * fiji_copy_bytes_to_smc (addr + count > limit) admits the write.
	 */
	return fiji_copy_bytes_to_smc(adev, 0x0, data, sizeof(data),
				      sizeof(data) + 1);
}
127
128static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
129{
130 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138 int i;
139 uint32_t val;
140
141 for (i = 0; i < adev->usec_timeout; i++) {
142 val = RREG32(mmSMC_RESP_0);
143 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 break;
145 udelay(1);
146 }
147
148 if (i == adev->usec_timeout)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156 if (wait_smu_response(adev)) {
157 DRM_ERROR("Failed to send previous message\n");
158 return -EINVAL;
159 }
160
161 WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164 if (wait_smu_response(adev)) {
165 DRM_ERROR("Failed to send message\n");
166 return -EINVAL;
167 }
168
169 return 0;
170}
171
172static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174 if (!fiji_is_smc_ram_running(adev))
175 {
176 return -EINVAL;
177 }
178
179 if (wait_smu_response(adev)) {
180 DRM_ERROR("Failed to send previous message\n");
181 return -EINVAL;
182 }
183
184 WREG32(mmSMC_MESSAGE_0, msg);
185
186 if (wait_smu_response(adev)) {
187 DRM_ERROR("Failed to send message\n");
188 return -EINVAL;
189 }
190
191 return 0;
192}
193
194static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195 PPSMC_Msg msg)
196{
197 if (wait_smu_response(adev)) {
198 DRM_ERROR("Failed to send previous message\n");
199 return -EINVAL;
200 }
201
202 WREG32(mmSMC_MESSAGE_0, msg);
203
204 return 0;
205}
206
207static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208 PPSMC_Msg msg,
209 uint32_t parameter)
210{
211 if (!fiji_is_smc_ram_running(adev))
212 return -EINVAL;
213
214 if (wait_smu_response(adev)) {
215 DRM_ERROR("Failed to send previous message\n");
216 return -EINVAL;
217 }
218
219 WREG32(mmSMC_MSG_ARG_0, parameter);
220
221 return fiji_send_msg_to_smc(adev, msg);
222}
223
224static int fiji_send_msg_to_smc_with_parameter_without_waiting(
225 struct amdgpu_device *adev,
226 PPSMC_Msg msg, uint32_t parameter)
227{
228 if (wait_smu_response(adev)) {
229 DRM_ERROR("Failed to send previous message\n");
230 return -EINVAL;
231 }
232
233 WREG32(mmSMC_MSG_ARG_0, parameter);
234
235 return fiji_send_msg_to_smc_without_waiting(adev, msg);
236}
237
#if 0 /* not used yet */
/*
 * Poll until the SMC clock-enable bit (cken) drops, i.e. the SMC has gone
 * idle.  Returns -EINVAL if the SMC is not running or the wait times out.
 */
static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	if (!fiji_is_smc_ram_running(adev))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}
#endif
260
261static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
262{
263 const struct smc_firmware_header_v1_0 *hdr;
264 uint32_t ucode_size;
265 uint32_t ucode_start_address;
266 const uint8_t *src;
267 uint32_t val;
268 uint32_t byte_count;
269 uint32_t *data;
270 unsigned long flags;
271
272 if (!adev->pm.fw)
273 return -EINVAL;
274
275 /* Skip SMC ucode loading on SR-IOV capable boards.
276 * vbios does this for us in asic_init in that case.
277 */
278 if (adev->virtualization.supports_sr_iov)
279 return 0;
280
281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
282 amdgpu_ucode_print_smc_hdr(&hdr->header);
283
284 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
285 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
286 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
287 src = (const uint8_t *)
288 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
289
290 if (ucode_size & 3) {
291 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
292 return -EINVAL;
293 }
294
295 if (ucode_size > FIJI_SMC_SIZE) {
296 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
297 return -EINVAL;
298 }
299
300 spin_lock_irqsave(&adev->smc_idx_lock, flags);
301 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
302
303 val = RREG32(mmSMC_IND_ACCESS_CNTL);
304 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
305 WREG32(mmSMC_IND_ACCESS_CNTL, val);
306
307 byte_count = ucode_size;
308 data = (uint32_t *)src;
309 for (; byte_count >= 4; data++, byte_count -= 4)
310 WREG32(mmSMC_IND_DATA_0, data[0]);
311
312 val = RREG32(mmSMC_IND_ACCESS_CNTL);
313 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
314 WREG32(mmSMC_IND_ACCESS_CNTL, val);
315 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
316
317 return 0;
318}
319
#if 0 /* not used yet */
/* Read one dword from SMC SRAM at @smc_address (bounds-checked by @limit). */
static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
				    uint32_t smc_address,
				    uint32_t *value,
				    uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = fiji_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		*value = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

/* Write one dword to SMC SRAM at @smc_address (bounds-checked by @limit). */
static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
				     uint32_t smc_address,
				     uint32_t value,
				     uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = fiji_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		WREG32(mmSMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

/* Halt the SMC: assert its reset line and gate its clock. */
static int fiji_smu_stop_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	return 0;
}
#endif
366
367static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
368{
369 switch (fw_type) {
370 case UCODE_ID_SDMA0:
371 return AMDGPU_UCODE_ID_SDMA0;
372 case UCODE_ID_SDMA1:
373 return AMDGPU_UCODE_ID_SDMA1;
374 case UCODE_ID_CP_CE:
375 return AMDGPU_UCODE_ID_CP_CE;
376 case UCODE_ID_CP_PFP:
377 return AMDGPU_UCODE_ID_CP_PFP;
378 case UCODE_ID_CP_ME:
379 return AMDGPU_UCODE_ID_CP_ME;
380 case UCODE_ID_CP_MEC:
381 case UCODE_ID_CP_MEC_JT1:
382 case UCODE_ID_CP_MEC_JT2:
383 return AMDGPU_UCODE_ID_CP_MEC1;
384 case UCODE_ID_RLC_G:
385 return AMDGPU_UCODE_ID_RLC_G;
386 default:
387 DRM_ERROR("ucode type is out of range!\n");
388 return AMDGPU_UCODE_ID_MAXIMUM;
389 }
390}
391
/*
 * Fill one SMU_Entry TOC record for ucode @fw_type from the already-loaded
 * firmware in adev->firmware.ucode[].  For the MEC jump tables the GPU
 * address/size are adjusted to the JT sub-range inside the MEC image.
 * Returns -EINVAL when the corresponding firmware was never fetched.
 */
static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						   uint32_t fw_type,
						   struct SMU_Entry *entry)
{
	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header = NULL;
	uint64_t gpu_addr;
	uint32_t data_size;

	if (ucode->fw == NULL)
		return -EINVAL;
	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* jump tables are a dword-indexed slice of the MEC image */
	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
	    (fw_type == UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
	entry->id = (uint16_t)fw_type;
	entry->image_addr_high = upper_32_bits(gpu_addr);
	entry->image_addr_low = lower_32_bits(gpu_addr);
	entry->meta_data_addr_high = 0;
	entry->meta_data_addr_low = 0;
	entry->data_size_byte = data_size;
	entry->num_register_entries = 0;

	/* NOTE(review): flags semantics undocumented here; RLC is the only
	 * entry marked with 1 — confirm against the SMU TOC spec. */
	if (fw_type == UCODE_ID_RLC_G)
		entry->flags = 1;
	else
		entry->flags = 0;

	return 0;
}
430
431static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
432{
433 struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
434 struct SMU_DRAMData_TOC *toc;
435 uint32_t fw_to_load;
436
437 WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
438
439 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
440 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
441
442 toc = (struct SMU_DRAMData_TOC *)private->header;
443 toc->num_entries = 0;
444 toc->structure_version = 1;
445
446 if (!adev->firmware.smu_load)
447 return 0;
448
449 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
450 &toc->entry[toc->num_entries++])) {
451 DRM_ERROR("Failed to get firmware entry for RLC\n");
452 return -EINVAL;
453 }
454
455 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
456 &toc->entry[toc->num_entries++])) {
457 DRM_ERROR("Failed to get firmware entry for CE\n");
458 return -EINVAL;
459 }
460
461 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
462 &toc->entry[toc->num_entries++])) {
463 DRM_ERROR("Failed to get firmware entry for PFP\n");
464 return -EINVAL;
465 }
466
467 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
468 &toc->entry[toc->num_entries++])) {
469 DRM_ERROR("Failed to get firmware entry for ME\n");
470 return -EINVAL;
471 }
472
473 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
474 &toc->entry[toc->num_entries++])) {
475 DRM_ERROR("Failed to get firmware entry for MEC\n");
476 return -EINVAL;
477 }
478
479 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
480 &toc->entry[toc->num_entries++])) {
481 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
482 return -EINVAL;
483 }
484
485 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
486 &toc->entry[toc->num_entries++])) {
487 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
488 return -EINVAL;
489 }
490
491 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
492 &toc->entry[toc->num_entries++])) {
493 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
494 return -EINVAL;
495 }
496
497 if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
498 &toc->entry[toc->num_entries++])) {
499 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
500 return -EINVAL;
501 }
502
503 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
504 fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
505
506 fw_to_load = UCODE_ID_RLC_G_MASK |
507 UCODE_ID_SDMA0_MASK |
508 UCODE_ID_SDMA1_MASK |
509 UCODE_ID_CP_CE_MASK |
510 UCODE_ID_CP_ME_MASK |
511 UCODE_ID_CP_PFP_MASK |
512 UCODE_ID_CP_MEC_MASK;
513
514 if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
515 DRM_ERROR("Fail to request SMU load ucode\n");
516 return -EINVAL;
517 }
518
519 return 0;
520}
521
522static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
523{
524 switch (fw_type) {
525 case AMDGPU_UCODE_ID_SDMA0:
526 return UCODE_ID_SDMA0_MASK;
527 case AMDGPU_UCODE_ID_SDMA1:
528 return UCODE_ID_SDMA1_MASK;
529 case AMDGPU_UCODE_ID_CP_CE:
530 return UCODE_ID_CP_CE_MASK;
531 case AMDGPU_UCODE_ID_CP_PFP:
532 return UCODE_ID_CP_PFP_MASK;
533 case AMDGPU_UCODE_ID_CP_ME:
534 return UCODE_ID_CP_ME_MASK;
535 case AMDGPU_UCODE_ID_CP_MEC1:
536 return UCODE_ID_CP_MEC_MASK;
537 case AMDGPU_UCODE_ID_CP_MEC2:
538 return UCODE_ID_CP_MEC_MASK;
539 case AMDGPU_UCODE_ID_RLC_G:
540 return UCODE_ID_RLC_G_MASK;
541 default:
542 DRM_ERROR("ucode type is out of range!\n");
543 return 0;
544 }
545}
546
547static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
548 uint32_t fw_type)
549{
550 uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
551 int i;
552
553 for (i = 0; i < adev->usec_timeout; i++) {
554 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
555 break;
556 udelay(1);
557 }
558
559 if (i == adev->usec_timeout) {
560 DRM_ERROR("check firmware loading failed\n");
561 return -EINVAL;
562 }
563
564 return 0;
565}
566
567static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
568{
569 int result;
570 uint32_t val;
571 int i;
572
573 /* Assert reset */
574 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
575 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
576 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
577
578 result = fiji_smu_upload_firmware_image(adev);
579 if (result)
580 return result;
581
582 /* Clear status */
583 WREG32_SMC(ixSMU_STATUS, 0);
584
585 /* Enable clock */
586 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
587 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
588 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
589
590 /* De-assert reset */
591 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
592 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
593 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
594
595 /* Set SMU Auto Start */
596 val = RREG32_SMC(ixSMU_INPUT_DATA);
597 val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
598 WREG32_SMC(ixSMU_INPUT_DATA, val);
599
600 /* Clear firmware interrupt enable flag */
601 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
602
603 for (i = 0; i < adev->usec_timeout; i++) {
604 val = RREG32_SMC(ixRCU_UC_EVENTS);
605 if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
606 break;
607 udelay(1);
608 }
609
610 if (i == adev->usec_timeout) {
611 DRM_ERROR("Interrupt is not enabled by firmware\n");
612 return -EINVAL;
613 }
614
615 /* Call Test SMU message with 0x20000 offset
616 * to trigger SMU start
617 */
618 fiji_send_msg_to_smc_offset(adev);
619 DRM_INFO("[FM]try triger smu start\n");
620 /* Wait for done bit to be set */
621 for (i = 0; i < adev->usec_timeout; i++) {
622 val = RREG32_SMC(ixSMU_STATUS);
623 if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
624 break;
625 udelay(1);
626 }
627
628 if (i == adev->usec_timeout) {
629 DRM_ERROR("Timeout for SMU start\n");
630 return -EINVAL;
631 }
632
633 /* Check pass/failed indicator */
634 val = RREG32_SMC(ixSMU_STATUS);
635 if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
636 DRM_ERROR("SMU Firmware start failed\n");
637 return -EINVAL;
638 }
639 DRM_INFO("[FM]smu started\n");
640 /* Wait for firmware to initialize */
641 for (i = 0; i < adev->usec_timeout; i++) {
642 val = RREG32_SMC(ixFIRMWARE_FLAGS);
643 if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
644 break;
645 udelay(1);
646 }
647
648 if (i == adev->usec_timeout) {
649 DRM_ERROR("SMU firmware initialization failed\n");
650 return -EINVAL;
651 }
652 DRM_INFO("[FM]smu initialized\n");
653
654 return 0;
655}
656
657static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
658{
659 int i, result;
660 uint32_t val;
661
662 /* wait for smc boot up */
663 for (i = 0; i < adev->usec_timeout; i++) {
664 val = RREG32_SMC(ixRCU_UC_EVENTS);
665 val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
666 if (val)
667 break;
668 udelay(1);
669 }
670
671 if (i == adev->usec_timeout) {
672 DRM_ERROR("SMC boot sequence is not completed\n");
673 return -EINVAL;
674 }
675
676 /* Clear firmware interrupt enable flag */
677 WREG32_SMC(ixFIRMWARE_FLAGS, 0);
678
679 /* Assert reset */
680 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
681 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
682 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
683
684 result = fiji_smu_upload_firmware_image(adev);
685 if (result)
686 return result;
687
688 /* Set smc instruct start point at 0x0 */
689 fiji_program_jump_on_start(adev);
690
691 /* Enable clock */
692 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
693 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
694 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
695
696 /* De-assert reset */
697 val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
698 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
699 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
700
701 /* Wait for firmware to initialize */
702 for (i = 0; i < adev->usec_timeout; i++) {
703 val = RREG32_SMC(ixFIRMWARE_FLAGS);
704 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
705 break;
706 udelay(1);
707 }
708
709 if (i == adev->usec_timeout) {
710 DRM_ERROR("Timeout for SMC firmware initialization\n");
711 return -EINVAL;
712 }
713
714 return 0;
715}
716
717int fiji_smu_start(struct amdgpu_device *adev)
718{
719 int result;
720 uint32_t val;
721
722 if (!fiji_is_smc_ram_running(adev)) {
723 val = RREG32_SMC(ixSMU_FIRMWARE);
724 if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
725 DRM_INFO("[FM]start smu in nonprotection mode\n");
726 result = fiji_smu_start_in_non_protection_mode(adev);
727 if (result)
728 return result;
729 } else {
730 DRM_INFO("[FM]start smu in protection mode\n");
731 result = fiji_smu_start_in_protection_mode(adev);
732 if (result)
733 return result;
734 }
735 }
736
737 return fiji_smu_request_load_fw(adev);
738}
739
/* SMU manager dispatch table: only load-completion polling is implemented. */
static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
745
/*
 * Allocate and map the two VRAM buffers the SMU needs — the TOC/header
 * buffer (TOC size rounded up to a 4K page) and a 200-page internal
 * scratch buffer — pin both, record their GPU addresses in the private
 * data, and install the smumgr function table.
 *
 * NOTE(review): on the smu_buf allocation failure path the already-created
 * toc_buf is not unreferenced, and `private` is never kfree()d on any
 * error path here — presumably fiji_smu_fini() is expected to run even
 * after a failed init; confirm caller behavior.
 */
int fiji_smu_init(struct amdgpu_device *adev)
{
	struct fiji_smu_private_data *private;
	/* round the TOC up to a whole number of 4K pages */
	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
	uint32_t smu_internal_buffer_size = 200*4096;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	uint64_t mc_addr;
	void *toc_buf_ptr;
	void *smu_buf_ptr;
	int ret;

	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
	if (NULL == private)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = private;
	adev->smu.fw_flags = 0;

	/* Allocate FW image data structure and header buffer */
	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_VRAM,
			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			       NULL, NULL, toc_buf);
	if (ret) {
		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
		return -ENOMEM;
	}

	/* Allocate buffer for SMU internal buffer */
	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_VRAM,
			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			       NULL, NULL, smu_buf);
	if (ret) {
		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
		return -ENOMEM;
	}

	/* Retrieve GPU address for header buffer and internal buffer */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to reserve the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to pin the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to map the TOC buffer\n");
		return -EINVAL;
	}

	amdgpu_bo_unreserve(adev->smu.toc_buf);
	private->header_addr_low = lower_32_bits(mc_addr);
	private->header_addr_high = upper_32_bits(mc_addr);
	private->header = toc_buf_ptr;

	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to pin the SMU internal buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to map the SMU internal buffer\n");
		return -EINVAL;
	}

	amdgpu_bo_unreserve(adev->smu.smu_buf);
	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
	private->smu_buffer_addr_high = upper_32_bits(mc_addr);

	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;

	return 0;
}
852
/*
 * Release everything fiji_smu_init() set up: the TOC and internal VRAM
 * buffers, the private data, and (if allocated) the shared ucode BO.
 * amdgpu_bo_unref() tolerates NULL pointers, so this is safe after a
 * partially failed init.
 */
int fiji_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 410b29c05671..40abb6b81c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -931,6 +931,123 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
931 return data & mask; 931 return data & mask;
932} 932}
933 933
/*
 * OR the per-ASIC default PA_SC_RASTER_CONFIG field values into *rconf.
 * Each SI variant gets the packer/SE mapping appropriate to its RB count;
 * Hainan contributes nothing (no additional bits).  Unknown ASICs are
 * logged and leave *rconf untouched.
 */
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		*rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
		break;
	case CHIP_VERDE:
		*rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
		break;
	case CHIP_OLAND:
		*rconf |= RB_YSEL;
		break;
	case CHIP_HAINAN:
		*rconf |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}
956
/*
 * Program a per-shader-engine PA_SC_RASTER_CONFIG when some render
 * backends are harvested (disabled).  For each SE the SE/PKR/RB mapping
 * fields are rewritten so that work is steered only at backends present
 * in @rb_mask, then the value is written with that SE selected via
 * GRBM_GFX_INDEX.  Broadcast selection is restored before returning.
 * Caller is expected to hold grbm_idx_mutex (selection is global state).
 */
static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
						    u32 raster_config, unsigned rb_mask,
						    unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	/* carve rb_mask into one contiguous slice per shader engine */
	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		/* if one SE of a pair is fully harvested, remap SE_MAP to the live one */
		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		/* same idea one level down: steer PKR_MAP to the surviving packer */
		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		/* and finally remap individual RBs within each packer */
		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on SI */
		gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
	}

	/* GRBM_GFX_INDEX has a different offset on SI */
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
1050
934static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, 1051static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
935 u32 se_num, u32 sh_per_se, 1052 u32 se_num, u32 sh_per_se,
936 u32 max_rb_num_per_se) 1053 u32 max_rb_num_per_se)
@@ -939,6 +1056,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
939 u32 data, mask; 1056 u32 data, mask;
940 u32 disabled_rbs = 0; 1057 u32 disabled_rbs = 0;
941 u32 enabled_rbs = 0; 1058 u32 enabled_rbs = 0;
1059 unsigned num_rb_pipes;
942 1060
943 mutex_lock(&adev->grbm_idx_mutex); 1061 mutex_lock(&adev->grbm_idx_mutex);
944 for (i = 0; i < se_num; i++) { 1062 for (i = 0; i < se_num; i++) {
@@ -961,6 +1079,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
961 adev->gfx.config.backend_enable_mask = enabled_rbs; 1079 adev->gfx.config.backend_enable_mask = enabled_rbs;
962 adev->gfx.config.num_rbs = hweight32(enabled_rbs); 1080 adev->gfx.config.num_rbs = hweight32(enabled_rbs);
963 1081
1082 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1083 adev->gfx.config.max_shader_engines, 16);
1084
964 mutex_lock(&adev->grbm_idx_mutex); 1085 mutex_lock(&adev->grbm_idx_mutex);
965 for (i = 0; i < se_num; i++) { 1086 for (i = 0; i < se_num; i++) {
966 gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); 1087 gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
@@ -980,7 +1101,15 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
980 } 1101 }
981 enabled_rbs >>= 2; 1102 enabled_rbs >>= 2;
982 } 1103 }
983 WREG32(PA_SC_RASTER_CONFIG, data); 1104 gfx_v6_0_raster_config(adev, &data);
1105
1106 if (!adev->gfx.config.backend_enable_mask ||
1107 adev->gfx.config.num_rbs >= num_rb_pipes)
1108 WREG32(PA_SC_RASTER_CONFIG, data);
1109 else
1110 gfx_v6_0_write_harvested_raster_configs(adev, data,
1111 adev->gfx.config.backend_enable_mask,
1112 num_rb_pipes);
984 } 1113 }
985 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1114 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
986 mutex_unlock(&adev->grbm_idx_mutex); 1115 mutex_unlock(&adev->grbm_idx_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 90102f123bb8..32a676291e67 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1645 return (~data) & mask; 1645 return (~data) & mask;
1646} 1646}
1647 1647
1648static void
1649gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1650{
1651 switch (adev->asic_type) {
1652 case CHIP_BONAIRE:
1653 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1654 SE_XSEL(1) | SE_YSEL(1);
1655 *rconf1 |= 0x0;
1656 break;
1657 case CHIP_HAWAII:
1658 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1659 RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1660 PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1661 SE_YSEL(3);
1662 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1663 SE_PAIR_YSEL(2);
1664 break;
1665 case CHIP_KAVERI:
1666 *rconf |= RB_MAP_PKR0(2);
1667 *rconf1 |= 0x0;
1668 break;
1669 case CHIP_KABINI:
1670 case CHIP_MULLINS:
1671 *rconf |= 0x0;
1672 *rconf1 |= 0x0;
1673 break;
1674 default:
1675 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1676 break;
1677 }
1678}
1679
1680static void
1681gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1682 u32 raster_config, u32 raster_config_1,
1683 unsigned rb_mask, unsigned num_rb)
1684{
1685 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1686 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1687 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1688 unsigned rb_per_se = num_rb / num_se;
1689 unsigned se_mask[4];
1690 unsigned se;
1691
1692 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1693 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1694 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1695 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1696
1697 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1698 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1699 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1700
1701 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1702 (!se_mask[2] && !se_mask[3]))) {
1703 raster_config_1 &= ~SE_PAIR_MAP_MASK;
1704
1705 if (!se_mask[0] && !se_mask[1]) {
1706 raster_config_1 |=
1707 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1708 } else {
1709 raster_config_1 |=
1710 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1711 }
1712 }
1713
1714 for (se = 0; se < num_se; se++) {
1715 unsigned raster_config_se = raster_config;
1716 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1717 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1718 int idx = (se / 2) * 2;
1719
1720 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1721 raster_config_se &= ~SE_MAP_MASK;
1722
1723 if (!se_mask[idx]) {
1724 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1725 } else {
1726 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1727 }
1728 }
1729
1730 pkr0_mask &= rb_mask;
1731 pkr1_mask &= rb_mask;
1732 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1733 raster_config_se &= ~PKR_MAP_MASK;
1734
1735 if (!pkr0_mask) {
1736 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1737 } else {
1738 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1739 }
1740 }
1741
1742 if (rb_per_se >= 2) {
1743 unsigned rb0_mask = 1 << (se * rb_per_se);
1744 unsigned rb1_mask = rb0_mask << 1;
1745
1746 rb0_mask &= rb_mask;
1747 rb1_mask &= rb_mask;
1748 if (!rb0_mask || !rb1_mask) {
1749 raster_config_se &= ~RB_MAP_PKR0_MASK;
1750
1751 if (!rb0_mask) {
1752 raster_config_se |=
1753 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1754 } else {
1755 raster_config_se |=
1756 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1757 }
1758 }
1759
1760 if (rb_per_se > 2) {
1761 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1762 rb1_mask = rb0_mask << 1;
1763 rb0_mask &= rb_mask;
1764 rb1_mask &= rb_mask;
1765 if (!rb0_mask || !rb1_mask) {
1766 raster_config_se &= ~RB_MAP_PKR1_MASK;
1767
1768 if (!rb0_mask) {
1769 raster_config_se |=
1770 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1771 } else {
1772 raster_config_se |=
1773 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1774 }
1775 }
1776 }
1777 }
1778
1779 /* GRBM_GFX_INDEX has a different offset on CI+ */
1780 gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1781 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1782 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1783 }
1784
1785 /* GRBM_GFX_INDEX has a different offset on CI+ */
1786 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1787}
1788
1648/** 1789/**
1649 * gfx_v7_0_setup_rb - setup the RBs on the asic 1790 * gfx_v7_0_setup_rb - setup the RBs on the asic
1650 * 1791 *
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1658{ 1799{
1659 int i, j; 1800 int i, j;
1660 u32 data; 1801 u32 data;
1802 u32 raster_config = 0, raster_config_1 = 0;
1661 u32 active_rbs = 0; 1803 u32 active_rbs = 0;
1662 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1804 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1663 adev->gfx.config.max_sh_per_se; 1805 adev->gfx.config.max_sh_per_se;
1806 unsigned num_rb_pipes;
1664 1807
1665 mutex_lock(&adev->grbm_idx_mutex); 1808 mutex_lock(&adev->grbm_idx_mutex);
1666 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1809 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1672 } 1815 }
1673 } 1816 }
1674 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1817 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1675 mutex_unlock(&adev->grbm_idx_mutex);
1676 1818
1677 adev->gfx.config.backend_enable_mask = active_rbs; 1819 adev->gfx.config.backend_enable_mask = active_rbs;
1678 adev->gfx.config.num_rbs = hweight32(active_rbs); 1820 adev->gfx.config.num_rbs = hweight32(active_rbs);
1821
1822 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1823 adev->gfx.config.max_shader_engines, 16);
1824
1825 gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1826
1827 if (!adev->gfx.config.backend_enable_mask ||
1828 adev->gfx.config.num_rbs >= num_rb_pipes) {
1829 WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1830 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1831 } else {
1832 gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1833 adev->gfx.config.backend_enable_mask,
1834 num_rb_pipes);
1835 }
1836 mutex_unlock(&adev->grbm_idx_mutex);
1679} 1837}
1680 1838
1681/** 1839/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 47e270ad4fe3..6c6ff57b1c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3492,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3492 return (~data) & mask; 3492 return (~data) & mask;
3493} 3493}
3494 3494
3495static void
3496gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3497{
3498 switch (adev->asic_type) {
3499 case CHIP_FIJI:
3500 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3501 RB_XSEL2(1) | PKR_MAP(2) |
3502 PKR_XSEL(1) | PKR_YSEL(1) |
3503 SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3504 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3505 SE_PAIR_YSEL(2);
3506 break;
3507 case CHIP_TONGA:
3508 case CHIP_POLARIS10:
3509 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3510 SE_XSEL(1) | SE_YSEL(1);
3511 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3512 SE_PAIR_YSEL(2);
3513 break;
3514 case CHIP_TOPAZ:
3515 case CHIP_CARRIZO:
3516 *rconf |= RB_MAP_PKR0(2);
3517 *rconf1 |= 0x0;
3518 break;
3519 case CHIP_POLARIS11:
3520 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3521 SE_XSEL(1) | SE_YSEL(1);
3522 *rconf1 |= 0x0;
3523 break;
3524 case CHIP_STONEY:
3525 *rconf |= 0x0;
3526 *rconf1 |= 0x0;
3527 break;
3528 default:
3529 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3530 break;
3531 }
3532}
3533
3534static void
3535gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
3536 u32 raster_config, u32 raster_config_1,
3537 unsigned rb_mask, unsigned num_rb)
3538{
3539 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3540 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3541 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
3542 unsigned rb_per_se = num_rb / num_se;
3543 unsigned se_mask[4];
3544 unsigned se;
3545
3546 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
3547 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
3548 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
3549 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
3550
3551 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
3552 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
3553 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
3554
3555 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
3556 (!se_mask[2] && !se_mask[3]))) {
3557 raster_config_1 &= ~SE_PAIR_MAP_MASK;
3558
3559 if (!se_mask[0] && !se_mask[1]) {
3560 raster_config_1 |=
3561 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
3562 } else {
3563 raster_config_1 |=
3564 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
3565 }
3566 }
3567
3568 for (se = 0; se < num_se; se++) {
3569 unsigned raster_config_se = raster_config;
3570 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
3571 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
3572 int idx = (se / 2) * 2;
3573
3574 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
3575 raster_config_se &= ~SE_MAP_MASK;
3576
3577 if (!se_mask[idx]) {
3578 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
3579 } else {
3580 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
3581 }
3582 }
3583
3584 pkr0_mask &= rb_mask;
3585 pkr1_mask &= rb_mask;
3586 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
3587 raster_config_se &= ~PKR_MAP_MASK;
3588
3589 if (!pkr0_mask) {
3590 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
3591 } else {
3592 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
3593 }
3594 }
3595
3596 if (rb_per_se >= 2) {
3597 unsigned rb0_mask = 1 << (se * rb_per_se);
3598 unsigned rb1_mask = rb0_mask << 1;
3599
3600 rb0_mask &= rb_mask;
3601 rb1_mask &= rb_mask;
3602 if (!rb0_mask || !rb1_mask) {
3603 raster_config_se &= ~RB_MAP_PKR0_MASK;
3604
3605 if (!rb0_mask) {
3606 raster_config_se |=
3607 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
3608 } else {
3609 raster_config_se |=
3610 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
3611 }
3612 }
3613
3614 if (rb_per_se > 2) {
3615 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
3616 rb1_mask = rb0_mask << 1;
3617 rb0_mask &= rb_mask;
3618 rb1_mask &= rb_mask;
3619 if (!rb0_mask || !rb1_mask) {
3620 raster_config_se &= ~RB_MAP_PKR1_MASK;
3621
3622 if (!rb0_mask) {
3623 raster_config_se |=
3624 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
3625 } else {
3626 raster_config_se |=
3627 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
3628 }
3629 }
3630 }
3631 }
3632
3633 /* GRBM_GFX_INDEX has a different offset on VI */
3634 gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
3635 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
3636 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3637 }
3638
3639 /* GRBM_GFX_INDEX has a different offset on VI */
3640 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3641}
3642
3495static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) 3643static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3496{ 3644{
3497 int i, j; 3645 int i, j;
3498 u32 data; 3646 u32 data;
3647 u32 raster_config = 0, raster_config_1 = 0;
3499 u32 active_rbs = 0; 3648 u32 active_rbs = 0;
3500 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 3649 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3501 adev->gfx.config.max_sh_per_se; 3650 adev->gfx.config.max_sh_per_se;
3651 unsigned num_rb_pipes;
3502 3652
3503 mutex_lock(&adev->grbm_idx_mutex); 3653 mutex_lock(&adev->grbm_idx_mutex);
3504 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3654 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3510,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3510 } 3660 }
3511 } 3661 }
3512 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 3662 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3513 mutex_unlock(&adev->grbm_idx_mutex);
3514 3663
3515 adev->gfx.config.backend_enable_mask = active_rbs; 3664 adev->gfx.config.backend_enable_mask = active_rbs;
3516 adev->gfx.config.num_rbs = hweight32(active_rbs); 3665 adev->gfx.config.num_rbs = hweight32(active_rbs);
3666
3667 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3668 adev->gfx.config.max_shader_engines, 16);
3669
3670 gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
3671
3672 if (!adev->gfx.config.backend_enable_mask ||
3673 adev->gfx.config.num_rbs >= num_rb_pipes) {
3674 WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
3675 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
3676 } else {
3677 gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
3678 adev->gfx.config.backend_enable_mask,
3679 num_rb_pipes);
3680 }
3681
3682 mutex_unlock(&adev->grbm_idx_mutex);
3517} 3683}
3518 3684
3519/** 3685/**
@@ -5817,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5817 return 0; 5983 return 0;
5818} 5984}
5819 5985
5986static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5987 enum amd_clockgating_state state)
5988{
5989 uint32_t msg_id, pp_state;
5990 void *pp_handle = adev->powerplay.pp_handle;
5991
5992 if (state == AMD_CG_STATE_UNGATE)
5993 pp_state = 0;
5994 else
5995 pp_state = PP_STATE_CG | PP_STATE_LS;
5996
5997 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5998 PP_BLOCK_GFX_CG,
5999 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6000 pp_state);
6001 amd_set_clockgating_by_smu(pp_handle, msg_id);
6002
6003 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6004 PP_BLOCK_GFX_MG,
6005 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6006 pp_state);
6007 amd_set_clockgating_by_smu(pp_handle, msg_id);
6008
6009 return 0;
6010}
6011
6012static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
6013 enum amd_clockgating_state state)
6014{
6015 uint32_t msg_id, pp_state;
6016 void *pp_handle = adev->powerplay.pp_handle;
6017
6018 if (state == AMD_CG_STATE_UNGATE)
6019 pp_state = 0;
6020 else
6021 pp_state = PP_STATE_CG | PP_STATE_LS;
6022
6023 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6024 PP_BLOCK_GFX_CG,
6025 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6026 pp_state);
6027 amd_set_clockgating_by_smu(pp_handle, msg_id);
6028
6029 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6030 PP_BLOCK_GFX_3D,
6031 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6032 pp_state);
6033 amd_set_clockgating_by_smu(pp_handle, msg_id);
6034
6035 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6036 PP_BLOCK_GFX_MG,
6037 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6038 pp_state);
6039 amd_set_clockgating_by_smu(pp_handle, msg_id);
6040
6041 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6042 PP_BLOCK_GFX_RLC,
6043 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6044 pp_state);
6045 amd_set_clockgating_by_smu(pp_handle, msg_id);
6046
6047 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
6048 PP_BLOCK_GFX_CP,
6049 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
6050 pp_state);
6051 amd_set_clockgating_by_smu(pp_handle, msg_id);
6052
6053 return 0;
6054}
6055
5820static int gfx_v8_0_set_clockgating_state(void *handle, 6056static int gfx_v8_0_set_clockgating_state(void *handle,
5821 enum amd_clockgating_state state) 6057 enum amd_clockgating_state state)
5822{ 6058{
@@ -5829,6 +6065,13 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
5829 gfx_v8_0_update_gfx_clock_gating(adev, 6065 gfx_v8_0_update_gfx_clock_gating(adev,
5830 state == AMD_CG_STATE_GATE ? true : false); 6066 state == AMD_CG_STATE_GATE ? true : false);
5831 break; 6067 break;
6068 case CHIP_TONGA:
6069 gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6070 break;
6071 case CHIP_POLARIS10:
6072 case CHIP_POLARIS11:
6073 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6074 break;
5832 default: 6075 default:
5833 break; 6076 break;
5834 } 6077 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 84c10d5117a9..1b319f5bc696 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -269,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
269 269
270 /* Skip MC ucode loading on SR-IOV capable boards. 270 /* Skip MC ucode loading on SR-IOV capable boards.
271 * vbios does this for us in asic_init in that case. 271 * vbios does this for us in asic_init in that case.
272 * Skip MC ucode loading on VF, because hypervisor will do that
273 * for this adaptor.
272 */ 274 */
273 if (adev->virtualization.supports_sr_iov) 275 if (amdgpu_sriov_bios(adev))
274 return 0; 276 return 0;
275 277
276 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; 278 hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad6095c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "iceland_smum.h"
28
29MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
30
31static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int iceland_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 iceland_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/topaz_smc.bin";
45 int err;
46
47 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
48 if (err)
49 goto out;
50 err = amdgpu_ucode_validate(adev->pm.fw);
51
52out:
53 if (err) {
54 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
55 release_firmware(adev->pm.fw);
56 adev->pm.fw = NULL;
57 }
58 return err;
59}
60
61static int iceland_dpm_sw_init(void *handle)
62{
63 int ret;
64 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66 ret = iceland_dpm_init_microcode(adev);
67 if (ret)
68 return ret;
69
70 return 0;
71}
72
73static int iceland_dpm_sw_fini(void *handle)
74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
80 return 0;
81}
82
83static int iceland_dpm_hw_init(void *handle)
84{
85 int ret;
86 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
87
88 mutex_lock(&adev->pm.mutex);
89
90 /* smu init only needs to be called at startup, not resume.
91 * It should be in sw_init, but requires the fw info gathered
92 * in sw_init from other IP modules.
93 */
94 ret = iceland_smu_init(adev);
95 if (ret) {
96 DRM_ERROR("SMU initialization failed\n");
97 goto fail;
98 }
99
100 ret = iceland_smu_start(adev);
101 if (ret) {
102 DRM_ERROR("SMU start failed\n");
103 goto fail;
104 }
105
106 mutex_unlock(&adev->pm.mutex);
107 return 0;
108
109fail:
110 adev->firmware.smu_load = false;
111 mutex_unlock(&adev->pm.mutex);
112 return -EINVAL;
113}
114
115static int iceland_dpm_hw_fini(void *handle)
116{
117 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
118
119 mutex_lock(&adev->pm.mutex);
120 /* smu fini only needs to be called at teardown, not suspend.
121 * It should be in sw_fini, but we put it here for symmetry
122 * with smu init.
123 */
124 iceland_smu_fini(adev);
125 mutex_unlock(&adev->pm.mutex);
126 return 0;
127}
128
129static int iceland_dpm_suspend(void *handle)
130{
131 return 0;
132}
133
134static int iceland_dpm_resume(void *handle)
135{
136 int ret;
137 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
138
139 mutex_lock(&adev->pm.mutex);
140
141 ret = iceland_smu_start(adev);
142 if (ret) {
143 DRM_ERROR("SMU start failed\n");
144 goto fail;
145 }
146
147fail:
148 mutex_unlock(&adev->pm.mutex);
149 return ret;
150}
151
152static int iceland_dpm_set_clockgating_state(void *handle,
153 enum amd_clockgating_state state)
154{
155 return 0;
156}
157
158static int iceland_dpm_set_powergating_state(void *handle,
159 enum amd_powergating_state state)
160{
161 return 0;
162}
163
164const struct amd_ip_funcs iceland_dpm_ip_funcs = {
165 .name = "iceland_dpm",
166 .early_init = iceland_dpm_early_init,
167 .late_init = NULL,
168 .sw_init = iceland_dpm_sw_init,
169 .sw_fini = iceland_dpm_sw_fini,
170 .hw_init = iceland_dpm_hw_init,
171 .hw_fini = iceland_dpm_hw_fini,
172 .suspend = iceland_dpm_suspend,
173 .resume = iceland_dpm_resume,
174 .is_idle = NULL,
175 .wait_for_idle = NULL,
176 .soft_reset = NULL,
177 .set_clockgating_state = iceland_dpm_set_clockgating_state,
178 .set_powergating_state = iceland_dpm_set_powergating_state,
179};
180
181static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
182 .get_temperature = NULL,
183 .pre_set_power_state = NULL,
184 .set_power_state = NULL,
185 .post_set_power_state = NULL,
186 .display_configuration_changed = NULL,
187 .get_sclk = NULL,
188 .get_mclk = NULL,
189 .print_power_state = NULL,
190 .debugfs_print_current_performance_level = NULL,
191 .force_performance_level = NULL,
192 .vblank_too_short = NULL,
193 .powergate_uvd = NULL,
194};
195
196static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
197{
198 if (NULL == adev->pm.funcs)
199 adev->pm.funcs = &iceland_dpm_funcs;
200}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index ef7c27d7356a..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "ppsmc.h"
28#include "iceland_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_1_d.h"
33#include "smu/smu_7_1_1_sh_mask.h"
34
35#define ICELAND_SMC_SIZE 0x20000
36
37static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
38 uint32_t smc_address, uint32_t limit)
39{
40 uint32_t val;
41
42 if (smc_address & 3)
43 return -EINVAL;
44
45 if ((smc_address + 3) > limit)
46 return -EINVAL;
47
48 WREG32(mmSMC_IND_INDEX_0, smc_address);
49
50 val = RREG32(mmSMC_IND_ACCESS_CNTL);
51 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
52 WREG32(mmSMC_IND_ACCESS_CNTL, val);
53
54 return 0;
55}
56
57static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
58 uint32_t smc_start_address,
59 const uint8_t *src,
60 uint32_t byte_count, uint32_t limit)
61{
62 uint32_t addr;
63 uint32_t data, orig_data;
64 int result = 0;
65 uint32_t extra_shift;
66 unsigned long flags;
67
68 if (smc_start_address & 3)
69 return -EINVAL;
70
71 if ((smc_start_address + byte_count) > limit)
72 return -EINVAL;
73
74 addr = smc_start_address;
75
76 spin_lock_irqsave(&adev->smc_idx_lock, flags);
77 while (byte_count >= 4) {
78 /* Bytes are written into the SMC addres space with the MSB first */
79 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
80
81 result = iceland_set_smc_sram_address(adev, addr, limit);
82
83 if (result)
84 goto out;
85
86 WREG32(mmSMC_IND_DATA_0, data);
87
88 src += 4;
89 byte_count -= 4;
90 addr += 4;
91 }
92
93 if (0 != byte_count) {
94 /* Now write odd bytes left, do a read modify write cycle */
95 data = 0;
96
97 result = iceland_set_smc_sram_address(adev, addr, limit);
98 if (result)
99 goto out;
100
101 orig_data = RREG32(mmSMC_IND_DATA_0);
102 extra_shift = 8 * (4 - byte_count);
103
104 while (byte_count > 0) {
105 data = (data << 8) + *src++;
106 byte_count--;
107 }
108
109 data <<= extra_shift;
110 data |= (orig_data & ~((~0UL) << extra_shift));
111
112 result = iceland_set_smc_sram_address(adev, addr, limit);
113 if (result)
114 goto out;
115
116 WREG32(mmSMC_IND_DATA_0, data);
117 }
118
119out:
120 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
121 return result;
122}
123
124static void iceland_start_smc(struct amdgpu_device *adev)
125{
126 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
127
128 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
129 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
130}
131
132static void iceland_reset_smc(struct amdgpu_device *adev)
133{
134 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
135
136 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
137 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
138}
139
140static int iceland_program_jump_on_start(struct amdgpu_device *adev)
141{
142 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
143 iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
144
145 return 0;
146}
147
148static void iceland_stop_smc_clock(struct amdgpu_device *adev)
149{
150 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
151
152 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
153 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
154}
155
156static void iceland_start_smc_clock(struct amdgpu_device *adev)
157{
158 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
159
160 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
161 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
162}
163
164static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
165{
166 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
167 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
168
169 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
170}
171
172static int wait_smu_response(struct amdgpu_device *adev)
173{
174 int i;
175 uint32_t val;
176
177 for (i = 0; i < adev->usec_timeout; i++) {
178 val = RREG32(mmSMC_RESP_0);
179 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
180 break;
181 udelay(1);
182 }
183
184 if (i == adev->usec_timeout)
185 return -EINVAL;
186
187 return 0;
188}
189
190static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
191{
192 if (!iceland_is_smc_ram_running(adev))
193 return -EINVAL;
194
195 if (wait_smu_response(adev)) {
196 DRM_ERROR("Failed to send previous message\n");
197 return -EINVAL;
198 }
199
200 WREG32(mmSMC_MESSAGE_0, msg);
201
202 if (wait_smu_response(adev)) {
203 DRM_ERROR("Failed to send message\n");
204 return -EINVAL;
205 }
206
207 return 0;
208}
209
210static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
211 PPSMC_Msg msg)
212{
213 if (!iceland_is_smc_ram_running(adev))
214 return -EINVAL;
215
216 if (wait_smu_response(adev)) {
217 DRM_ERROR("Failed to send previous message\n");
218 return -EINVAL;
219 }
220
221 WREG32(mmSMC_MESSAGE_0, msg);
222
223 return 0;
224}
225
226static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
227 PPSMC_Msg msg,
228 uint32_t parameter)
229{
230 WREG32(mmSMC_MSG_ARG_0, parameter);
231
232 return iceland_send_msg_to_smc(adev, msg);
233}
234
235static int iceland_send_msg_to_smc_with_parameter_without_waiting(
236 struct amdgpu_device *adev,
237 PPSMC_Msg msg, uint32_t parameter)
238{
239 WREG32(mmSMC_MSG_ARG_0, parameter);
240
241 return iceland_send_msg_to_smc_without_waiting(adev, msg);
242}
243
244#if 0 /* not used yet */
245static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
246{
247 int i;
248 uint32_t val;
249
250 if (!iceland_is_smc_ram_running(adev))
251 return -EINVAL;
252
253 for (i = 0; i < adev->usec_timeout; i++) {
254 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
255 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
256 break;
257 udelay(1);
258 }
259
260 if (i == adev->usec_timeout)
261 return -EINVAL;
262
263 return 0;
264}
265#endif
266
267static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
268{
269 const struct smc_firmware_header_v1_0 *hdr;
270 uint32_t ucode_size;
271 uint32_t ucode_start_address;
272 const uint8_t *src;
273 uint32_t val;
274 uint32_t byte_count;
275 uint32_t data;
276 unsigned long flags;
277 int i;
278
279 if (!adev->pm.fw)
280 return -EINVAL;
281
282 /* Skip SMC ucode loading on SR-IOV capable boards.
283 * vbios does this for us in asic_init in that case.
284 */
285 if (adev->virtualization.supports_sr_iov)
286 return 0;
287
288 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
289 amdgpu_ucode_print_smc_hdr(&hdr->header);
290
291 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
292 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
293 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
294 src = (const uint8_t *)
295 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
296
297 if (ucode_size & 3) {
298 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
299 return -EINVAL;
300 }
301
302 if (ucode_size > ICELAND_SMC_SIZE) {
303 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
304 return -EINVAL;
305 }
306
307 for (i = 0; i < adev->usec_timeout; i++) {
308 val = RREG32_SMC(ixRCU_UC_EVENTS);
309 if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
310 break;
311 udelay(1);
312 }
313 val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
314 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
315
316 iceland_stop_smc_clock(adev);
317 iceland_reset_smc(adev);
318
319 spin_lock_irqsave(&adev->smc_idx_lock, flags);
320 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
321
322 val = RREG32(mmSMC_IND_ACCESS_CNTL);
323 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
324 WREG32(mmSMC_IND_ACCESS_CNTL, val);
325
326 byte_count = ucode_size;
327 while (byte_count >= 4) {
328 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
329 WREG32(mmSMC_IND_DATA_0, data);
330 src += 4;
331 byte_count -= 4;
332 }
333 val = RREG32(mmSMC_IND_ACCESS_CNTL);
334 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
335 WREG32(mmSMC_IND_ACCESS_CNTL, val);
336 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
337
338 return 0;
339}
340
#if 0 /* not used yet */
/* Read one dword from SMC SRAM at @smc_address (bounded by @limit). */
static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
				       uint32_t smc_address,
				       uint32_t *value,
				       uint32_t limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (!ret)
		*value = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}

/* Write one dword to SMC SRAM at @smc_address (bounded by @limit). */
static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
					uint32_t smc_address,
					uint32_t value,
					uint32_t limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (!ret)
		WREG32(mmSMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}

/* Put the SMC into reset and gate its clock. */
static int iceland_smu_stop_smc(struct amdgpu_device *adev)
{
	iceland_reset_smc(adev);
	iceland_stop_smc_clock(adev);

	return 0;
}
#endif
382
383static int iceland_smu_start_smc(struct amdgpu_device *adev)
384{
385 int i;
386 uint32_t val;
387
388 iceland_program_jump_on_start(adev);
389 iceland_start_smc_clock(adev);
390 iceland_start_smc(adev);
391
392 for (i = 0; i < adev->usec_timeout; i++) {
393 val = RREG32_SMC(ixFIRMWARE_FLAGS);
394 if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
395 break;
396 udelay(1);
397 }
398 return 0;
399}
400
401static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
402{
403 switch (fw_type) {
404 case UCODE_ID_SDMA0:
405 return AMDGPU_UCODE_ID_SDMA0;
406 case UCODE_ID_SDMA1:
407 return AMDGPU_UCODE_ID_SDMA1;
408 case UCODE_ID_CP_CE:
409 return AMDGPU_UCODE_ID_CP_CE;
410 case UCODE_ID_CP_PFP:
411 return AMDGPU_UCODE_ID_CP_PFP;
412 case UCODE_ID_CP_ME:
413 return AMDGPU_UCODE_ID_CP_ME;
414 case UCODE_ID_CP_MEC:
415 case UCODE_ID_CP_MEC_JT1:
416 return AMDGPU_UCODE_ID_CP_MEC1;
417 case UCODE_ID_CP_MEC_JT2:
418 return AMDGPU_UCODE_ID_CP_MEC2;
419 case UCODE_ID_RLC_G:
420 return AMDGPU_UCODE_ID_RLC_G;
421 default:
422 DRM_ERROR("ucode type is out of range!\n");
423 return AMDGPU_UCODE_ID_MAXIMUM;
424 }
425}
426
427static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
428{
429 switch (fw_type) {
430 case AMDGPU_UCODE_ID_SDMA0:
431 return UCODE_ID_SDMA0_MASK;
432 case AMDGPU_UCODE_ID_SDMA1:
433 return UCODE_ID_SDMA1_MASK;
434 case AMDGPU_UCODE_ID_CP_CE:
435 return UCODE_ID_CP_CE_MASK;
436 case AMDGPU_UCODE_ID_CP_PFP:
437 return UCODE_ID_CP_PFP_MASK;
438 case AMDGPU_UCODE_ID_CP_ME:
439 return UCODE_ID_CP_ME_MASK;
440 case AMDGPU_UCODE_ID_CP_MEC1:
441 return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
442 case AMDGPU_UCODE_ID_CP_MEC2:
443 return UCODE_ID_CP_MEC_MASK;
444 case AMDGPU_UCODE_ID_RLC_G:
445 return UCODE_ID_RLC_G_MASK;
446 default:
447 DRM_ERROR("ucode type is out of range!\n");
448 return 0;
449 }
450}
451
452static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
453 uint32_t fw_type,
454 struct SMU_Entry *entry)
455{
456 enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
457 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
458 const struct gfx_firmware_header_v1_0 *header = NULL;
459 uint64_t gpu_addr;
460 uint32_t data_size;
461
462 if (ucode->fw == NULL)
463 return -EINVAL;
464
465 gpu_addr = ucode->mc_addr;
466 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
467 data_size = le32_to_cpu(header->header.ucode_size_bytes);
468
469 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
470 entry->id = (uint16_t)fw_type;
471 entry->image_addr_high = upper_32_bits(gpu_addr);
472 entry->image_addr_low = lower_32_bits(gpu_addr);
473 entry->meta_data_addr_high = 0;
474 entry->meta_data_addr_low = 0;
475 entry->data_size_byte = data_size;
476 entry->num_register_entries = 0;
477 entry->flags = 0;
478
479 return 0;
480}
481
482static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
483{
484 struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
485 struct SMU_DRAMData_TOC *toc;
486 uint32_t fw_to_load;
487
488 toc = (struct SMU_DRAMData_TOC *)private->header;
489 toc->num_entries = 0;
490 toc->structure_version = 1;
491
492 if (!adev->firmware.smu_load)
493 return 0;
494
495 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
496 &toc->entry[toc->num_entries++])) {
497 DRM_ERROR("Failed to get firmware entry for RLC\n");
498 return -EINVAL;
499 }
500
501 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
502 &toc->entry[toc->num_entries++])) {
503 DRM_ERROR("Failed to get firmware entry for CE\n");
504 return -EINVAL;
505 }
506
507 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
508 &toc->entry[toc->num_entries++])) {
509 DRM_ERROR("Failed to get firmware entry for PFP\n");
510 return -EINVAL;
511 }
512
513 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
514 &toc->entry[toc->num_entries++])) {
515 DRM_ERROR("Failed to get firmware entry for ME\n");
516 return -EINVAL;
517 }
518
519 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
520 &toc->entry[toc->num_entries++])) {
521 DRM_ERROR("Failed to get firmware entry for MEC\n");
522 return -EINVAL;
523 }
524
525 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
526 &toc->entry[toc->num_entries++])) {
527 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
528 return -EINVAL;
529 }
530
531 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
532 &toc->entry[toc->num_entries++])) {
533 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
534 return -EINVAL;
535 }
536
537 if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
538 &toc->entry[toc->num_entries++])) {
539 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
540 return -EINVAL;
541 }
542
543 iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
544 iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
545
546 fw_to_load = UCODE_ID_RLC_G_MASK |
547 UCODE_ID_SDMA0_MASK |
548 UCODE_ID_SDMA1_MASK |
549 UCODE_ID_CP_CE_MASK |
550 UCODE_ID_CP_ME_MASK |
551 UCODE_ID_CP_PFP_MASK |
552 UCODE_ID_CP_MEC_MASK |
553 UCODE_ID_CP_MEC_JT1_MASK;
554
555
556 if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
557 DRM_ERROR("Fail to request SMU load ucode\n");
558 return -EINVAL;
559 }
560
561 return 0;
562}
563
564static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
565 uint32_t fw_type)
566{
567 uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
568 int i;
569
570 for (i = 0; i < adev->usec_timeout; i++) {
571 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
572 break;
573 udelay(1);
574 }
575
576 if (i == adev->usec_timeout) {
577 DRM_ERROR("check firmware loading failed\n");
578 return -EINVAL;
579 }
580
581 return 0;
582}
583
/* Bring the SMU up: upload its firmware, boot it, then hand it the
 * list of GPU ucodes to load. Stops at the first failing step and
 * returns that step's error code.
 */
int iceland_smu_start(struct amdgpu_device *adev)
{
	int ret;

	ret = iceland_smu_upload_firmware_image(adev);
	if (ret)
		return ret;

	ret = iceland_smu_start_smc(adev);
	if (ret)
		return ret;

	return iceland_smu_request_load_fw(adev);
}
597
/* SMU manager callbacks for Iceland. Only load-completion polling is
 * wired up; SMU-driven load requests go through iceland_smu_start()
 * instead of these hooks. */
static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
	.check_fw_load_finish = iceland_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
603
604int iceland_smu_init(struct amdgpu_device *adev)
605{
606 struct iceland_smu_private_data *private;
607 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
608 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
609 uint64_t mc_addr;
610 void *toc_buf_ptr;
611 int ret;
612
613 private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
614 if (NULL == private)
615 return -ENOMEM;
616
617 /* allocate firmware buffers */
618 if (adev->firmware.smu_load)
619 amdgpu_ucode_init_bo(adev);
620
621 adev->smu.priv = private;
622 adev->smu.fw_flags = 0;
623
624 /* Allocate FW image data structure and header buffer */
625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
626 true, AMDGPU_GEM_DOMAIN_VRAM,
627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
628 NULL, NULL, toc_buf);
629 if (ret) {
630 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
631 return -ENOMEM;
632 }
633
634 /* Retrieve GPU address for header buffer and internal buffer */
635 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
636 if (ret) {
637 amdgpu_bo_unref(&adev->smu.toc_buf);
638 DRM_ERROR("Failed to reserve the TOC buffer\n");
639 return -EINVAL;
640 }
641
642 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
643 if (ret) {
644 amdgpu_bo_unreserve(adev->smu.toc_buf);
645 amdgpu_bo_unref(&adev->smu.toc_buf);
646 DRM_ERROR("Failed to pin the TOC buffer\n");
647 return -EINVAL;
648 }
649
650 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
651 if (ret) {
652 amdgpu_bo_unreserve(adev->smu.toc_buf);
653 amdgpu_bo_unref(&adev->smu.toc_buf);
654 DRM_ERROR("Failed to map the TOC buffer\n");
655 return -EINVAL;
656 }
657
658 amdgpu_bo_unreserve(adev->smu.toc_buf);
659 private->header_addr_low = lower_32_bits(mc_addr);
660 private->header_addr_high = upper_32_bits(mc_addr);
661 private->header = toc_buf_ptr;
662
663 adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
664
665 return 0;
666}
667
/* Tear down SMU state: drop the TOC buffer, free the private data
 * allocated in iceland_smu_init(), and release the shared firmware BO
 * if one was created. Always returns 0. */
int iceland_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index fee76b8a536f..dc9511c5ecb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -952,12 +952,6 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
952 spin_unlock_irqrestore(&adev->smc_idx_lock, flags); 952 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
953} 953}
954 954
955static u32 si_get_virtual_caps(struct amdgpu_device *adev)
956{
957 /* SI does not support SR-IOV */
958 return 0;
959}
960
961static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { 955static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
962 {GRBM_STATUS, false}, 956 {GRBM_STATUS, false},
963 {GB_ADDR_CONFIG, false}, 957 {GB_ADDR_CONFIG, false},
@@ -1124,16 +1118,22 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1124 return 0; 1118 return 0;
1125} 1119}
1126 1120
1121static void si_detect_hw_virtualization(struct amdgpu_device *adev)
1122{
1123 if (is_virtual_machine()) /* passthrough mode */
1124 adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
1125}
1126
1127static const struct amdgpu_asic_funcs si_asic_funcs = 1127static const struct amdgpu_asic_funcs si_asic_funcs =
1128{ 1128{
1129 .read_disabled_bios = &si_read_disabled_bios, 1129 .read_disabled_bios = &si_read_disabled_bios,
1130 .detect_hw_virtualization = si_detect_hw_virtualization,
1130 .read_register = &si_read_register, 1131 .read_register = &si_read_register,
1131 .reset = &si_asic_reset, 1132 .reset = &si_asic_reset,
1132 .set_vga_state = &si_vga_set_state, 1133 .set_vga_state = &si_vga_set_state,
1133 .get_xclk = &si_get_xclk, 1134 .get_xclk = &si_get_xclk,
1134 .set_uvd_clocks = &si_set_uvd_clocks, 1135 .set_uvd_clocks = &si_set_uvd_clocks,
1135 .set_vce_clocks = NULL, 1136 .set_vce_clocks = NULL,
1136 .get_virtual_caps = &si_get_virtual_caps,
1137}; 1137};
1138 1138
1139static uint32_t si_get_rev_id(struct amdgpu_device *adev) 1139static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4dc3a8..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "tonga_smum.h"
28
29MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
30
31static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
32
33static int tonga_dpm_early_init(void *handle)
34{
35 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
36
37 tonga_dpm_set_funcs(adev);
38
39 return 0;
40}
41
42static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
43{
44 char fw_name[30] = "amdgpu/tonga_smc.bin";
45 int err;
46 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
47 if (err)
48 goto out;
49 err = amdgpu_ucode_validate(adev->pm.fw);
50
51out:
52 if (err) {
53 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
54 release_firmware(adev->pm.fw);
55 adev->pm.fw = NULL;
56 }
57 return err;
58}
59
60static int tonga_dpm_sw_init(void *handle)
61{
62 int ret;
63 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
64
65 ret = tonga_dpm_init_microcode(adev);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
72static int tonga_dpm_sw_fini(void *handle)
73{
74 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
75
76 release_firmware(adev->pm.fw);
77 adev->pm.fw = NULL;
78
79 return 0;
80}
81
82static int tonga_dpm_hw_init(void *handle)
83{
84 int ret;
85 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
86
87 mutex_lock(&adev->pm.mutex);
88
89 /* smu init only needs to be called at startup, not resume.
90 * It should be in sw_init, but requires the fw info gathered
91 * in sw_init from other IP modules.
92 */
93 ret = tonga_smu_init(adev);
94 if (ret) {
95 DRM_ERROR("SMU initialization failed\n");
96 goto fail;
97 }
98
99 ret = tonga_smu_start(adev);
100 if (ret) {
101 DRM_ERROR("SMU start failed\n");
102 goto fail;
103 }
104
105 mutex_unlock(&adev->pm.mutex);
106 return 0;
107
108fail:
109 adev->firmware.smu_load = false;
110 mutex_unlock(&adev->pm.mutex);
111 return -EINVAL;
112}
113
114static int tonga_dpm_hw_fini(void *handle)
115{
116 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
117
118 mutex_lock(&adev->pm.mutex);
119 /* smu fini only needs to be called at teardown, not suspend.
120 * It should be in sw_fini, but we put it here for symmetry
121 * with smu init.
122 */
123 tonga_smu_fini(adev);
124 mutex_unlock(&adev->pm.mutex);
125 return 0;
126}
127
128static int tonga_dpm_suspend(void *handle)
129{
130 return tonga_dpm_hw_fini(handle);
131}
132
133static int tonga_dpm_resume(void *handle)
134{
135 return tonga_dpm_hw_init(handle);
136}
137
138static int tonga_dpm_set_clockgating_state(void *handle,
139 enum amd_clockgating_state state)
140{
141 return 0;
142}
143
144static int tonga_dpm_set_powergating_state(void *handle,
145 enum amd_powergating_state state)
146{
147 return 0;
148}
149
150const struct amd_ip_funcs tonga_dpm_ip_funcs = {
151 .name = "tonga_dpm",
152 .early_init = tonga_dpm_early_init,
153 .late_init = NULL,
154 .sw_init = tonga_dpm_sw_init,
155 .sw_fini = tonga_dpm_sw_fini,
156 .hw_init = tonga_dpm_hw_init,
157 .hw_fini = tonga_dpm_hw_fini,
158 .suspend = tonga_dpm_suspend,
159 .resume = tonga_dpm_resume,
160 .is_idle = NULL,
161 .wait_for_idle = NULL,
162 .soft_reset = NULL,
163 .set_clockgating_state = tonga_dpm_set_clockgating_state,
164 .set_powergating_state = tonga_dpm_set_powergating_state,
165};
166
167static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
168 .get_temperature = NULL,
169 .pre_set_power_state = NULL,
170 .set_power_state = NULL,
171 .post_set_power_state = NULL,
172 .display_configuration_changed = NULL,
173 .get_sclk = NULL,
174 .get_mclk = NULL,
175 .print_power_state = NULL,
176 .debugfs_print_current_performance_level = NULL,
177 .force_performance_level = NULL,
178 .vblank_too_short = NULL,
179 .powergate_uvd = NULL,
180};
181
182static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
183{
184 if (NULL == adev->pm.funcs)
185 adev->pm.funcs = &tonga_dpm_funcs;
186}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de1836f8f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include "drmP.h"
26#include "amdgpu.h"
27#include "tonga_ppsmc.h"
28#include "tonga_smum.h"
29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h"
31
32#include "smu/smu_7_1_2_d.h"
33#include "smu/smu_7_1_2_sh_mask.h"
34
35#define TONGA_SMC_SIZE 0x20000
36
37static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
38{
39 uint32_t val;
40
41 if (smc_address & 3)
42 return -EINVAL;
43
44 if ((smc_address + 3) > limit)
45 return -EINVAL;
46
47 WREG32(mmSMC_IND_INDEX_0, smc_address);
48
49 val = RREG32(mmSMC_IND_ACCESS_CNTL);
50 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
51 WREG32(mmSMC_IND_ACCESS_CNTL, val);
52
53 return 0;
54}
55
56static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
57{
58 uint32_t addr;
59 uint32_t data, orig_data;
60 int result = 0;
61 uint32_t extra_shift;
62 unsigned long flags;
63
64 if (smc_start_address & 3)
65 return -EINVAL;
66
67 if ((smc_start_address + byte_count) > limit)
68 return -EINVAL;
69
70 addr = smc_start_address;
71
72 spin_lock_irqsave(&adev->smc_idx_lock, flags);
73 while (byte_count >= 4) {
74 /* Bytes are written into the SMC addres space with the MSB first */
75 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
76
77 result = tonga_set_smc_sram_address(adev, addr, limit);
78
79 if (result)
80 goto out;
81
82 WREG32(mmSMC_IND_DATA_0, data);
83
84 src += 4;
85 byte_count -= 4;
86 addr += 4;
87 }
88
89 if (0 != byte_count) {
90 /* Now write odd bytes left, do a read modify write cycle */
91 data = 0;
92
93 result = tonga_set_smc_sram_address(adev, addr, limit);
94 if (result)
95 goto out;
96
97 orig_data = RREG32(mmSMC_IND_DATA_0);
98 extra_shift = 8 * (4 - byte_count);
99
100 while (byte_count > 0) {
101 data = (data << 8) + *src++;
102 byte_count--;
103 }
104
105 data <<= extra_shift;
106 data |= (orig_data & ~((~0UL) << extra_shift));
107
108 result = tonga_set_smc_sram_address(adev, addr, limit);
109 if (result)
110 goto out;
111
112 WREG32(mmSMC_IND_DATA_0, data);
113 }
114
115out:
116 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
117 return result;
118}
119
120static int tonga_program_jump_on_start(struct amdgpu_device *adev)
121{
122 static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
123 tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
124
125 return 0;
126}
127
128static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
129{
130 uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
131 val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
132
133 return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
134}
135
136static int wait_smu_response(struct amdgpu_device *adev)
137{
138 int i;
139 uint32_t val;
140
141 for (i = 0; i < adev->usec_timeout; i++) {
142 val = RREG32(mmSMC_RESP_0);
143 if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
144 break;
145 udelay(1);
146 }
147
148 if (i == adev->usec_timeout)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
155{
156 if (wait_smu_response(adev)) {
157 DRM_ERROR("Failed to send previous message\n");
158 return -EINVAL;
159 }
160
161 WREG32(mmSMC_MSG_ARG_0, 0x20000);
162 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
163
164 if (wait_smu_response(adev)) {
165 DRM_ERROR("Failed to send message\n");
166 return -EINVAL;
167 }
168
169 return 0;
170}
171
172static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{
174 if (!tonga_is_smc_ram_running(adev))
175 {
176 return -EINVAL;
177 }
178
179 if (wait_smu_response(adev)) {
180 DRM_ERROR("Failed to send previous message\n");
181 return -EINVAL;
182 }
183
184 WREG32(mmSMC_MESSAGE_0, msg);
185
186 if (wait_smu_response(adev)) {
187 DRM_ERROR("Failed to send message\n");
188 return -EINVAL;
189 }
190
191 return 0;
192}
193
194static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
195 PPSMC_Msg msg)
196{
197 if (wait_smu_response(adev)) {
198 DRM_ERROR("Failed to send previous message\n");
199 return -EINVAL;
200 }
201
202 WREG32(mmSMC_MESSAGE_0, msg);
203
204 return 0;
205}
206
207static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
208 PPSMC_Msg msg,
209 uint32_t parameter)
210{
211 if (!tonga_is_smc_ram_running(adev))
212 return -EINVAL;
213
214 if (wait_smu_response(adev)) {
215 DRM_ERROR("Failed to send previous message\n");
216 return -EINVAL;
217 }
218
219 WREG32(mmSMC_MSG_ARG_0, parameter);
220
221 return tonga_send_msg_to_smc(adev, msg);
222}
223
224static int tonga_send_msg_to_smc_with_parameter_without_waiting(
225 struct amdgpu_device *adev,
226 PPSMC_Msg msg, uint32_t parameter)
227{
228 if (wait_smu_response(adev)) {
229 DRM_ERROR("Failed to send previous message\n");
230 return -EINVAL;
231 }
232
233 WREG32(mmSMC_MSG_ARG_0, parameter);
234
235 return tonga_send_msg_to_smc_without_waiting(adev, msg);
236}
237
238#if 0 /* not used yet */
239static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
240{
241 int i;
242 uint32_t val;
243
244 if (!tonga_is_smc_ram_running(adev))
245 return -EINVAL;
246
247 for (i = 0; i < adev->usec_timeout; i++) {
248 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
249 if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
250 break;
251 udelay(1);
252 }
253
254 if (i == adev->usec_timeout)
255 return -EINVAL;
256
257 return 0;
258}
259#endif
260
261static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
262{
263 const struct smc_firmware_header_v1_0 *hdr;
264 uint32_t ucode_size;
265 uint32_t ucode_start_address;
266 const uint8_t *src;
267 uint32_t val;
268 uint32_t byte_count;
269 uint32_t *data;
270 unsigned long flags;
271
272 if (!adev->pm.fw)
273 return -EINVAL;
274
275 /* Skip SMC ucode loading on SR-IOV capable boards.
276 * vbios does this for us in asic_init in that case.
277 */
278 if (adev->virtualization.supports_sr_iov)
279 return 0;
280
281 hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
282 amdgpu_ucode_print_smc_hdr(&hdr->header);
283
284 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
285 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
286 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
287 src = (const uint8_t *)
288 (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
289
290 if (ucode_size & 3) {
291 DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
292 return -EINVAL;
293 }
294
295 if (ucode_size > TONGA_SMC_SIZE) {
296 DRM_ERROR("SMC address is beyond the SMC RAM area\n");
297 return -EINVAL;
298 }
299
300 spin_lock_irqsave(&adev->smc_idx_lock, flags);
301 WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
302
303 val = RREG32(mmSMC_IND_ACCESS_CNTL);
304 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
305 WREG32(mmSMC_IND_ACCESS_CNTL, val);
306
307 byte_count = ucode_size;
308 data = (uint32_t *)src;
309 for (; byte_count >= 4; data++, byte_count -= 4)
310 WREG32(mmSMC_IND_DATA_0, data[0]);
311
312 val = RREG32(mmSMC_IND_ACCESS_CNTL);
313 val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
314 WREG32(mmSMC_IND_ACCESS_CNTL, val);
315 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
316
317 return 0;
318}
319
320#if 0 /* not used yet */
321static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
322 uint32_t smc_address,
323 uint32_t *value,
324 uint32_t limit)
325{
326 int result;
327 unsigned long flags;
328
329 spin_lock_irqsave(&adev->smc_idx_lock, flags);
330 result = tonga_set_smc_sram_address(adev, smc_address, limit);
331 if (result == 0)
332 *value = RREG32(mmSMC_IND_DATA_0);
333 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
334 return result;
335}
336
337static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
338 uint32_t smc_address,
339 uint32_t value,
340 uint32_t limit)
341{
342 int result;
343 unsigned long flags;
344
345 spin_lock_irqsave(&adev->smc_idx_lock, flags);
346 result = tonga_set_smc_sram_address(adev, smc_address, limit);
347 if (result == 0)
348 WREG32(mmSMC_IND_DATA_0, value);
349 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
350 return result;
351}
352
353static int tonga_smu_stop_smc(struct amdgpu_device *adev)
354{
355 uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
356 val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
357 WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
358
359 val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
360 val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
361 WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
362
363 return 0;
364}
365#endif
366
367static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
368{
369 switch (fw_type) {
370 case UCODE_ID_SDMA0:
371 return AMDGPU_UCODE_ID_SDMA0;
372 case UCODE_ID_SDMA1:
373 return AMDGPU_UCODE_ID_SDMA1;
374 case UCODE_ID_CP_CE:
375 return AMDGPU_UCODE_ID_CP_CE;
376 case UCODE_ID_CP_PFP:
377 return AMDGPU_UCODE_ID_CP_PFP;
378 case UCODE_ID_CP_ME:
379 return AMDGPU_UCODE_ID_CP_ME;
380 case UCODE_ID_CP_MEC:
381 case UCODE_ID_CP_MEC_JT1:
382 return AMDGPU_UCODE_ID_CP_MEC1;
383 case UCODE_ID_CP_MEC_JT2:
384 return AMDGPU_UCODE_ID_CP_MEC2;
385 case UCODE_ID_RLC_G:
386 return AMDGPU_UCODE_ID_RLC_G;
387 default:
388 DRM_ERROR("ucode type is out of range!\n");
389 return AMDGPU_UCODE_ID_MAXIMUM;
390 }
391}
392
393static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
394 uint32_t fw_type,
395 struct SMU_Entry *entry)
396{
397 enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
398 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
399 const struct gfx_firmware_header_v1_0 *header = NULL;
400 uint64_t gpu_addr;
401 uint32_t data_size;
402
403 if (ucode->fw == NULL)
404 return -EINVAL;
405
406 gpu_addr = ucode->mc_addr;
407 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
408 data_size = le32_to_cpu(header->header.ucode_size_bytes);
409
410 if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
411 (fw_type == UCODE_ID_CP_MEC_JT2)) {
412 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
413 data_size = le32_to_cpu(header->jt_size) << 2;
414 }
415
416 entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
417 entry->id = (uint16_t)fw_type;
418 entry->image_addr_high = upper_32_bits(gpu_addr);
419 entry->image_addr_low = lower_32_bits(gpu_addr);
420 entry->meta_data_addr_high = 0;
421 entry->meta_data_addr_low = 0;
422 entry->data_size_byte = data_size;
423 entry->num_register_entries = 0;
424
425 if (fw_type == UCODE_ID_RLC_G)
426 entry->flags = 1;
427 else
428 entry->flags = 0;
429
430 return 0;
431}
432
433static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
434{
435 struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
436 struct SMU_DRAMData_TOC *toc;
437 uint32_t fw_to_load;
438
439 WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
440
441 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
442 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
443
444 toc = (struct SMU_DRAMData_TOC *)private->header;
445 toc->num_entries = 0;
446 toc->structure_version = 1;
447
448 if (!adev->firmware.smu_load)
449 return 0;
450
451 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
452 &toc->entry[toc->num_entries++])) {
453 DRM_ERROR("Failed to get firmware entry for RLC\n");
454 return -EINVAL;
455 }
456
457 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
458 &toc->entry[toc->num_entries++])) {
459 DRM_ERROR("Failed to get firmware entry for CE\n");
460 return -EINVAL;
461 }
462
463 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
464 &toc->entry[toc->num_entries++])) {
465 DRM_ERROR("Failed to get firmware entry for PFP\n");
466 return -EINVAL;
467 }
468
469 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
470 &toc->entry[toc->num_entries++])) {
471 DRM_ERROR("Failed to get firmware entry for ME\n");
472 return -EINVAL;
473 }
474
475 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
476 &toc->entry[toc->num_entries++])) {
477 DRM_ERROR("Failed to get firmware entry for MEC\n");
478 return -EINVAL;
479 }
480
481 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
482 &toc->entry[toc->num_entries++])) {
483 DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
484 return -EINVAL;
485 }
486
487 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
488 &toc->entry[toc->num_entries++])) {
489 DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
490 return -EINVAL;
491 }
492
493 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
494 &toc->entry[toc->num_entries++])) {
495 DRM_ERROR("Failed to get firmware entry for SDMA0\n");
496 return -EINVAL;
497 }
498
499 if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
500 &toc->entry[toc->num_entries++])) {
501 DRM_ERROR("Failed to get firmware entry for SDMA1\n");
502 return -EINVAL;
503 }
504
505 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
506 tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
507
508 fw_to_load = UCODE_ID_RLC_G_MASK |
509 UCODE_ID_SDMA0_MASK |
510 UCODE_ID_SDMA1_MASK |
511 UCODE_ID_CP_CE_MASK |
512 UCODE_ID_CP_ME_MASK |
513 UCODE_ID_CP_PFP_MASK |
514 UCODE_ID_CP_MEC_MASK;
515
516 if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
517 DRM_ERROR("Fail to request SMU load ucode\n");
518 return -EINVAL;
519 }
520
521 return 0;
522}
523
524static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
525{
526 switch (fw_type) {
527 case AMDGPU_UCODE_ID_SDMA0:
528 return UCODE_ID_SDMA0_MASK;
529 case AMDGPU_UCODE_ID_SDMA1:
530 return UCODE_ID_SDMA1_MASK;
531 case AMDGPU_UCODE_ID_CP_CE:
532 return UCODE_ID_CP_CE_MASK;
533 case AMDGPU_UCODE_ID_CP_PFP:
534 return UCODE_ID_CP_PFP_MASK;
535 case AMDGPU_UCODE_ID_CP_ME:
536 return UCODE_ID_CP_ME_MASK;
537 case AMDGPU_UCODE_ID_CP_MEC1:
538 return UCODE_ID_CP_MEC_MASK;
539 case AMDGPU_UCODE_ID_CP_MEC2:
540 return UCODE_ID_CP_MEC_MASK;
541 case AMDGPU_UCODE_ID_RLC_G:
542 return UCODE_ID_RLC_G_MASK;
543 default:
544 DRM_ERROR("ucode type is out of range!\n");
545 return 0;
546 }
547}
548
549static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
550 uint32_t fw_type)
551{
552 uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
553 int i;
554
555 for (i = 0; i < adev->usec_timeout; i++) {
556 if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
557 break;
558 udelay(1);
559 }
560
561 if (i == adev->usec_timeout) {
562 DRM_ERROR("check firmware loading failed\n");
563 return -EINVAL;
564 }
565
566 return 0;
567}
568
/* Bring up the SMC when it runs in protection mode: upload the SMU
 * firmware image while the core is held in reset, arm the boot ROM's
 * auto-start path, release reset and then walk the handshake — wait
 * for interrupts, poke the SMU with the test-message offset, wait for
 * the DONE bit, check the PASS bit, and finally wait for the firmware
 * to flag itself initialized.
 *
 * The register write ordering below is a fixed hardware bring-up
 * sequence; do not reorder.  Returns 0 on success, -EINVAL on any
 * timeout or a failed pass/fail check; upload errors are passed
 * through unchanged.
 */
static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
{
	int result;
	uint32_t val;
	int i;

	/* Assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	result = tonga_smu_upload_firmware_image(adev);
	if (result)
		return result;

	/* Clear status */
	WREG32_SMC(ixSMU_STATUS, 0);

	/* Enable clock */
	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	/* De-assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	/* Set SMU Auto Start */
	val = RREG32_SMC(ixSMU_INPUT_DATA);
	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
	WREG32_SMC(ixSMU_INPUT_DATA, val);

	/* Clear firmware interrupt enable flag */
	WREG32_SMC(ixFIRMWARE_FLAGS, 0);

	/* Poll (up to usec_timeout microseconds) for the boot ROM to
	 * report interrupts enabled.
	 */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Interrupt is not enabled by firmware\n");
		return -EINVAL;
	}

	/* Call Test SMU message with 0x20000 offset
	 * to trigger SMU start
	 */
	tonga_send_msg_to_smc_offset(adev);

	/* Wait for done bit to be set */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMU_STATUS);
		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Timeout for SMU start\n");
		return -EINVAL;
	}

	/* Check pass/failed indicator */
	val = RREG32_SMC(ixSMU_STATUS);
	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
		DRM_ERROR("SMU Firmware start failed\n");
		return -EINVAL;
	}

	/* Wait for firmware to initialize */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("SMU firmware initialization failed\n");
		return -EINVAL;
	}

	return 0;
}
657
/* Bring up the SMC when protection mode is disabled: wait for the boot
 * ROM sequence to finish, upload the firmware while the core is held
 * in reset, point the instruction start at offset 0x0, then release
 * reset and wait for the firmware to flag its interrupts enabled.
 *
 * The register write ordering below is a fixed hardware bring-up
 * sequence; do not reorder.  Returns 0 on success, -EINVAL on timeout;
 * upload errors are passed through unchanged.
 */
static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
{
	int i, result;
	uint32_t val;

	/* wait for smc boot up (polls up to usec_timeout microseconds) */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
		if (val)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("SMC boot sequence is not completed\n");
		return -EINVAL;
	}

	/* Clear firmware interrupt enable flag */
	WREG32_SMC(ixFIRMWARE_FLAGS, 0);

	/* Assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	result = tonga_smu_upload_firmware_image(adev);
	if (result)
		return result;

	/* Set smc instruct start point at 0x0 */
	tonga_program_jump_on_start(adev);

	/* Enable clock */
	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);

	/* De-assert reset */
	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);

	/* Wait for firmware to initialize */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("Timeout for SMC firmware initialization\n");
		return -EINVAL;
	}

	return 0;
}
717
718int tonga_smu_start(struct amdgpu_device *adev)
719{
720 int result;
721 uint32_t val;
722
723 if (!tonga_is_smc_ram_running(adev)) {
724 val = RREG32_SMC(ixSMU_FIRMWARE);
725 if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
726 result = tonga_smu_start_in_non_protection_mode(adev);
727 if (result)
728 return result;
729 } else {
730 result = tonga_smu_start_in_protection_mode(adev);
731 if (result)
732 return result;
733 }
734 }
735
736 return tonga_smu_request_load_fw(adev);
737}
738
/* SMU manager callbacks exposed to the amdgpu core.  Firmware loading
 * is requested from tonga_smu_start() itself, so the explicit request
 * hooks are intentionally left unimplemented (NULL).
 */
static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
	.check_fw_load_finish = tonga_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
744
745int tonga_smu_init(struct amdgpu_device *adev)
746{
747 struct tonga_smu_private_data *private;
748 uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
749 uint32_t smu_internal_buffer_size = 200*4096;
750 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
751 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
752 uint64_t mc_addr;
753 void *toc_buf_ptr;
754 void *smu_buf_ptr;
755 int ret;
756
757 private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
758 if (NULL == private)
759 return -ENOMEM;
760
761 /* allocate firmware buffers */
762 if (adev->firmware.smu_load)
763 amdgpu_ucode_init_bo(adev);
764
765 adev->smu.priv = private;
766 adev->smu.fw_flags = 0;
767
768 /* Allocate FW image data structure and header buffer */
769 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
770 true, AMDGPU_GEM_DOMAIN_VRAM,
771 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
772 NULL, NULL, toc_buf);
773 if (ret) {
774 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
775 return -ENOMEM;
776 }
777
778 /* Allocate buffer for SMU internal buffer */
779 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
780 true, AMDGPU_GEM_DOMAIN_VRAM,
781 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
782 NULL, NULL, smu_buf);
783 if (ret) {
784 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
785 return -ENOMEM;
786 }
787
788 /* Retrieve GPU address for header buffer and internal buffer */
789 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
790 if (ret) {
791 amdgpu_bo_unref(&adev->smu.toc_buf);
792 DRM_ERROR("Failed to reserve the TOC buffer\n");
793 return -EINVAL;
794 }
795
796 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
797 if (ret) {
798 amdgpu_bo_unreserve(adev->smu.toc_buf);
799 amdgpu_bo_unref(&adev->smu.toc_buf);
800 DRM_ERROR("Failed to pin the TOC buffer\n");
801 return -EINVAL;
802 }
803
804 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
805 if (ret) {
806 amdgpu_bo_unreserve(adev->smu.toc_buf);
807 amdgpu_bo_unref(&adev->smu.toc_buf);
808 DRM_ERROR("Failed to map the TOC buffer\n");
809 return -EINVAL;
810 }
811
812 amdgpu_bo_unreserve(adev->smu.toc_buf);
813 private->header_addr_low = lower_32_bits(mc_addr);
814 private->header_addr_high = upper_32_bits(mc_addr);
815 private->header = toc_buf_ptr;
816
817 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
818 if (ret) {
819 amdgpu_bo_unref(&adev->smu.smu_buf);
820 amdgpu_bo_unref(&adev->smu.toc_buf);
821 DRM_ERROR("Failed to reserve the SMU internal buffer\n");
822 return -EINVAL;
823 }
824
825 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
826 if (ret) {
827 amdgpu_bo_unreserve(adev->smu.smu_buf);
828 amdgpu_bo_unref(&adev->smu.smu_buf);
829 amdgpu_bo_unref(&adev->smu.toc_buf);
830 DRM_ERROR("Failed to pin the SMU internal buffer\n");
831 return -EINVAL;
832 }
833
834 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
835 if (ret) {
836 amdgpu_bo_unreserve(adev->smu.smu_buf);
837 amdgpu_bo_unref(&adev->smu.smu_buf);
838 amdgpu_bo_unref(&adev->smu.toc_buf);
839 DRM_ERROR("Failed to map the SMU internal buffer\n");
840 return -EINVAL;
841 }
842
843 amdgpu_bo_unreserve(adev->smu.smu_buf);
844 private->smu_buffer_addr_low = lower_32_bits(mc_addr);
845 private->smu_buffer_addr_high = upper_32_bits(mc_addr);
846
847 adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
848
849 return 0;
850}
851
/* Tear down the SMU manager state created by tonga_smu_init():
 * release the TOC and SMU internal buffer objects, free the private
 * data, and — if the ucode firmware buffer was set up — release that
 * too.  Always returns 0.
 */
int tonga_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	/* kfree(NULL) is a no-op, so this is safe even if init failed */
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b688e2f77419..c0d9aad7126f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -79,6 +79,9 @@
79#endif 79#endif
80#include "dce_virtual.h" 80#include "dce_virtual.h"
81 81
82MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
83MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
84MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
82MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); 85MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
83MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); 86MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
84MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); 87MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -445,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
445 return true; 448 return true;
446} 449}
447 450
448static u32 vi_get_virtual_caps(struct amdgpu_device *adev) 451static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
449{ 452{
450 u32 caps = 0; 453 uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
451 u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); 454 /* bit0: 0 means pf and 1 means vf */
455 /* bit31: 0 means disable IOV and 1 means enable */
456 if (reg & 1)
457 adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
452 458
453 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) 459 if (reg & 0x80000000)
454 caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; 460 adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
455 461
456 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) 462 if (reg == 0) {
457 caps |= AMDGPU_VIRT_CAPS_IS_VF; 463 if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
458 464 adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
459 return caps; 465 }
460} 466}
461 467
462static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 468static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -1521,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
1521{ 1527{
1522 .read_disabled_bios = &vi_read_disabled_bios, 1528 .read_disabled_bios = &vi_read_disabled_bios,
1523 .read_bios_from_rom = &vi_read_bios_from_rom, 1529 .read_bios_from_rom = &vi_read_bios_from_rom,
1530 .detect_hw_virtualization = vi_detect_hw_virtualization,
1524 .read_register = &vi_read_register, 1531 .read_register = &vi_read_register,
1525 .reset = &vi_asic_reset, 1532 .reset = &vi_asic_reset,
1526 .set_vga_state = &vi_vga_set_state, 1533 .set_vga_state = &vi_vga_set_state,
1527 .get_xclk = &vi_get_xclk, 1534 .get_xclk = &vi_get_xclk,
1528 .set_uvd_clocks = &vi_set_uvd_clocks, 1535 .set_uvd_clocks = &vi_set_uvd_clocks,
1529 .set_vce_clocks = &vi_set_vce_clocks, 1536 .set_vce_clocks = &vi_set_vce_clocks,
1530 .get_virtual_caps = &vi_get_virtual_caps,
1531}; 1537};
1532 1538
1533static int vi_common_early_init(void *handle) 1539static int vi_common_early_init(void *handle)
@@ -1657,6 +1663,10 @@ static int vi_common_early_init(void *handle)
1657 return -EINVAL; 1663 return -EINVAL;
1658 } 1664 }
1659 1665
1666 /* in early init stage, vbios code won't work */
1667 if (adev->asic_funcs->detect_hw_virtualization)
1668 amdgpu_asic_detect_hw_virtualization(adev);
1669
1660 if (amdgpu_smc_load_fw && smc_enabled) 1670 if (amdgpu_smc_load_fw && smc_enabled)
1661 adev->firmware.smu_load = true; 1671 adev->firmware.smu_load = true;
1662 1672
@@ -1800,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1800 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); 1810 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1801} 1811}
1802 1812
1813static int vi_common_set_clockgating_state_by_smu(void *handle,
1814 enum amd_clockgating_state state)
1815{
1816 uint32_t msg_id, pp_state;
1817 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1818 void *pp_handle = adev->powerplay.pp_handle;
1819
1820 if (state == AMD_CG_STATE_UNGATE)
1821 pp_state = 0;
1822 else
1823 pp_state = PP_STATE_CG | PP_STATE_LS;
1824
1825 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1826 PP_BLOCK_SYS_MC,
1827 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
1828 pp_state);
1829 amd_set_clockgating_by_smu(pp_handle, msg_id);
1830
1831 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1832 PP_BLOCK_SYS_SDMA,
1833 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
1834 pp_state);
1835 amd_set_clockgating_by_smu(pp_handle, msg_id);
1836
1837 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1838 PP_BLOCK_SYS_HDP,
1839 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
1840 pp_state);
1841 amd_set_clockgating_by_smu(pp_handle, msg_id);
1842
1843 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1844 PP_BLOCK_SYS_BIF,
1845 PP_STATE_SUPPORT_LS,
1846 pp_state);
1847 amd_set_clockgating_by_smu(pp_handle, msg_id);
1848
1849 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1850 PP_BLOCK_SYS_BIF,
1851 PP_STATE_SUPPORT_CG,
1852 pp_state);
1853 amd_set_clockgating_by_smu(pp_handle, msg_id);
1854
1855 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1856 PP_BLOCK_SYS_DRM,
1857 PP_STATE_SUPPORT_LS,
1858 pp_state);
1859 amd_set_clockgating_by_smu(pp_handle, msg_id);
1860
1861 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1862 PP_BLOCK_SYS_ROM,
1863 PP_STATE_SUPPORT_CG,
1864 pp_state);
1865 amd_set_clockgating_by_smu(pp_handle, msg_id);
1866
1867 return 0;
1868}
1869
1803static int vi_common_set_clockgating_state(void *handle, 1870static int vi_common_set_clockgating_state(void *handle,
1804 enum amd_clockgating_state state) 1871 enum amd_clockgating_state state)
1805{ 1872{
@@ -1825,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
1825 vi_update_hdp_light_sleep(adev, 1892 vi_update_hdp_light_sleep(adev,
1826 state == AMD_CG_STATE_GATE ? true : false); 1893 state == AMD_CG_STATE_GATE ? true : false);
1827 break; 1894 break;
1895 case CHIP_TONGA:
1896 case CHIP_POLARIS10:
1897 case CHIP_POLARIS11:
1898 vi_common_set_clockgating_state_by_smu(adev, state);
1828 default: 1899 default:
1829 break; 1900 break;
1830 } 1901 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index f62b261660d4..11746f22d0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -373,4 +373,41 @@
373#define VCE_CMD_WAIT_GE 0x00000106 373#define VCE_CMD_WAIT_GE 0x00000106
374#define VCE_CMD_UPDATE_PTB 0x00000107 374#define VCE_CMD_UPDATE_PTB 0x00000107
375#define VCE_CMD_FLUSH_TLB 0x00000108 375#define VCE_CMD_FLUSH_TLB 0x00000108
376
377/* mmPA_SC_RASTER_CONFIG mask */
378#define RB_MAP_PKR0(x) ((x) << 0)
379#define RB_MAP_PKR0_MASK (0x3 << 0)
380#define RB_MAP_PKR1(x) ((x) << 2)
381#define RB_MAP_PKR1_MASK (0x3 << 2)
382#define RB_XSEL2(x) ((x) << 4)
383#define RB_XSEL2_MASK (0x3 << 4)
384#define RB_XSEL (1 << 6)
385#define RB_YSEL (1 << 7)
386#define PKR_MAP(x) ((x) << 8)
387#define PKR_MAP_MASK (0x3 << 8)
388#define PKR_XSEL(x) ((x) << 10)
389#define PKR_XSEL_MASK (0x3 << 10)
390#define PKR_YSEL(x) ((x) << 12)
391#define PKR_YSEL_MASK (0x3 << 12)
392#define SC_MAP(x) ((x) << 16)
393#define SC_MAP_MASK (0x3 << 16)
394#define SC_XSEL(x) ((x) << 18)
395#define SC_XSEL_MASK (0x3 << 18)
396#define SC_YSEL(x) ((x) << 20)
397#define SC_YSEL_MASK (0x3 << 20)
398#define SE_MAP(x) ((x) << 24)
399#define SE_MAP_MASK (0x3 << 24)
400#define SE_XSEL(x) ((x) << 26)
401#define SE_XSEL_MASK (0x3 << 26)
402#define SE_YSEL(x) ((x) << 28)
403#define SE_YSEL_MASK (0x3 << 28)
404
405/* mmPA_SC_RASTER_CONFIG_1 mask */
406#define SE_PAIR_MAP(x) ((x) << 0)
407#define SE_PAIR_MAP_MASK (0x3 << 0)
408#define SE_PAIR_XSEL(x) ((x) << 2)
409#define SE_PAIR_XSEL_MASK (0x3 << 2)
410#define SE_PAIR_YSEL(x) ((x) << 4)
411#define SE_PAIR_YSEL_MASK (0x3 << 4)
412
376#endif 413#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
index 8c5608a4d526..c57eff159374 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -1398,10 +1398,45 @@
1398#define DB_DEPTH_INFO 0xA00F 1398#define DB_DEPTH_INFO 0xA00F
1399 1399
1400#define PA_SC_RASTER_CONFIG 0xA0D4 1400#define PA_SC_RASTER_CONFIG 0xA0D4
1401# define RB_MAP_PKR0(x) ((x) << 0)
1402# define RB_MAP_PKR0_MASK (0x3 << 0)
1403# define RB_MAP_PKR1(x) ((x) << 2)
1404# define RB_MAP_PKR1_MASK (0x3 << 2)
1401# define RASTER_CONFIG_RB_MAP_0 0 1405# define RASTER_CONFIG_RB_MAP_0 0
1402# define RASTER_CONFIG_RB_MAP_1 1 1406# define RASTER_CONFIG_RB_MAP_1 1
1403# define RASTER_CONFIG_RB_MAP_2 2 1407# define RASTER_CONFIG_RB_MAP_2 2
1404# define RASTER_CONFIG_RB_MAP_3 3 1408# define RASTER_CONFIG_RB_MAP_3 3
1409# define RB_XSEL2(x) ((x) << 4)
1410# define RB_XSEL2_MASK (0x3 << 4)
1411# define RB_XSEL (1 << 6)
1412# define RB_YSEL (1 << 7)
1413# define PKR_MAP(x) ((x) << 8)
1414# define PKR_MAP_MASK (0x3 << 8)
1415# define RASTER_CONFIG_PKR_MAP_0 0
1416# define RASTER_CONFIG_PKR_MAP_1 1
1417# define RASTER_CONFIG_PKR_MAP_2 2
1418# define RASTER_CONFIG_PKR_MAP_3 3
1419# define PKR_XSEL(x) ((x) << 10)
1420# define PKR_XSEL_MASK (0x3 << 10)
1421# define PKR_YSEL(x) ((x) << 12)
1422# define PKR_YSEL_MASK (0x3 << 12)
1423# define SC_MAP(x) ((x) << 16)
1424# define SC_MAP_MASK (0x3 << 16)
1425# define SC_XSEL(x) ((x) << 18)
1426# define SC_XSEL_MASK (0x3 << 18)
1427# define SC_YSEL(x) ((x) << 20)
1428# define SC_YSEL_MASK (0x3 << 20)
1429# define SE_MAP(x) ((x) << 24)
1430# define SE_MAP_MASK (0x3 << 24)
1431# define RASTER_CONFIG_SE_MAP_0 0
1432# define RASTER_CONFIG_SE_MAP_1 1
1433# define RASTER_CONFIG_SE_MAP_2 2
1434# define RASTER_CONFIG_SE_MAP_3 3
1435# define SE_XSEL(x) ((x) << 26)
1436# define SE_XSEL_MASK (0x3 << 26)
1437# define SE_YSEL(x) ((x) << 28)
1438# define SE_YSEL_MASK (0x3 << 28)
1439
1405 1440
1406#define VGT_EVENT_INITIATOR 0xA2A4 1441#define VGT_EVENT_INITIATOR 0xA2A4
1407# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 1442# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 6aa8938fd826..df7c18b6a02a 100644..100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -161,6 +161,7 @@ struct cgs_clock_limits {
161 */ 161 */
162struct cgs_firmware_info { 162struct cgs_firmware_info {
163 uint16_t version; 163 uint16_t version;
164 uint16_t fw_version;
164 uint16_t feature_version; 165 uint16_t feature_version;
165 uint32_t image_size; 166 uint32_t image_size;
166 uint64_t mc_addr; 167 uint64_t mc_addr;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b1d19409bf86..7174f7a68266 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -191,11 +191,9 @@ static int pp_sw_reset(void *handle)
191} 191}
192 192
193 193
194static int pp_set_clockgating_state(void *handle, 194int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
195 enum amd_clockgating_state state)
196{ 195{
197 struct pp_hwmgr *hwmgr; 196 struct pp_hwmgr *hwmgr;
198 uint32_t msg_id, pp_state;
199 197
200 if (handle == NULL) 198 if (handle == NULL)
201 return -EINVAL; 199 return -EINVAL;
@@ -209,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
209 return 0; 207 return 0;
210 } 208 }
211 209
212 if (state == AMD_CG_STATE_UNGATE) 210 return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
213 pp_state = 0;
214 else
215 pp_state = PP_STATE_CG | PP_STATE_LS;
216
217 /* Enable/disable GFX blocks clock gating through SMU */
218 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
219 PP_BLOCK_GFX_CG,
220 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
221 pp_state);
222 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
223 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
224 PP_BLOCK_GFX_3D,
225 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
226 pp_state);
227 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
228 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
229 PP_BLOCK_GFX_RLC,
230 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
231 pp_state);
232 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
233 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
234 PP_BLOCK_GFX_CP,
235 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
236 pp_state);
237 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
238 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
239 PP_BLOCK_GFX_MG,
240 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
241 pp_state);
242 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
243
244 /* Enable/disable System blocks clock gating through SMU */
245 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
246 PP_BLOCK_SYS_BIF,
247 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
248 pp_state);
249 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
250 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
251 PP_BLOCK_SYS_BIF,
252 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
253 pp_state);
254 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
255 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
256 PP_BLOCK_SYS_MC,
257 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
258 pp_state);
259 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
260 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
261 PP_BLOCK_SYS_ROM,
262 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
263 pp_state);
264 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
265 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
266 PP_BLOCK_SYS_DRM,
267 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
268 pp_state);
269 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
270 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
271 PP_BLOCK_SYS_HDP,
272 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
273 pp_state);
274 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
275 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
276 PP_BLOCK_SYS_SDMA,
277 PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
278 pp_state);
279 hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
280
281 return 0;
282} 211}
283 212
284static int pp_set_powergating_state(void *handle, 213static int pp_set_powergating_state(void *handle,
@@ -362,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
362 .is_idle = pp_is_idle, 291 .is_idle = pp_is_idle,
363 .wait_for_idle = pp_wait_for_idle, 292 .wait_for_idle = pp_wait_for_idle,
364 .soft_reset = pp_sw_reset, 293 .soft_reset = pp_sw_reset,
365 .set_clockgating_state = pp_set_clockgating_state, 294 .set_clockgating_state = NULL,
366 .set_powergating_state = pp_set_powergating_state, 295 .set_powergating_state = pp_set_powergating_state,
367}; 296};
368 297
@@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
576 } 505 }
577} 506}
578 507
579static void
580pp_debugfs_print_current_performance_level(void *handle,
581 struct seq_file *m)
582{
583 struct pp_hwmgr *hwmgr;
584
585 if (handle == NULL)
586 return;
587
588 hwmgr = ((struct pp_instance *)handle)->hwmgr;
589
590 if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
591 return;
592
593 if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
594 printk(KERN_INFO "%s was not implemented.\n", __func__);
595 return;
596 }
597
598 hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
599}
600
601static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) 508static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
602{ 509{
603 struct pp_hwmgr *hwmgr; 510 struct pp_hwmgr *hwmgr;
@@ -894,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
894 return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); 801 return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
895} 802}
896 803
804static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
805{
806 struct pp_hwmgr *hwmgr;
807
808 if (!handle)
809 return -EINVAL;
810
811 hwmgr = ((struct pp_instance *)handle)->hwmgr;
812
813 PP_CHECK_HW(hwmgr);
814
815 if (hwmgr->hwmgr_func->read_sensor == NULL) {
816 printk(KERN_INFO "%s was not implemented.\n", __func__);
817 return 0;
818 }
819
820 return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
821}
822
897const struct amd_powerplay_funcs pp_dpm_funcs = { 823const struct amd_powerplay_funcs pp_dpm_funcs = {
898 .get_temperature = pp_dpm_get_temperature, 824 .get_temperature = pp_dpm_get_temperature,
899 .load_firmware = pp_dpm_load_fw, 825 .load_firmware = pp_dpm_load_fw,
@@ -906,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
906 .powergate_vce = pp_dpm_powergate_vce, 832 .powergate_vce = pp_dpm_powergate_vce,
907 .powergate_uvd = pp_dpm_powergate_uvd, 833 .powergate_uvd = pp_dpm_powergate_uvd,
908 .dispatch_tasks = pp_dpm_dispatch_tasks, 834 .dispatch_tasks = pp_dpm_dispatch_tasks,
909 .print_current_performance_level = pp_debugfs_print_current_performance_level,
910 .set_fan_control_mode = pp_dpm_set_fan_control_mode, 835 .set_fan_control_mode = pp_dpm_set_fan_control_mode,
911 .get_fan_control_mode = pp_dpm_get_fan_control_mode, 836 .get_fan_control_mode = pp_dpm_get_fan_control_mode,
912 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, 837 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -920,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
920 .set_sclk_od = pp_dpm_set_sclk_od, 845 .set_sclk_od = pp_dpm_set_sclk_od,
921 .get_mclk_od = pp_dpm_get_mclk_od, 846 .get_mclk_od = pp_dpm_get_mclk_od,
922 .set_mclk_od = pp_dpm_set_mclk_od, 847 .set_mclk_od = pp_dpm_set_mclk_od,
848 .read_sensor = pp_dpm_read_sensor,
923}; 849};
924 850
925static int amd_pp_instance_init(struct amd_pp_init *pp_init, 851static int amd_pp_instance_init(struct amd_pp_init *pp_init,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48184..92b117843875 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
262 unblock_adjust_power_state_tasks, 262 unblock_adjust_power_state_tasks,
263 set_cpu_power_state, 263 set_cpu_power_state,
264 notify_hw_power_source_tasks, 264 notify_hw_power_source_tasks,
265 get_2d_performance_state_tasks,
266 set_performance_state_tasks,
265 /* updateDALConfigurationTasks, 267 /* updateDALConfigurationTasks,
266 variBrightDisplayConfigurationChangeTasks, */ 268 variBrightDisplayConfigurationChangeTasks, */
267 adjust_power_state_tasks, 269 adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index 1d1875a7cb2d..489908887e9c 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -101,11 +101,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
101 if (requested == NULL) 101 if (requested == NULL)
102 return 0; 102 return 0;
103 103
104 phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
105
104 if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) 106 if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
105 equal = false; 107 equal = false;
106 108
107 if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { 109 if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
108 phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
109 phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); 110 phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
110 memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); 111 memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
111 } 112 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 6e359c90dfda..5fff1d636ab7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,16 +3,12 @@
3# It provides the hardware management services for the driver. 3# It provides the hardware management services for the driver.
4 4
5HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ 5HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
6 hardwaremanager.o pp_acpi.o cz_hwmgr.o \ 6 hardwaremanager.o pp_acpi.o cz_hwmgr.o \
7 cz_clockpowergating.o tonga_powertune.o\ 7 cz_clockpowergating.o pppcielanes.o\
8 process_pptables_v1_0.o ppatomctrl.o \ 8 process_pptables_v1_0.o ppatomctrl.o \
9 tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ 9 smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
10 fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ 10 smu7_clockpowergating.o
11 fiji_clockpowergating.o fiji_thermal.o \ 11
12 polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
13 polaris10_clockpowergating.o iceland_hwmgr.o \
14 iceland_clockpowergating.o iceland_thermal.o \
15 iceland_powertune.o
16 12
17AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) 13AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
18 14
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 5ecef1732e20..7e4fcbbbe086 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1538,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
1538 return sizeof(struct cz_power_state); 1538 return sizeof(struct cz_power_state);
1539} 1539}
1540 1540
1541static void
1542cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
1543{
1544 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1545
1546 struct phm_clock_voltage_dependency_table *table =
1547 hwmgr->dyn_state.vddc_dependency_on_sclk;
1548
1549 struct phm_vce_clock_voltage_dependency_table *vce_table =
1550 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1551
1552 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1553 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1554
1555 uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1556 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1557 uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1558 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1559 uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1560 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1561
1562 uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1563 uint16_t vddnb, vddgfx;
1564 int result;
1565
1566 if (sclk_index >= NUM_SCLK_LEVELS) {
1567 seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
1568 } else {
1569 sclk = table->entries[sclk_index].clk;
1570 seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
1571 }
1572
1573 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1574 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1575 vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
1576 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1577 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1578 vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1579 seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
1580
1581 seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
1582 if (!cz_hwmgr->uvd_power_gated) {
1583 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1584 seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
1585 } else {
1586 vclk = uvd_table->entries[uvd_index].vclk;
1587 dclk = uvd_table->entries[uvd_index].dclk;
1588 seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
1589 }
1590 }
1591
1592 seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
1593 if (!cz_hwmgr->vce_power_gated) {
1594 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1595 seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
1596 } else {
1597 ecclk = vce_table->entries[vce_index].ecclk;
1598 seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
1599 }
1600 }
1601
1602 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
1603 if (0 == result) {
1604 activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
1605 activity_percent = activity_percent > 100 ? 100 : activity_percent;
1606 } else {
1607 activity_percent = 50;
1608 }
1609
1610 seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
1611}
1612
1613static void cz_hw_print_display_cfg( 1541static void cz_hw_print_display_cfg(
1614 const struct cc6_settings *cc6_settings) 1542 const struct cc6_settings *cc6_settings)
1615{ 1543{
@@ -1857,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
1857 return 0; 1785 return 0;
1858} 1786}
1859 1787
1788static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
1789{
1790 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1791
1792 struct phm_clock_voltage_dependency_table *table =
1793 hwmgr->dyn_state.vddc_dependency_on_sclk;
1794
1795 struct phm_vce_clock_voltage_dependency_table *vce_table =
1796 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1797
1798 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1799 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1800
1801 uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1802 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1803 uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1804 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1805 uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1806 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1807
1808 uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1809 uint16_t vddnb, vddgfx;
1810 int result;
1811
1812 switch (idx) {
1813 case AMDGPU_PP_SENSOR_GFX_SCLK:
1814 if (sclk_index < NUM_SCLK_LEVELS) {
1815 sclk = table->entries[sclk_index].clk;
1816 *value = sclk;
1817 return 0;
1818 }
1819 return -EINVAL;
1820 case AMDGPU_PP_SENSOR_VDDNB:
1821 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1822 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1823 vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
1824 *value = vddnb;
1825 return 0;
1826 case AMDGPU_PP_SENSOR_VDDGFX:
1827 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1828 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1829 vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1830 *value = vddgfx;
1831 return 0;
1832 case AMDGPU_PP_SENSOR_UVD_VCLK:
1833 if (!cz_hwmgr->uvd_power_gated) {
1834 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1835 return -EINVAL;
1836 } else {
1837 vclk = uvd_table->entries[uvd_index].vclk;
1838 *value = vclk;
1839 return 0;
1840 }
1841 }
1842 *value = 0;
1843 return 0;
1844 case AMDGPU_PP_SENSOR_UVD_DCLK:
1845 if (!cz_hwmgr->uvd_power_gated) {
1846 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1847 return -EINVAL;
1848 } else {
1849 dclk = uvd_table->entries[uvd_index].dclk;
1850 *value = dclk;
1851 return 0;
1852 }
1853 }
1854 *value = 0;
1855 return 0;
1856 case AMDGPU_PP_SENSOR_VCE_ECCLK:
1857 if (!cz_hwmgr->vce_power_gated) {
1858 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1859 return -EINVAL;
1860 } else {
1861 ecclk = vce_table->entries[vce_index].ecclk;
1862 *value = ecclk;
1863 return 0;
1864 }
1865 }
1866 *value = 0;
1867 return 0;
1868 case AMDGPU_PP_SENSOR_GPU_LOAD:
1869 result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
1870 if (0 == result) {
1871 activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
1872 activity_percent = activity_percent > 100 ? 100 : activity_percent;
1873 } else {
1874 activity_percent = 50;
1875 }
1876 *value = activity_percent;
1877 return 0;
1878 case AMDGPU_PP_SENSOR_UVD_POWER:
1879 *value = cz_hwmgr->uvd_power_gated ? 0 : 1;
1880 return 0;
1881 case AMDGPU_PP_SENSOR_VCE_POWER:
1882 *value = cz_hwmgr->vce_power_gated ? 0 : 1;
1883 return 0;
1884 default:
1885 return -EINVAL;
1886 }
1887}
1888
1860static const struct pp_hwmgr_func cz_hwmgr_funcs = { 1889static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1861 .backend_init = cz_hwmgr_backend_init, 1890 .backend_init = cz_hwmgr_backend_init,
1862 .backend_fini = cz_hwmgr_backend_fini, 1891 .backend_fini = cz_hwmgr_backend_fini,
@@ -1872,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1872 .patch_boot_state = cz_dpm_patch_boot_state, 1901 .patch_boot_state = cz_dpm_patch_boot_state,
1873 .get_pp_table_entry = cz_dpm_get_pp_table_entry, 1902 .get_pp_table_entry = cz_dpm_get_pp_table_entry,
1874 .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, 1903 .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
1875 .print_current_perforce_level = cz_print_current_perforce_level,
1876 .set_cpu_power_state = cz_set_cpu_power_state, 1904 .set_cpu_power_state = cz_set_cpu_power_state,
1877 .store_cc6_data = cz_store_cc6_data, 1905 .store_cc6_data = cz_store_cc6_data,
1878 .force_clock_level = cz_force_clock_level, 1906 .force_clock_level = cz_force_clock_level,
@@ -1882,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1882 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, 1910 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
1883 .get_clock_by_type = cz_get_clock_by_type, 1911 .get_clock_by_type = cz_get_clock_by_type,
1884 .get_max_high_clocks = cz_get_max_high_clocks, 1912 .get_max_high_clocks = cz_get_max_high_clocks,
1913 .read_sensor = cz_read_sensor,
1885}; 1914};
1886 1915
1887int cz_hwmgr_init(struct pp_hwmgr *hwmgr) 1916int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe82068b29..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "fiji_clockpowergating.h"
26#include "fiji_ppsmc.h"
27#include "fiji_hwmgr.h"
28
29int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
30{
31 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
32
33 data->uvd_power_gated = false;
34 data->vce_power_gated = false;
35 data->samu_power_gated = false;
36 data->acp_power_gated = false;
37
38 return 0;
39}
40
41int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
42{
43 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
44
45 if (data->uvd_power_gated == bgate)
46 return 0;
47
48 data->uvd_power_gated = bgate;
49
50 if (bgate) {
51 cgs_set_clockgating_state(hwmgr->device,
52 AMD_IP_BLOCK_TYPE_UVD,
53 AMD_CG_STATE_GATE);
54 fiji_update_uvd_dpm(hwmgr, true);
55 } else {
56 fiji_update_uvd_dpm(hwmgr, false);
57 cgs_set_clockgating_state(hwmgr->device,
58 AMD_IP_BLOCK_TYPE_UVD,
59 AMD_CG_STATE_UNGATE);
60 }
61
62 return 0;
63}
64
65int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
66{
67 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
68 struct phm_set_power_state_input states;
69 const struct pp_power_state *pcurrent;
70 struct pp_power_state *requested;
71
72 if (data->vce_power_gated == bgate)
73 return 0;
74
75 data->vce_power_gated = bgate;
76
77 pcurrent = hwmgr->current_ps;
78 requested = hwmgr->request_ps;
79
80 states.pcurrent_state = &(pcurrent->hardware);
81 states.pnew_state = &(requested->hardware);
82
83 fiji_update_vce_dpm(hwmgr, &states);
84 fiji_enable_disable_vce_dpm(hwmgr, !bgate);
85
86 return 0;
87}
88
89int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
90{
91 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
92
93 if (data->samu_power_gated == bgate)
94 return 0;
95
96 data->samu_power_gated = bgate;
97
98 if (bgate)
99 fiji_update_samu_dpm(hwmgr, true);
100 else
101 fiji_update_samu_dpm(hwmgr, false);
102
103 return 0;
104}
105
106int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
107{
108 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
109
110 if (data->acp_power_gated == bgate)
111 return 0;
112
113 data->acp_power_gated = bgate;
114
115 if (bgate)
116 fiji_update_acp_dpm(hwmgr, true);
117 else
118 fiji_update_acp_dpm(hwmgr, false);
119
120 return 0;
121}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8fecb2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_DYN_DEFAULTS_H
25#define FIJI_DYN_DEFAULTS_H
26
27/** \file
28* Volcanic Islands Dynamic default parameters.
29*/
30
31enum FIJIdpm_TrendDetection
32{
33 FIJIAdpm_TrendDetection_AUTO,
34 FIJIAdpm_TrendDetection_UP,
35 FIJIAdpm_TrendDetection_DOWN
36};
37typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
38
39/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
40
41/* Bit vector representing same fields as hardware register. */
42#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
43 * HDP_busy
44 * IH_busy
45 * UVD_busy
46 * VCE_busy
47 * ACP_busy
48 * SAMU_busy
49 * SDMA enabled */
50#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
51 * SH_Gfx_busy
52 * RB_Gfx_busy
53 * VCE_busy */
54
55#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
56 * FE_Gfx_busy
57 * RB_Gfx_busy
58 * ACP_busy */
59
60#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
61 * FE_Gfx_busy
62 * SH_Gfx_busy
63 * UVD_busy */
64
65#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
66 * VCE_busy
67 * ACP_busy
68 * SAMU_busy */
69
70#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
71#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
72#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
73
74
75/* thermal protection counter (units). */
76#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
77
78/* static screen threshold unit */
79#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
80
81/* static screen threshold */
82#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
83
84/* gfx idle clock stop threshold */
85#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
86
87/* Fixed reference divider to use when building baby stepping tables. */
88#define PPFIJI_REFERENCEDIVIDER_DFLT 4
89
90/* ULV voltage change delay time
91 * Used to be delay_vreg in N.I. split for S.I.
92 * Using N.I. delay_vreg value as default
93 * ReferenceClock = 2700
94 * VoltageResponseTime = 1000
95 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
96 */
97#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
98
99#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
100#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
101#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
102#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
103
104#endif
105
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 74300d6ef686..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5600 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include "linux/delay.h"
26
27#include "hwmgr.h"
28#include "fiji_smumgr.h"
29#include "atombios.h"
30#include "hardwaremanager.h"
31#include "ppatomctrl.h"
32#include "atombios.h"
33#include "cgs_common.h"
34#include "fiji_dyn_defaults.h"
35#include "fiji_powertune.h"
36#include "smu73.h"
37#include "smu/smu_7_1_3_d.h"
38#include "smu/smu_7_1_3_sh_mask.h"
39#include "gmc/gmc_8_1_d.h"
40#include "gmc/gmc_8_1_sh_mask.h"
41#include "bif/bif_5_0_d.h"
42#include "bif/bif_5_0_sh_mask.h"
43#include "dce/dce_10_0_d.h"
44#include "dce/dce_10_0_sh_mask.h"
45#include "pppcielanes.h"
46#include "fiji_hwmgr.h"
47#include "process_pptables_v1_0.h"
48#include "pptable_v1_0.h"
49#include "pp_debug.h"
50#include "pp_acpi.h"
51#include "amd_pcie_helpers.h"
52#include "cgs_linux.h"
53#include "ppinterrupt.h"
54
55#include "fiji_clockpowergating.h"
56#include "fiji_thermal.h"
57
58#define VOLTAGE_SCALE 4
59#define SMC_RAM_END 0x40000
60#define VDDC_VDDCI_DELTA 300
61
62#define MC_SEQ_MISC0_GDDR5_SHIFT 28
63#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
64#define MC_SEQ_MISC0_GDDR5_VALUE 5
65
66#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
67#define MC_CG_ARB_FREQ_F1 0x0b
68#define MC_CG_ARB_FREQ_F2 0x0c
69#define MC_CG_ARB_FREQ_F3 0x0d
70
71/* From smc_reg.h */
72#define SMC_CG_IND_START 0xc0030000
73#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */
74
75#define VOLTAGE_SCALE 4
76#define VOLTAGE_VID_OFFSET_SCALE1 625
77#define VOLTAGE_VID_OFFSET_SCALE2 100
78
79#define VDDC_VDDCI_DELTA 300
80
81#define ixSWRST_COMMAND_1 0x1400103
82#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000
83
84/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
85enum DPM_EVENT_SRC {
86 DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
87 DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
88 DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
89 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
90 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
91};
92
93
94/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
95 * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
96 */
97static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
98{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
99
100/* [FF, SS] type, [] 4 voltage ranges, and
101 * [Floor Freq, Boundary Freq, VID min , VID max]
102 */
103static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
104{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
105 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
106
107/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
108 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
109 */
110static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
111{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
112
113static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
114
115static struct fiji_power_state *cast_phw_fiji_power_state(
116 struct pp_hw_power_state *hw_ps)
117{
118 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
119 "Invalid Powerstate Type!",
120 return NULL;);
121
122 return (struct fiji_power_state *)hw_ps;
123}
124
125static const struct
126fiji_power_state *cast_const_phw_fiji_power_state(
127 const struct pp_hw_power_state *hw_ps)
128{
129 PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
130 "Invalid Powerstate Type!",
131 return NULL;);
132
133 return (const struct fiji_power_state *)hw_ps;
134}
135
136static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
137{
138 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
139 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
140 ? true : false;
141}
142
143static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
144{
145 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
146 struct fiji_ulv_parm *ulv = &data->ulv;
147
148 ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
149 data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
150 data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
151 data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
152 data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
153 data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
154 data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
155 data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
156 data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
157
158 data->static_screen_threshold_unit =
159 PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
160 data->static_screen_threshold =
161 PPFIJI_STATICSCREENTHRESHOLD_DFLT;
162
163 /* Unset ABM cap as it moved to DAL.
164 * Add PHM_PlatformCaps_NonABMSupportInPPLib
165 * for re-direct ABM related request to DAL
166 */
167 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
168 PHM_PlatformCaps_ABM);
169 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
170 PHM_PlatformCaps_NonABMSupportInPPLib);
171
172 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
173 PHM_PlatformCaps_DynamicACTiming);
174
175 fiji_initialize_power_tune_defaults(hwmgr);
176
177 data->mclk_stutter_mode_threshold = 60000;
178 data->pcie_gen_performance.max = PP_PCIEGen1;
179 data->pcie_gen_performance.min = PP_PCIEGen3;
180 data->pcie_gen_power_saving.max = PP_PCIEGen1;
181 data->pcie_gen_power_saving.min = PP_PCIEGen3;
182 data->pcie_lane_performance.max = 0;
183 data->pcie_lane_performance.min = 16;
184 data->pcie_lane_power_saving.max = 0;
185 data->pcie_lane_power_saving.min = 16;
186
187 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
188 PHM_PlatformCaps_DynamicUVDState);
189}
190
191static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
192 phm_ppt_v1_voltage_lookup_table *lookup_table,
193 uint16_t virtual_voltage_id, int32_t *sclk)
194{
195 uint8_t entryId;
196 uint8_t voltageId;
197 struct phm_ppt_v1_information *table_info =
198 (struct phm_ppt_v1_information *)(hwmgr->pptable);
199
200 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
201
202 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
203 for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
204 voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
205 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
206 break;
207 }
208
209 PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
210 "Can't find requested voltage id in vdd_dep_on_sclk table!",
211 return -EINVAL;
212 );
213
214 *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
215
216 return 0;
217}
218
219/**
220* Get Leakage VDDC based on leakage ID.
221*
222* @param hwmgr the address of the powerplay hardware manager.
223* @return always 0
224*/
225static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
226{
227 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
228 uint16_t vv_id;
229 uint16_t vddc = 0;
230 uint16_t evv_default = 1150;
231 uint16_t i, j;
232 uint32_t sclk = 0;
233 struct phm_ppt_v1_information *table_info =
234 (struct phm_ppt_v1_information *)hwmgr->pptable;
235 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
236 table_info->vdd_dep_on_sclk;
237 int result;
238
239 for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
240 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
241 if (!fiji_get_sclk_for_voltage_evv(hwmgr,
242 table_info->vddc_lookup_table, vv_id, &sclk)) {
243 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
244 PHM_PlatformCaps_ClockStretcher)) {
245 for (j = 1; j < sclk_table->count; j++) {
246 if (sclk_table->entries[j].clk == sclk &&
247 sclk_table->entries[j].cks_enable == 0) {
248 sclk += 5000;
249 break;
250 }
251 }
252 }
253
254 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_EnableDriverEVV))
256 result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
257 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
258 else
259 result = -EINVAL;
260
261 if (result)
262 result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
263 VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);
264
265 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
266 PP_ASSERT_WITH_CODE((vddc < 2000),
267 "Invalid VDDC value, greater than 2v!", result = -EINVAL;);
268
269 if (result)
270 /* 1.15V is the default safe value for Fiji */
271 vddc = evv_default;
272
273 /* the voltage should not be zero nor equal to leakage ID */
274 if (vddc != 0 && vddc != vv_id) {
275 data->vddc_leakage.actual_voltage
276 [data->vddc_leakage.count] = vddc;
277 data->vddc_leakage.leakage_id
278 [data->vddc_leakage.count] = vv_id;
279 data->vddc_leakage.count++;
280 }
281 }
282 }
283 return 0;
284}
285
286/**
287 * Change virtual leakage voltage to actual value.
288 *
289 * @param hwmgr the address of the powerplay hardware manager.
290 * @param pointer to changing voltage
291 * @param pointer to leakage table
292 */
293static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
294 uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
295{
296 uint32_t index;
297
298 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
299 for (index = 0; index < leakage_table->count; index++) {
300 /* if this voltage matches a leakage voltage ID */
301 /* patch with actual leakage voltage */
302 if (leakage_table->leakage_id[index] == *voltage) {
303 *voltage = leakage_table->actual_voltage[index];
304 break;
305 }
306 }
307
308 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
309 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
310}
311
312/**
313* Patch voltage lookup table by EVV leakages.
314*
315* @param hwmgr the address of the powerplay hardware manager.
316* @param pointer to voltage lookup table
317* @param pointer to leakage table
318* @return always 0
319*/
320static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
321 phm_ppt_v1_voltage_lookup_table *lookup_table,
322 struct fiji_leakage_voltage *leakage_table)
323{
324 uint32_t i;
325
326 for (i = 0; i < lookup_table->count; i++)
327 fiji_patch_with_vdd_leakage(hwmgr,
328 &lookup_table->entries[i].us_vdd, leakage_table);
329
330 return 0;
331}
332
333static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
334 struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
335 uint16_t *vddc)
336{
337 struct phm_ppt_v1_information *table_info =
338 (struct phm_ppt_v1_information *)(hwmgr->pptable);
339 fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
340 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
341 table_info->max_clock_voltage_on_dc.vddc;
342 return 0;
343}
344
345static int fiji_patch_voltage_dependency_tables_with_lookup_table(
346 struct pp_hwmgr *hwmgr)
347{
348 uint8_t entryId;
349 uint8_t voltageId;
350 struct phm_ppt_v1_information *table_info =
351 (struct phm_ppt_v1_information *)(hwmgr->pptable);
352
353 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
354 table_info->vdd_dep_on_sclk;
355 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
356 table_info->vdd_dep_on_mclk;
357 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
358 table_info->mm_dep_table;
359
360 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
361 voltageId = sclk_table->entries[entryId].vddInd;
362 sclk_table->entries[entryId].vddc =
363 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
364 }
365
366 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
367 voltageId = mclk_table->entries[entryId].vddInd;
368 mclk_table->entries[entryId].vddc =
369 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
370 }
371
372 for (entryId = 0; entryId < mm_table->count; ++entryId) {
373 voltageId = mm_table->entries[entryId].vddcInd;
374 mm_table->entries[entryId].vddc =
375 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
376 }
377
378 return 0;
379
380}
381
382static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
383{
384 /* Need to determine if we need calculated voltage. */
385 return 0;
386}
387
388static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
389{
390 /* Need to determine if we need calculated voltage from mm table. */
391 return 0;
392}
393
394static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
395 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
396{
397 uint32_t table_size, i, j;
398 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
399 table_size = lookup_table->count;
400
401 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
402 "Lookup table is empty", return -EINVAL);
403
404 /* Sorting voltages */
405 for (i = 0; i < table_size - 1; i++) {
406 for (j = i + 1; j > 0; j--) {
407 if (lookup_table->entries[j].us_vdd <
408 lookup_table->entries[j - 1].us_vdd) {
409 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
410 lookup_table->entries[j - 1] = lookup_table->entries[j];
411 lookup_table->entries[j] = tmp_voltage_lookup_record;
412 }
413 }
414 }
415
416 return 0;
417}
418
419static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
420{
421 int result = 0;
422 int tmp_result;
423 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
424 struct phm_ppt_v1_information *table_info =
425 (struct phm_ppt_v1_information *)(hwmgr->pptable);
426
427 tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
428 table_info->vddc_lookup_table, &(data->vddc_leakage));
429 if (tmp_result)
430 result = tmp_result;
431
432 tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
433 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
434 if (tmp_result)
435 result = tmp_result;
436
437 tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
438 if (tmp_result)
439 result = tmp_result;
440
441 tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
442 if (tmp_result)
443 result = tmp_result;
444
445 tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
446 if (tmp_result)
447 result = tmp_result;
448
449 tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
450 if(tmp_result)
451 result = tmp_result;
452
453 return result;
454}
455
456static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
457{
458 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
459 struct phm_ppt_v1_information *table_info =
460 (struct phm_ppt_v1_information *)(hwmgr->pptable);
461
462 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
463 table_info->vdd_dep_on_sclk;
464 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
465 table_info->vdd_dep_on_mclk;
466
467 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
468 "VDD dependency on SCLK table is missing. \
469 This table is mandatory", return -EINVAL);
470 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
471 "VDD dependency on SCLK table has to have is missing. \
472 This table is mandatory", return -EINVAL);
473
474 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
475 "VDD dependency on MCLK table is missing. \
476 This table is mandatory", return -EINVAL);
477 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
478 "VDD dependency on MCLK table has to have is missing. \
479 This table is mandatory", return -EINVAL);
480
481 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
482 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
483 entries[allowed_sclk_vdd_table->count - 1].vddc;
484
485 table_info->max_clock_voltage_on_ac.sclk =
486 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
487 table_info->max_clock_voltage_on_ac.mclk =
488 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
489 table_info->max_clock_voltage_on_ac.vddc =
490 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
491 table_info->max_clock_voltage_on_ac.vddci =
492 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
493
494 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
495 table_info->max_clock_voltage_on_ac.sclk;
496 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
497 table_info->max_clock_voltage_on_ac.mclk;
498 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
499 table_info->max_clock_voltage_on_ac.vddc;
500 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
501 table_info->max_clock_voltage_on_ac.vddci;
502
503 return 0;
504}
505
506static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
507{
508 uint32_t speedCntl = 0;
509
510 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
511 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
512 ixPCIE_LC_SPEED_CNTL);
513 return((uint16_t)PHM_GET_FIELD(speedCntl,
514 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
515}
516
517static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
518{
519 uint32_t link_width;
520
521 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
522 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
523 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
524
525 PP_ASSERT_WITH_CODE((7 >= link_width),
526 "Invalid PCIe lane width!", return 0);
527
528 return decode_pcie_lane_width(link_width);
529}
530
531/** Patch the Boot State to match VBIOS boot clocks and voltage.
532*
533* @param hwmgr Pointer to the hardware manager.
534* @param pPowerState The address of the PowerState instance being created.
535*
536*/
537static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
538 struct pp_hw_power_state *hw_ps)
539{
540 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
541 struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
542 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
543 uint16_t size;
544 uint8_t frev, crev;
545 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
546
547 /* First retrieve the Boot clocks and VDDC from the firmware info table.
548 * We assume here that fw_info is unchanged if this call fails.
549 */
550 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
551 hwmgr->device, index,
552 &size, &frev, &crev);
553 if (!fw_info)
554 /* During a test, there is no firmware info table. */
555 return 0;
556
557 /* Patch the state. */
558 data->vbios_boot_state.sclk_bootup_value =
559 le32_to_cpu(fw_info->ulDefaultEngineClock);
560 data->vbios_boot_state.mclk_bootup_value =
561 le32_to_cpu(fw_info->ulDefaultMemoryClock);
562 data->vbios_boot_state.mvdd_bootup_value =
563 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
564 data->vbios_boot_state.vddc_bootup_value =
565 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
566 data->vbios_boot_state.vddci_bootup_value =
567 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
568 data->vbios_boot_state.pcie_gen_bootup_value =
569 fiji_get_current_pcie_speed(hwmgr);
570 data->vbios_boot_state.pcie_lane_bootup_value =
571 (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
572
573 /* set boot power state */
574 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
575 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
576 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
577 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
578
579 return 0;
580}
581
/* Tear down the Fiji hwmgr backend; all cleanup is delegated to the
 * generic phm helper.
 */
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
586
/* Allocate and initialize the Fiji-specific hwmgr backend: defaults,
 * voltage-control probing, pptable-derived data, fan/thermal limits and
 * PCIe capability queries.  Called once at powerplay init.
 *
 * NOTE(review): this function returns 0 unconditionally, even on the
 * failure path that calls fiji_hwmgr_backend_fini() — confirm callers
 * do not rely on a non-zero return to detect init failure.
 */
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data;
	uint32_t i;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	bool stay_in_boot;
	int result;

	data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	/* Static defaults. */
	data->dll_default_on = false;
	data->sram_end = SMC_RAM_END;

	for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
		data->activity_target[i] = FIJI_AT_DFLT;

	data->vddc_vddci_delta = VDDC_VDDCI_DELTA;

	data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
	data->mclk_dpm0_activity_target = 0xa;

	/* 0 = DPM enabled for sclk/mclk/pcie. */
	data->sclk_dpm_key_disabled = 0;
	data->mclk_dpm_key_disabled = 0;
	data->pcie_dpm_key_disabled = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	data->gpio_debug = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	/* need to set voltage control types before EVV patching */
	data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
	data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;

	data->force_pcie_gen = PP_PCIEGenInvalid;

	/* Probe which regulator interface (SVID2/GPIO) controls each rail. */
	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl))
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;

	/* NOTE(review): this sets (not clears) EnableMVDDControl when MVDD
	 * control is unavailable — confirm intent; the VDDCI branch below
	 * uses phm_cap_unset in the analogous situation. */
	if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	/* table_info may be NULL here; later dereferences below assume the
	 * 0 == result path implies a valid pptable — TODO confirm. */
	if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	fiji_init_dpm_defaults(hwmgr);

	/* Get leakage voltage based on leakage ID. */
	fiji_get_evv_voltages(hwmgr);

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	fiji_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	fiji_set_private_data_based_on_pptable(hwmgr);

	/* ULV Support */
	data->ulv.ulv_supported = true; /* ULV feature is enabled by default */

	/* Initalize Dynamic State Adjustment Rule Settings */
	result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);

	if (!result) {
		data->uvd_enabled = false;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableSMU7ThermalManagement);
		data->vddc_phase_shed_control = false;
	}

	/* NOTE(review): stay_in_boot is computed but never read in this
	 * function — looks like leftover code; confirm before removing. */
	stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StayInBootState);

	if (0 == result) {
		struct cgs_system_info sys_info = {0};

		data->is_tlu_enabled = false;
		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
				FIJI_MAX_HARDWARE_POWERLEVELS;
		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_FanSpeedInTableIsRPM);

		/* Copy fan/thermal limits from the pptable into dyn_state. */
		if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucFanControlMode) {
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
					hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
					hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
					table_info->cac_dtp_table->usOperatingTempMinLimit;
			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
					table_info->cac_dtp_table->usOperatingTempMaxLimit;
			hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
					table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
			hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
					table_info->cac_dtp_table->usOperatingTempStep;
			hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
					table_info->cac_dtp_table->usTargetOperatingTemp;

			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
		}

		/* Query PCIe gen capability; fall back to the driver default mask. */
		sys_info.size = sizeof(struct cgs_system_info);
		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
		result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		else
			data->pcie_gen_cap = (uint32_t)sys_info.value;
		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
			data->pcie_spc_cap = 20;
		/* Query PCIe max link width; same fallback pattern. */
		sys_info.size = sizeof(struct cgs_system_info);
		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
		result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		else
			data->pcie_lane_cap = (uint32_t)sys_info.value;
	} else {
		/* Ignore return value in here, we are cleaning up a mess. */
		fiji_hwmgr_backend_fini(hwmgr);
	}

	return 0;
}
752
753/**
754 * Read clock related registers.
755 *
756 * @param hwmgr the address of the powerplay hardware manager.
757 * @return always 0
758 */
759static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
760{
761 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
762
763 data->clock_registers.vCG_SPLL_FUNC_CNTL =
764 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
765 ixCG_SPLL_FUNC_CNTL);
766 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
767 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
768 ixCG_SPLL_FUNC_CNTL_2);
769 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
770 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
771 ixCG_SPLL_FUNC_CNTL_3);
772 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
773 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
774 ixCG_SPLL_FUNC_CNTL_4);
775 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
776 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
777 ixCG_SPLL_SPREAD_SPECTRUM);
778 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
779 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
780 ixCG_SPLL_SPREAD_SPECTRUM_2);
781
782 return 0;
783}
784
785/**
786 * Find out if memory is GDDR5.
787 *
788 * @param hwmgr the address of the powerplay hardware manager.
789 * @return always 0
790 */
791static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
792{
793 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
794 uint32_t temp;
795
796 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
797
798 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
799 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
800 MC_SEQ_MISC0_GDDR5_SHIFT));
801
802 return 0;
803}
804
805/**
806 * Enables Dynamic Power Management by SMC
807 *
808 * @param hwmgr the address of the powerplay hardware manager.
809 * @return always 0
810 */
811static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
812{
813 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
814 GENERAL_PWRMGT, STATIC_PM_EN, 1);
815
816 return 0;
817}
818
819/**
820 * Initialize PowerGating States for different engines
821 *
822 * @param hwmgr the address of the powerplay hardware manager.
823 * @return always 0
824 */
825static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
826{
827 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
828
829 data->uvd_power_gated = false;
830 data->vce_power_gated = false;
831 data->samu_power_gated = false;
832 data->acp_power_gated = false;
833 data->pg_acp_init = true;
834
835 return 0;
836}
837
/* Reset the low-SCLK interrupt threshold to 0 (disabled) at init. */
static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
845
/* One-time ASIC setup: snapshot clock registers, detect memory type,
 * enable ACPI PM, reset power-gating state, read the MC microcode
 * version and clear the sclk threshold.  Every step runs even if an
 * earlier one failed; the last failure code is returned.
 */
static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int ret, result = 0;

	ret = fiji_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to read clock registers!", result = ret);

	ret = fiji_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to get memory type!", result = ret);

	ret = fiji_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to enable ACPI power management!", result = ret);

	ret = fiji_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to init power gate state!", result = ret);

	ret = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to get MC microcode version!", result = ret);

	ret = fiji_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == ret),
			"Failed to init sclk threshold!", result = ret);

	return result;
}
876
877/**
878* Checks if we want to support voltage control
879*
880* @param hwmgr the address of the powerplay hardware manager.
881*/
882static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
883{
884 const struct fiji_hwmgr *data =
885 (const struct fiji_hwmgr *)(hwmgr->backend);
886
887 return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
888}
889
890/**
891* Enable voltage control
892*
893* @param hwmgr the address of the powerplay hardware manager.
894* @return always 0
895*/
896static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
897{
898 /* enable voltage control */
899 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
900 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
901
902 return 0;
903}
904
905/**
906* Remove repeated voltage values and create table with unique values.
907*
908* @param hwmgr the address of the powerplay hardware manager.
909* @param vol_table the pointer to changing voltage table
910* @return 0 in success
911*/
912
913static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
914 struct pp_atomctrl_voltage_table *vol_table)
915{
916 uint32_t i, j;
917 uint16_t vvalue;
918 bool found = false;
919 struct pp_atomctrl_voltage_table *table;
920
921 PP_ASSERT_WITH_CODE((NULL != vol_table),
922 "Voltage Table empty.", return -EINVAL);
923 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
924 GFP_KERNEL);
925
926 if (NULL == table)
927 return -ENOMEM;
928
929 table->mask_low = vol_table->mask_low;
930 table->phase_delay = vol_table->phase_delay;
931
932 for (i = 0; i < vol_table->count; i++) {
933 vvalue = vol_table->entries[i].value;
934 found = false;
935
936 for (j = 0; j < table->count; j++) {
937 if (vvalue == table->entries[j].value) {
938 found = true;
939 break;
940 }
941 }
942
943 if (!found) {
944 table->entries[table->count].value = vvalue;
945 table->entries[table->count].smio_low =
946 vol_table->entries[i].smio_low;
947 table->count++;
948 }
949 }
950
951 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
952 kfree(table);
953
954 return 0;
955}
956
957static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
958 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
959{
960 uint32_t i;
961 int result;
962 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
963 struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
964
965 PP_ASSERT_WITH_CODE((0 != dep_table->count),
966 "Voltage Dependency Table empty.", return -EINVAL);
967
968 vol_table->mask_low = 0;
969 vol_table->phase_delay = 0;
970 vol_table->count = dep_table->count;
971
972 for (i = 0; i < dep_table->count; i++) {
973 vol_table->entries[i].value = dep_table->entries[i].mvdd;
974 vol_table->entries[i].smio_low = 0;
975 }
976
977 result = fiji_trim_voltage_table(hwmgr, vol_table);
978 PP_ASSERT_WITH_CODE((0 == result),
979 "Failed to trim MVDD table.", return result);
980
981 return 0;
982}
983
984static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
985 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
986{
987 uint32_t i;
988 int result;
989 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
990 struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
991
992 PP_ASSERT_WITH_CODE((0 != dep_table->count),
993 "Voltage Dependency Table empty.", return -EINVAL);
994
995 vol_table->mask_low = 0;
996 vol_table->phase_delay = 0;
997 vol_table->count = dep_table->count;
998
999 for (i = 0; i < dep_table->count; i++) {
1000 vol_table->entries[i].value = dep_table->entries[i].vddci;
1001 vol_table->entries[i].smio_low = 0;
1002 }
1003
1004 result = fiji_trim_voltage_table(hwmgr, vol_table);
1005 PP_ASSERT_WITH_CODE((0 == result),
1006 "Failed to trim VDDCI table.", return result);
1007
1008 return 0;
1009}
1010
1011static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1012 phm_ppt_v1_voltage_lookup_table *lookup_table)
1013{
1014 int i = 0;
1015 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1016 struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
1017
1018 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
1019 "Voltage Lookup Table empty.", return -EINVAL);
1020
1021 vol_table->mask_low = 0;
1022 vol_table->phase_delay = 0;
1023
1024 vol_table->count = lookup_table->count;
1025
1026 for (i = 0; i < vol_table->count; i++) {
1027 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
1028 vol_table->entries[i].smio_low = 0;
1029 }
1030
1031 return 0;
1032}
1033
1034/* ---- Voltage Tables ----
1035 * If the voltage table would be bigger than
1036 * what will fit into the state table on
1037 * the SMC keep only the higher entries.
1038 */
1039static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
1040 uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
1041{
1042 unsigned int i, diff;
1043
1044 if (vol_table->count <= max_vol_steps)
1045 return;
1046
1047 diff = vol_table->count - max_vol_steps;
1048
1049 for (i = 0; i < max_vol_steps; i++)
1050 vol_table->entries[i] = vol_table->entries[i + diff];
1051
1052 vol_table->count = max_vol_steps;
1053
1054 return;
1055}
1056
1057/**
1058* Create Voltage Tables.
1059*
1060* @param hwmgr the address of the powerplay hardware manager.
1061* @return always 0
1062*/
1063static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1064{
1065 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1066 struct phm_ppt_v1_information *table_info =
1067 (struct phm_ppt_v1_information *)hwmgr->pptable;
1068 int result;
1069
1070 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1071 result = atomctrl_get_voltage_table_v3(hwmgr,
1072 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
1073 &(data->mvdd_voltage_table));
1074 PP_ASSERT_WITH_CODE((0 == result),
1075 "Failed to retrieve MVDD table.",
1076 return result);
1077 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1078 result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
1079 table_info->vdd_dep_on_mclk);
1080 PP_ASSERT_WITH_CODE((0 == result),
1081 "Failed to retrieve SVI2 MVDD table from dependancy table.",
1082 return result;);
1083 }
1084
1085 if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1086 result = atomctrl_get_voltage_table_v3(hwmgr,
1087 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
1088 &(data->vddci_voltage_table));
1089 PP_ASSERT_WITH_CODE((0 == result),
1090 "Failed to retrieve VDDCI table.",
1091 return result);
1092 } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1093 result = fiji_get_svi2_vddci_voltage_table(hwmgr,
1094 table_info->vdd_dep_on_mclk);
1095 PP_ASSERT_WITH_CODE((0 == result),
1096 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
1097 return result);
1098 }
1099
1100 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1101 result = fiji_get_svi2_vdd_voltage_table(hwmgr,
1102 table_info->vddc_lookup_table);
1103 PP_ASSERT_WITH_CODE((0 == result),
1104 "Failed to retrieve SVI2 VDDC table from lookup table.",
1105 return result);
1106 }
1107
1108 PP_ASSERT_WITH_CODE(
1109 (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
1110 "Too many voltage values for VDDC. Trimming to fit state table.",
1111 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1112 SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
1113
1114 PP_ASSERT_WITH_CODE(
1115 (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
1116 "Too many voltage values for VDDCI. Trimming to fit state table.",
1117 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1118 SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
1119
1120 PP_ASSERT_WITH_CODE(
1121 (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
1122 "Too many voltage values for MVDD. Trimming to fit state table.",
1123 fiji_trim_voltage_table_to_fit_state_table(hwmgr,
1124 SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
1125
1126 return 0;
1127}
1128
1129static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
1130{
1131 /* Program additional LP registers
1132 * that are no longer programmed by VBIOS
1133 */
1134 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
1135 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
1136 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
1137 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
1138 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
1139 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
1140 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
1141 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
1142 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
1143 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
1144 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
1145 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
1146 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
1147 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
1148
1149 return 0;
1150}
1151
1152/**
1153* Programs static screed detection parameters
1154*
1155* @param hwmgr the address of the powerplay hardware manager.
1156* @return always 0
1157*/
1158static int fiji_program_static_screen_threshold_parameters(
1159 struct pp_hwmgr *hwmgr)
1160{
1161 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1162
1163 /* Set static screen threshold unit */
1164 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1165 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
1166 data->static_screen_threshold_unit);
1167 /* Set static screen threshold */
1168 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1169 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
1170 data->static_screen_threshold);
1171
1172 return 0;
1173}
1174
1175/**
1176* Setup display gap for glitch free memory clock switching.
1177*
1178* @param hwmgr the address of the powerplay hardware manager.
1179* @return always 0
1180*/
1181static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
1182{
1183 uint32_t displayGap =
1184 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1185 ixCG_DISPLAY_GAP_CNTL);
1186
1187 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1188 DISP_GAP, DISPLAY_GAP_IGNORE);
1189
1190 displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
1191 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
1192
1193 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1194 ixCG_DISPLAY_GAP_CNTL, displayGap);
1195
1196 return 0;
1197}
1198
1199/**
1200* Programs activity state transition voting clients
1201*
1202* @param hwmgr the address of the powerplay hardware manager.
1203* @return always 0
1204*/
1205static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
1206{
1207 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1208
1209 /* Clear reset for voting clients before enabling DPM */
1210 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1211 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
1212 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1213 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
1214
1215 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1216 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
1217 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1218 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
1219 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1220 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
1221 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1222 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
1223 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1224 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
1225 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1226 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
1227 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1228 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
1229 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1230 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
1231
1232 return 0;
1233}
1234
1235static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
1236{
1237 /* Reset voting clients before disabling DPM */
1238 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1239 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
1240 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1241 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
1242
1243 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1244 ixCG_FREQ_TRAN_VOTING_0, 0);
1245 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1246 ixCG_FREQ_TRAN_VOTING_1, 0);
1247 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1248 ixCG_FREQ_TRAN_VOTING_2, 0);
1249 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1250 ixCG_FREQ_TRAN_VOTING_3, 0);
1251 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1252 ixCG_FREQ_TRAN_VOTING_4, 0);
1253 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1254 ixCG_FREQ_TRAN_VOTING_5, 0);
1255 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1256 ixCG_FREQ_TRAN_VOTING_6, 0);
1257 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1258 ixCG_FREQ_TRAN_VOTING_7, 0);
1259
1260 return 0;
1261}
1262
1263/**
1264* Get the location of various tables inside the FW image.
1265*
1266* @param hwmgr the address of the powerplay hardware manager.
1267* @return always 0
1268*/
1269static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
1270{
1271 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1272 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1273 uint32_t tmp;
1274 int result;
1275 bool error = false;
1276
1277 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1278 SMU7_FIRMWARE_HEADER_LOCATION +
1279 offsetof(SMU73_Firmware_Header, DpmTable),
1280 &tmp, data->sram_end);
1281
1282 if (0 == result)
1283 data->dpm_table_start = tmp;
1284
1285 error |= (0 != result);
1286
1287 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1288 SMU7_FIRMWARE_HEADER_LOCATION +
1289 offsetof(SMU73_Firmware_Header, SoftRegisters),
1290 &tmp, data->sram_end);
1291
1292 if (!result) {
1293 data->soft_regs_start = tmp;
1294 smu_data->soft_regs_start = tmp;
1295 }
1296
1297 error |= (0 != result);
1298
1299 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1300 SMU7_FIRMWARE_HEADER_LOCATION +
1301 offsetof(SMU73_Firmware_Header, mcRegisterTable),
1302 &tmp, data->sram_end);
1303
1304 if (!result)
1305 data->mc_reg_table_start = tmp;
1306
1307 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1308 SMU7_FIRMWARE_HEADER_LOCATION +
1309 offsetof(SMU73_Firmware_Header, FanTable),
1310 &tmp, data->sram_end);
1311
1312 if (!result)
1313 data->fan_table_start = tmp;
1314
1315 error |= (0 != result);
1316
1317 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1318 SMU7_FIRMWARE_HEADER_LOCATION +
1319 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
1320 &tmp, data->sram_end);
1321
1322 if (!result)
1323 data->arb_table_start = tmp;
1324
1325 error |= (0 != result);
1326
1327 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
1328 SMU7_FIRMWARE_HEADER_LOCATION +
1329 offsetof(SMU73_Firmware_Header, Version),
1330 &tmp, data->sram_end);
1331
1332 if (!result)
1333 hwmgr->microcode_version_info.SMC = tmp;
1334
1335 error |= (0 != result);
1336
1337 return error ? -1 : 0;
1338}
1339
1340/* Copy one arb setting to another and then switch the active set.
1341 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
1342 */
1343static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
1344 uint32_t arb_src, uint32_t arb_dest)
1345{
1346 uint32_t mc_arb_dram_timing;
1347 uint32_t mc_arb_dram_timing2;
1348 uint32_t burst_time;
1349 uint32_t mc_cg_config;
1350
1351 switch (arb_src) {
1352 case MC_CG_ARB_FREQ_F0:
1353 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1354 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1355 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1356 break;
1357 case MC_CG_ARB_FREQ_F1:
1358 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
1359 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
1360 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
1361 break;
1362 default:
1363 return -EINVAL;
1364 }
1365
1366 switch (arb_dest) {
1367 case MC_CG_ARB_FREQ_F0:
1368 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
1369 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
1370 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
1371 break;
1372 case MC_CG_ARB_FREQ_F1:
1373 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
1374 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
1375 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
1376 break;
1377 default:
1378 return -EINVAL;
1379 }
1380
1381 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
1382 mc_cg_config |= 0x0000000F;
1383 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
1384 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
1385
1386 return 0;
1387}
1388
1389/**
1390* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1391*
1392* @param hwmgr the address of the powerplay hardware manager.
1393* @return if success then 0;
1394*/
1395static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
1396{
1397 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
1398}
1399
1400/**
1401* Initial switch from ARB F0->F1
1402*
1403* @param hwmgr the address of the powerplay hardware manager.
1404* @return always 0
1405* This function is to be called from the SetPowerState table.
1406*/
1407static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
1408{
1409 return fiji_copy_and_switch_arb_sets(hwmgr,
1410 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1411}
1412
1413static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
1414{
1415 uint32_t tmp;
1416
1417 tmp = (cgs_read_ind_register(hwmgr->device,
1418 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
1419 0x0000ff00) >> 8;
1420
1421 if (tmp == MC_CG_ARB_FREQ_F0)
1422 return 0;
1423
1424 return fiji_copy_and_switch_arb_sets(hwmgr,
1425 tmp, MC_CG_ARB_FREQ_F0);
1426}
1427
1428static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
1429 struct fiji_single_dpm_table *dpm_table, uint32_t count)
1430{
1431 int i;
1432 PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
1433 "Fatal error, can not set up single DPM table entries "
1434 "to exceed max number!",);
1435
1436 dpm_table->count = count;
1437 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
1438 dpm_table->dpm_levels[i].enabled = false;
1439
1440 return 0;
1441}
1442
1443static void fiji_setup_pcie_table_entry(
1444 struct fiji_single_dpm_table *dpm_table,
1445 uint32_t index, uint32_t pcie_gen,
1446 uint32_t pcie_lanes)
1447{
1448 dpm_table->dpm_levels[index].value = pcie_gen;
1449 dpm_table->dpm_levels[index].param1 = pcie_lanes;
1450 dpm_table->dpm_levels[index].enabled = true;
1451}
1452
/* Build the default PCIe speed DPM table, either from the pptable's
 * PCIe table or from hard-coded gen/lane caps, always reserving one
 * extra entry (at index count) for the boot level.
 */
static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint32_t i, max_entry;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	/* If only one of the two level sets is in use, mirror it into the
	 * other so both gen/lane pairs are always populated. */
	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}

	/* Start from a clean table with all levels disabled. */
	fiji_reset_single_dpm_table(hwmgr,
			&data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If PCIE table from PPTable have ULV entry + 8 entries,
		 * then ignore the last entry.*/
		max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
				SMU73_MAX_LEVELS_LINK : pcie_table->count;
		/* Entry 0 of the pptable is skipped; pptable entry i maps to
		 * dpm level i - 1, capped by the caps in pcie_gen/lane_cap. */
		for (i = 1; i < max_entry; i++) {
			fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
	} else {
		/* Hardcode Pcie Table: levels 0-1 at min gen, 2-5 at max gen,
		 * all at max lane width. */
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));

	return 0;
}
1538
1539/*
1540 * This function is to initalize all DPM state tables
1541 * for SMU7 based on the dependency table.
1542 * Dynamic state patching function will then trim these
1543 * state tables to the allowed range based
1544 * on the power policy or external client requests,
1545 * such as UVD request, etc.
1546 */
1547static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1548{
1549 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1550 struct phm_ppt_v1_information *table_info =
1551 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1552 uint32_t i;
1553
1554 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
1555 table_info->vdd_dep_on_sclk;
1556 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1557 table_info->vdd_dep_on_mclk;
1558
1559 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
1560 "SCLK dependency table is missing. This table is mandatory",
1561 return -EINVAL);
1562 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
1563 "SCLK dependency table has to have is missing. "
1564 "This table is mandatory",
1565 return -EINVAL);
1566
1567 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
1568 "MCLK dependency table is missing. This table is mandatory",
1569 return -EINVAL);
1570 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1571 "MCLK dependency table has to have is missing. "
1572 "This table is mandatory",
1573 return -EINVAL);
1574
1575 /* clear the state table to reset everything to default */
1576 fiji_reset_single_dpm_table(hwmgr,
1577 &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
1578 fiji_reset_single_dpm_table(hwmgr,
1579 &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
1580
1581 /* Initialize Sclk DPM table based on allow Sclk values */
1582 data->dpm_table.sclk_table.count = 0;
1583 for (i = 0; i < dep_sclk_table->count; i++) {
1584 if (i == 0 || data->dpm_table.sclk_table.dpm_levels
1585 [data->dpm_table.sclk_table.count - 1].value !=
1586 dep_sclk_table->entries[i].clk) {
1587 data->dpm_table.sclk_table.dpm_levels
1588 [data->dpm_table.sclk_table.count].value =
1589 dep_sclk_table->entries[i].clk;
1590 data->dpm_table.sclk_table.dpm_levels
1591 [data->dpm_table.sclk_table.count].enabled =
1592 (i == 0) ? true : false;
1593 data->dpm_table.sclk_table.count++;
1594 }
1595 }
1596
1597 /* Initialize Mclk DPM table based on allow Mclk values */
1598 data->dpm_table.mclk_table.count = 0;
1599 for (i=0; i<dep_mclk_table->count; i++) {
1600 if ( i==0 || data->dpm_table.mclk_table.dpm_levels
1601 [data->dpm_table.mclk_table.count - 1].value !=
1602 dep_mclk_table->entries[i].clk) {
1603 data->dpm_table.mclk_table.dpm_levels
1604 [data->dpm_table.mclk_table.count].value =
1605 dep_mclk_table->entries[i].clk;
1606 data->dpm_table.mclk_table.dpm_levels
1607 [data->dpm_table.mclk_table.count].enabled =
1608 (i == 0) ? true : false;
1609 data->dpm_table.mclk_table.count++;
1610 }
1611 }
1612
1613 /* setup PCIE gen speed levels */
1614 fiji_setup_default_pcie_table(hwmgr);
1615
1616 /* save a copy of the default DPM table */
1617 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1618 sizeof(struct fiji_dpm_table));
1619
1620 return 0;
1621}
1622
1623/**
1624 * @brief PhwFiji_GetVoltageOrder
1625 * Returns index of requested voltage record in lookup(table)
1626 * @param lookup_table - lookup list to search in
1627 * @param voltage - voltage to look for
1628 * @return 0 on success
1629 */
1630static uint8_t fiji_get_voltage_index(
1631 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
1632{
1633 uint8_t count = (uint8_t) (lookup_table->count);
1634 uint8_t i;
1635
1636 PP_ASSERT_WITH_CODE((NULL != lookup_table),
1637 "Lookup Table empty.", return 0);
1638 PP_ASSERT_WITH_CODE((0 != count),
1639 "Lookup Table empty.", return 0);
1640
1641 for (i = 0; i < lookup_table->count; i++) {
1642 /* find first voltage equal or bigger than requested */
1643 if (lookup_table->entries[i].us_vdd >= voltage)
1644 return i;
1645 }
1646 /* voltage is bigger than max voltage in the table */
1647 return i - 1;
1648}
1649
1650/**
1651* Preparation of vddc and vddgfx CAC tables for SMC.
1652*
1653* @param hwmgr the address of the hardware manager
1654* @param table the SMC DPM table structure to be populated
1655* @return always 0
1656*/
1657static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
1658 struct SMU73_Discrete_DpmTable *table)
1659{
1660 uint32_t count;
1661 uint8_t index;
1662 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1663 struct phm_ppt_v1_information *table_info =
1664 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1665 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
1666 table_info->vddc_lookup_table;
1667 /* tables is already swapped, so in order to use the value from it,
1668 * we need to swap it back.
1669 * We are populating vddc CAC data to BapmVddc table
1670 * in split and merged mode
1671 */
1672 for( count = 0; count<lookup_table->count; count++) {
1673 index = fiji_get_voltage_index(lookup_table,
1674 data->vddc_voltage_table.entries[count].value);
1675 table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
1676 (lookup_table->entries[index].us_cac_low *
1677 VOLTAGE_SCALE)) / 25);
1678 table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
1679 (lookup_table->entries[index].us_cac_high *
1680 VOLTAGE_SCALE)) / 25);
1681 }
1682
1683 return 0;
1684}
1685
1686/**
1687* Preparation of voltage tables for SMC.
1688*
1689* @param hwmgr the address of the hardware manager
1690* @param table the SMC DPM table structure to be populated
1691* @return always 0
1692*/
1693
1694static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1695 struct SMU73_Discrete_DpmTable *table)
1696{
1697 int result;
1698
1699 result = fiji_populate_cac_table(hwmgr, table);
1700 PP_ASSERT_WITH_CODE(0 == result,
1701 "can not populate CAC voltage tables to SMC",
1702 return -EINVAL);
1703
1704 return 0;
1705}
1706
1707static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
1708 struct SMU73_Discrete_Ulv *state)
1709{
1710 int result = 0;
1711 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1712 struct phm_ppt_v1_information *table_info =
1713 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1714
1715 state->CcPwrDynRm = 0;
1716 state->CcPwrDynRm1 = 0;
1717
1718 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
1719 state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
1720 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
1721
1722 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
1723
1724 if (!result) {
1725 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
1726 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
1727 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
1728 }
1729 return result;
1730}
1731
1732static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
1733 struct SMU73_Discrete_DpmTable *table)
1734{
1735 return fiji_populate_ulv_level(hwmgr, &table->Ulv);
1736}
1737
1738static int32_t fiji_get_dpm_level_enable_mask_value(
1739 struct fiji_single_dpm_table* dpm_table)
1740{
1741 int32_t i;
1742 int32_t mask = 0;
1743
1744 for (i = dpm_table->count; i > 0; i--) {
1745 mask = mask << 1;
1746 if (dpm_table->dpm_levels[i - 1].enabled)
1747 mask |= 0x1;
1748 else
1749 mask &= 0xFFFFFFFE;
1750 }
1751 return mask;
1752}
1753
/* Populate the SMC LinkLevel array from the PCIe speed DPM table and
 * record the level count and enable mask in the driver state.
 */
static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		/* gen speed in .value, lane count in .param1 */
		table->LinkLevel[i].PcieGenSpeed =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		/* fixed activity thresholds, byte-swapped for the SMC */
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
1781
1782/**
1783* Calculates the SCLK dividers using the provided engine clock
1784*
1785* @param hwmgr the address of the hardware manager
1786* @param clock the engine clock to use to populate the structure
1787* @param sclk the SMC SCLK structure to be populated
1788*/
1789static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
1790 uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
1791{
1792 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1793 struct pp_atomctrl_clock_dividers_vi dividers;
1794 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1795 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1796 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1797 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1798 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1799 uint32_t ref_clock;
1800 uint32_t ref_divider;
1801 uint32_t fbdiv;
1802 int result;
1803
1804 /* get the engine clock dividers for this clock value */
1805 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
1806
1807 PP_ASSERT_WITH_CODE(result == 0,
1808 "Error retrieving Engine Clock dividers from VBIOS.",
1809 return result);
1810
1811 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
1812 ref_clock = atomctrl_get_reference_clock(hwmgr);
1813 ref_divider = 1 + dividers.uc_pll_ref_div;
1814
1815 /* low 14 bits is fraction and high 12 bits is divider */
1816 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
1817
1818 /* SPLL_FUNC_CNTL setup */
1819 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1820 SPLL_REF_DIV, dividers.uc_pll_ref_div);
1821 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1822 SPLL_PDIV_A, dividers.uc_pll_post_div);
1823
1824 /* SPLL_FUNC_CNTL_3 setup*/
1825 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
1826 SPLL_FB_DIV, fbdiv);
1827
1828 /* set to use fractional accumulation*/
1829 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
1830 SPLL_DITHEN, 1);
1831
1832 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1833 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
1834 struct pp_atomctrl_internal_ss_info ssInfo;
1835
1836 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
1837 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
1838 vco_freq, &ssInfo)) {
1839 /*
1840 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
1841 * ss_info.speed_spectrum_rate -- in unit of khz
1842 *
1843 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
1844 */
1845 uint32_t clk_s = ref_clock * 5 /
1846 (ref_divider * ssInfo.speed_spectrum_rate);
1847 /* clkv = 2 * D * fbdiv / NS */
1848 uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
1849 fbdiv / (clk_s * 10000);
1850
1851 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
1852 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
1853 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
1854 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
1855 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
1856 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
1857 }
1858 }
1859
1860 sclk->SclkFrequency = clock;
1861 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
1862 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
1863 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
1864 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
1865 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
1866
1867 return 0;
1868}
1869
/* Return the smallest VDDCI table value that is >= the requested vddci;
 * if the request exceeds the table maximum, log and return the largest
 * entry.
 * NOTE(review): with an empty table the fallback reads entries[i-1]
 * with i == 0 -- presumably callers guarantee a populated table; verify.
 */
static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
{
	uint32_t  i;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_voltage_table *vddci_table =
			&(data->vddci_voltage_table);

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	PP_ASSERT_WITH_CODE(false,
			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
			return vddci_table->entries[i-1].value);
}
1886
1887static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
1888 struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
1889 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1890{
1891 uint32_t i;
1892 uint16_t vddci;
1893 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1894
1895 *voltage = *mvdd = 0;
1896
1897 /* clock - voltage dependency table is empty table */
1898 if (dep_table->count == 0)
1899 return -EINVAL;
1900
1901 for (i = 0; i < dep_table->count; i++) {
1902 /* find first sclk bigger than request */
1903 if (dep_table->entries[i].clk >= clock) {
1904 *voltage |= (dep_table->entries[i].vddc *
1905 VOLTAGE_SCALE) << VDDC_SHIFT;
1906 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1907 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1908 VOLTAGE_SCALE) << VDDCI_SHIFT;
1909 else if (dep_table->entries[i].vddci)
1910 *voltage |= (dep_table->entries[i].vddci *
1911 VOLTAGE_SCALE) << VDDCI_SHIFT;
1912 else {
1913 vddci = fiji_find_closest_vddci(hwmgr,
1914 (dep_table->entries[i].vddc -
1915 (uint16_t)data->vddc_vddci_delta));
1916 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1917 }
1918
1919 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1920 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
1921 VOLTAGE_SCALE;
1922 else if (dep_table->entries[i].mvdd)
1923 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
1924 VOLTAGE_SCALE;
1925
1926 *voltage |= 1 << PHASES_SHIFT;
1927 return 0;
1928 }
1929 }
1930
1931 /* sclk is bigger than max sclk in the dependence table */
1932 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1933
1934 if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
1935 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1936 VOLTAGE_SCALE) << VDDCI_SHIFT;
1937 else if (dep_table->entries[i-1].vddci) {
1938 vddci = fiji_find_closest_vddci(hwmgr,
1939 (dep_table->entries[i].vddc -
1940 (uint16_t)data->vddc_vddci_delta));
1941 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1942 }
1943
1944 if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1945 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
1946 else if (dep_table->entries[i].mvdd)
1947 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
1948
1949 return 0;
1950}
1951
1952static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
1953 uint32_t clock_insr)
1954{
1955 uint8_t i;
1956 uint32_t temp;
1957 uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
1958
1959 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
1960 for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1961 temp = clock >> i;
1962
1963 if (temp >= min || i == 0)
1964 break;
1965 }
1966 return i;
1967}
1968/**
1969* Populates single SMC SCLK structure using the provided engine clock
1970*
1971* @param hwmgr the address of the hardware manager
1972* @param clock the engine clock to use to populate the structure
1973* @param sclk the SMC SCLK structure to be populated
1974*/
1975
1976static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
1977 uint32_t clock, uint16_t sclk_al_threshold,
1978 struct SMU73_Discrete_GraphicsLevel *level)
1979{
1980 int result;
1981 /* PP_Clocks minClocks; */
1982 uint32_t threshold, mvdd;
1983 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
1984 struct phm_ppt_v1_information *table_info =
1985 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1986
1987 result = fiji_calculate_sclk_params(hwmgr, clock, level);
1988
1989 /* populate graphics levels */
1990 result = fiji_get_dependency_volt_by_clk(hwmgr,
1991 table_info->vdd_dep_on_sclk, clock,
1992 &level->MinVoltage, &mvdd);
1993 PP_ASSERT_WITH_CODE((0 == result),
1994 "can not find VDDC voltage value for "
1995 "VDDC engine clock dependency table",
1996 return result);
1997
1998 level->SclkFrequency = clock;
1999 level->ActivityLevel = sclk_al_threshold;
2000 level->CcPwrDynRm = 0;
2001 level->CcPwrDynRm1 = 0;
2002 level->EnabledForActivity = 0;
2003 level->EnabledForThrottle = 1;
2004 level->UpHyst = 10;
2005 level->DownHyst = 0;
2006 level->VoltageDownHyst = 0;
2007 level->PowerThrottle = 0;
2008
2009 threshold = clock * data->fast_watermark_threshold / 100;
2010
2011
2012 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
2013
2014 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
2015 level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
2016 hwmgr->display_config.min_core_set_clock_in_sr);
2017
2018
2019 /* Default to slow, highest DPM level will be
2020 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
2021 */
2022 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2023
2024 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
2025 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
2026 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
2027 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
2028 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
2029 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
2030 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
2031 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
2032 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
2033
2034 return 0;
2035}
2036/**
2037* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
2038*
2039* @param hwmgr the address of the hardware manager
2040*/
2041static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
2042{
2043 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2044 struct fiji_dpm_table *dpm_table = &data->dpm_table;
2045 struct phm_ppt_v1_information *table_info =
2046 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2047 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2048 uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
2049 int result = 0;
2050 uint32_t array = data->dpm_table_start +
2051 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
2052 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
2053 SMU73_MAX_LEVELS_GRAPHICS;
2054 struct SMU73_Discrete_GraphicsLevel *levels =
2055 data->smc_state_table.GraphicsLevel;
2056 uint32_t i, max_entry;
2057 uint8_t hightest_pcie_level_enabled = 0,
2058 lowest_pcie_level_enabled = 0,
2059 mid_pcie_level_enabled = 0,
2060 count = 0;
2061
2062 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2063 result = fiji_populate_single_graphic_level(hwmgr,
2064 dpm_table->sclk_table.dpm_levels[i].value,
2065 (uint16_t)data->activity_target[i],
2066 &levels[i]);
2067 if (result)
2068 return result;
2069
2070 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
2071 if (i > 1)
2072 levels[i].DeepSleepDivId = 0;
2073 }
2074
2075 /* Only enable level 0 for now.*/
2076 levels[0].EnabledForActivity = 1;
2077
2078 /* set highest level watermark to high */
2079 levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
2080 PPSMC_DISPLAY_WATERMARK_HIGH;
2081
2082 data->smc_state_table.GraphicsDpmLevelCount =
2083 (uint8_t)dpm_table->sclk_table.count;
2084 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2085 fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2086
2087 if (pcie_table != NULL) {
2088 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
2089 "There must be 1 or more PCIE levels defined in PPTable.",
2090 return -EINVAL);
2091 max_entry = pcie_entry_cnt - 1;
2092 for (i = 0; i < dpm_table->sclk_table.count; i++)
2093 levels[i].pcieDpmLevel =
2094 (uint8_t) ((i < max_entry)? i : max_entry);
2095 } else {
2096 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2097 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2098 (1 << (hightest_pcie_level_enabled + 1))) != 0 ))
2099 hightest_pcie_level_enabled++;
2100
2101 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2102 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2103 (1 << lowest_pcie_level_enabled)) == 0 ))
2104 lowest_pcie_level_enabled++;
2105
2106 while ((count < hightest_pcie_level_enabled) &&
2107 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2108 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
2109 count++;
2110
2111 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) <
2112 hightest_pcie_level_enabled?
2113 (lowest_pcie_level_enabled + 1 + count) :
2114 hightest_pcie_level_enabled;
2115
2116 /* set pcieDpmLevel to hightest_pcie_level_enabled */
2117 for(i = 2; i < dpm_table->sclk_table.count; i++)
2118 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
2119
2120 /* set pcieDpmLevel to lowest_pcie_level_enabled */
2121 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
2122
2123 /* set pcieDpmLevel to mid_pcie_level_enabled */
2124 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
2125 }
2126 /* level count will send to smc once at init smc table and never change */
2127 result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
2128 (uint32_t)array_size, data->sram_end);
2129
2130 return result;
2131}
2132
2133/**
2134 * MCLK Frequency Ratio
2135 * SEQ_CG_RESP Bit[31:24] - 0x0
2136 * Bit[27:24] \96 DDR3 Frequency ratio
2137 * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
2138 * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
2139 * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
2140 * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
2141 * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
2142 * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
2143 * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
2144 * 400 < 0x7 <= 450MHz, 800 < 0xF
2145 */
2146static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
2147{
2148 if (mem_clock <= 10000) return 0x0;
2149 if (mem_clock <= 15000) return 0x1;
2150 if (mem_clock <= 20000) return 0x2;
2151 if (mem_clock <= 25000) return 0x3;
2152 if (mem_clock <= 30000) return 0x4;
2153 if (mem_clock <= 35000) return 0x5;
2154 if (mem_clock <= 40000) return 0x6;
2155 if (mem_clock <= 45000) return 0x7;
2156 if (mem_clock <= 50000) return 0x8;
2157 if (mem_clock <= 55000) return 0x9;
2158 if (mem_clock <= 60000) return 0xa;
2159 if (mem_clock <= 65000) return 0xb;
2160 if (mem_clock <= 70000) return 0xc;
2161 if (mem_clock <= 75000) return 0xd;
2162 if (mem_clock <= 80000) return 0xe;
2163 /* mem_clock > 800MHz */
2164 return 0xf;
2165}
2166
2167/**
2168* Populates the SMC MCLK structure using the provided memory clock
2169*
2170* @param hwmgr the address of the hardware manager
2171* @param clock the memory clock to use to populate the structure
2172* @param sclk the SMC SCLK structure to be populated
2173*/
2174static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
2175 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
2176{
2177 struct pp_atomctrl_memory_clock_param mem_param;
2178 int result;
2179
2180 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
2181 PP_ASSERT_WITH_CODE((0 == result),
2182 "Failed to get Memory PLL Dividers.",);
2183
2184 /* Save the result data to outpupt memory level structure */
2185 mclk->MclkFrequency = clock;
2186 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
2187 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
2188
2189 return result;
2190}
2191
/* Populate one SMC memory level for the given memory clock: voltages
 * from the MCLK dependency table, fixed hysteresis/activity defaults,
 * stutter-mode decision, and the MPLL divider fields.
 */
static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;

	if (table_info->vdd_dep_on_mclk) {
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;

	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* enable stutter mode if all the follow condition applied
	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
	 * &(data->DisplayTiming.numExistingDisplays));
	 */
	data->display_timing.num_existing_displays = 1;

	/* stutter only below the threshold clock, with UVD idle, and when
	 * the display controller has stutter enabled */
	if ((data->mclk_stutter_mode_threshold) &&
			(clock <= data->mclk_stutter_mode_threshold) &&
			(!data->is_uvd_enabled) &&
			(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
					STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
	if (!result) {
		/* byte-swap multi-byte fields for the SMC */
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
2241
/**
* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm
* memory clock states, then uploads the whole level array to the SMC.
*
* @param hwmgr the address of the hardware manager
* @return 0 on success, a populate/upload error code otherwise
*/
static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_dpm_table *dpm_table = &data->dpm_table;
	int result;
	/* populate MCLK dpm table to SMU7 */
	/* SMC-side address of the MemoryLevel array inside the DPM table */
	uint32_t array = data->dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
	/* full capacity is uploaded, not just the populated count */
	uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
			SMU73_MAX_LEVELS_MEMORY;
	struct SMU73_Discrete_MemoryLevel *levels =
			data->smc_state_table.MemoryLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero",
				return -EINVAL);
		result = fiji_populate_single_memory_level(hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
	}

	/* Only enable level 0 for now. */
	levels[0].EnabledForActivity = 1;

	/* in order to prevent MC activity from stutter mode to push DPM up.
	 * the UVD change complements this by putting the MCLK in
	 * a higher state by default such that we are not effected by
	 * up threshold or and MCLK DPM latency.
	 */
	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);

	data->smc_state_table.MemoryDpmLevelCount =
			(uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* set highest level watermark to high */
	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change */
	result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, data->sram_end);

	return result;
}
2297
/**
* Populates the SMC MVDD structure using the provided memory clock.
*
* @param hwmgr the address of the hardware manager
* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
* @param smio_pat the SMIO pattern whose Voltage field is set on success
* @return 0 on success; -EINVAL if MVDD is not controlled or mclk exceeds
*         every entry in the dependency table
*/
static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
		uint32_t mclk, SMIO_Pattern *smio_pat)
{
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i = 0;

	if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find mvdd value which clock is more than request */
		/* NOTE(review): the matching index into vdd_dep_on_mclk is
		 * reused to index mvdd_voltage_table — assumes both tables
		 * have parallel entries; confirm against table setup code. */
		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}
		/* i == count means no entry covered the requested clock */
		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
				"MVDD Voltage is outside the supported range.",
				return -EINVAL);
	} else
		return -EINVAL;

	return 0;
}
2329
/* Build the ACPI (lowest-power) sclk and mclk levels of the SMC DPM table.
 * Clock and voltage come from DPM level 0 when the respective DPM is
 * enabled, otherwise from the VBIOS boot-up values. The SPLL is powered
 * down and held in reset with its output mux switched away, and all
 * multi-byte fields are converted to SMC byte order. */
static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (!data->sclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0,
		 * already converted to SMC_UL */
		table->ACPILevel.SclkFrequency =
				data->dpm_table.sclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_sclk,
				table->ACPILevel.SclkFrequency,
				&table->ACPILevel.MinVoltage, &mvdd);
		/* NOTE(review): on failure only a message is printed and the
		 * stale MinVoltage is kept — confirm this is acceptable. */
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDC voltage value "
				"in Clock Dependency Table",);
	} else {
		/* sclk DPM disabled: fall back to VBIOS boot clock/voltage */
		table->ACPILevel.SclkFrequency =
				data->vbios_boot_state.sclk_bootup_value;
		table->ACPILevel.MinVoltage =
				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
	}

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
			table->ACPILevel.SclkFrequency, &dividers);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power the SPLL down and hold it in reset for the ACPI state,
	 * and switch the sclk mux away from the SPLL output */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
			SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	if (!data->mclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
		table->MemoryACPILevel.MclkFrequency =
				data->dpm_table.mclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk,
				table->MemoryACPILevel.MclkFrequency,
				&table->MemoryACPILevel.MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDCI voltage value "
				"in Clock Dependency Table",);
	} else {
		table->MemoryACPILevel.MclkFrequency =
				data->vbios_boot_state.mclk_bootup_value;
		table->MemoryACPILevel.MinVoltage =
				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
	}

	/* MVDD: boot value when not controlled or mclk DPM disabled,
	 * otherwise the value matching DPM level 0's memory clock */
	us_mvdd = 0;
	if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!fiji_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
2450
2451static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
2452 SMU73_Discrete_DpmTable *table)
2453{
2454 int result = -EINVAL;
2455 uint8_t count;
2456 struct pp_atomctrl_clock_dividers_vi dividers;
2457 struct phm_ppt_v1_information *table_info =
2458 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2459 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2460 table_info->mm_dep_table;
2461 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2462
2463 table->VceLevelCount = (uint8_t)(mm_table->count);
2464 table->VceBootLevel = 0;
2465
2466 for(count = 0; count < table->VceLevelCount; count++) {
2467 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
2468 table->VceLevel[count].MinVoltage = 0;
2469 table->VceLevel[count].MinVoltage |=
2470 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
2471 table->VceLevel[count].MinVoltage |=
2472 ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
2473 VOLTAGE_SCALE) << VDDCI_SHIFT;
2474 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2475
2476 /*retrieve divider value for VBIOS */
2477 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2478 table->VceLevel[count].Frequency, &dividers);
2479 PP_ASSERT_WITH_CODE((0 == result),
2480 "can not find divide id for VCE engine clock",
2481 return result);
2482
2483 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2484
2485 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
2486 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
2487 }
2488 return result;
2489}
2490
2491static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
2492 SMU73_Discrete_DpmTable *table)
2493{
2494 int result = -EINVAL;
2495 uint8_t count;
2496 struct pp_atomctrl_clock_dividers_vi dividers;
2497 struct phm_ppt_v1_information *table_info =
2498 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2499 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2500 table_info->mm_dep_table;
2501 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2502
2503 table->AcpLevelCount = (uint8_t)(mm_table->count);
2504 table->AcpBootLevel = 0;
2505
2506 for (count = 0; count < table->AcpLevelCount; count++) {
2507 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
2508 table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2509 VOLTAGE_SCALE) << VDDC_SHIFT;
2510 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2511 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2512 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2513
2514 /* retrieve divider value for VBIOS */
2515 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2516 table->AcpLevel[count].Frequency, &dividers);
2517 PP_ASSERT_WITH_CODE((0 == result),
2518 "can not find divide id for engine clock", return result);
2519
2520 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2521
2522 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
2523 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
2524 }
2525 return result;
2526}
2527
2528static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
2529 SMU73_Discrete_DpmTable *table)
2530{
2531 int result = -EINVAL;
2532 uint8_t count;
2533 struct pp_atomctrl_clock_dividers_vi dividers;
2534 struct phm_ppt_v1_information *table_info =
2535 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2536 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2537 table_info->mm_dep_table;
2538 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2539
2540 table->SamuBootLevel = 0;
2541 table->SamuLevelCount = (uint8_t)(mm_table->count);
2542
2543 for (count = 0; count < table->SamuLevelCount; count++) {
2544 /* not sure whether we need evclk or not */
2545 table->SamuLevel[count].MinVoltage = 0;
2546 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
2547 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2548 VOLTAGE_SCALE) << VDDC_SHIFT;
2549 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2550 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2551 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2552
2553 /* retrieve divider value for VBIOS */
2554 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2555 table->SamuLevel[count].Frequency, &dividers);
2556 PP_ASSERT_WITH_CODE((0 == result),
2557 "can not find divide id for samu clock", return result);
2558
2559 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
2560
2561 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
2562 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
2563 }
2564 return result;
2565}
2566
/* Have the VBIOS program the DRAM timings for the (eng_clock, mem_clock)
 * pair, then read the resulting MC arbitration registers back into one
 * SMC ARB table entry. */
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
		int32_t eng_clock, int32_t mem_clock,
		struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	uint32_t dram_timing;
	uint32_t dram_timing2;
	uint32_t burstTime;
	ULONG state, trrds, trrdl;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
			eng_clock, mem_clock);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* capture what the VBIOS just programmed */
	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);

	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);

	/* NOTE(review): 'state' is extracted but never stored — confirm
	 * whether it was meant to go into the entry as well. */
	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;
	arb_regs->TRRDS = (uint8_t)trrds;
	arb_regs->TRRDL = (uint8_t)trrdl;

	return 0;
}
2598
2599static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
2600{
2601 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2602 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
2603 uint32_t i, j;
2604 int result = 0;
2605
2606 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
2607 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
2608 result = fiji_populate_memory_timing_parameters(hwmgr,
2609 data->dpm_table.sclk_table.dpm_levels[i].value,
2610 data->dpm_table.mclk_table.dpm_levels[j].value,
2611 &arb_regs.entries[i][j]);
2612 if (result)
2613 break;
2614 }
2615 }
2616
2617 if (!result)
2618 result = fiji_copy_bytes_to_smc(
2619 hwmgr->smumgr,
2620 data->arb_table_start,
2621 (uint8_t *)&arb_regs,
2622 sizeof(SMU73_Discrete_MCArbDramTimingTable),
2623 data->sram_end);
2624 return result;
2625}
2626
2627static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
2628 struct SMU73_Discrete_DpmTable *table)
2629{
2630 int result = -EINVAL;
2631 uint8_t count;
2632 struct pp_atomctrl_clock_dividers_vi dividers;
2633 struct phm_ppt_v1_information *table_info =
2634 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2635 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2636 table_info->mm_dep_table;
2637 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2638
2639 table->UvdLevelCount = (uint8_t)(mm_table->count);
2640 table->UvdBootLevel = 0;
2641
2642 for (count = 0; count < table->UvdLevelCount; count++) {
2643 table->UvdLevel[count].MinVoltage = 0;
2644 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
2645 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
2646 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2647 VOLTAGE_SCALE) << VDDC_SHIFT;
2648 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
2649 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
2650 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
2651
2652 /* retrieve divider value for VBIOS */
2653 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2654 table->UvdLevel[count].VclkFrequency, &dividers);
2655 PP_ASSERT_WITH_CODE((0 == result),
2656 "can not find divide id for Vclk clock", return result);
2657
2658 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
2659
2660 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
2661 table->UvdLevel[count].DclkFrequency, &dividers);
2662 PP_ASSERT_WITH_CODE((0 == result),
2663 "can not find divide id for Dclk clock", return result);
2664
2665 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
2666
2667 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
2668 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
2669 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
2670
2671 }
2672 return result;
2673}
2674
2675static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
2676 uint32_t value, uint32_t *boot_level)
2677{
2678 int result = -EINVAL;
2679 uint32_t i;
2680
2681 for (i = 0; i < table->count; i++) {
2682 if (value == table->dpm_levels[i].value) {
2683 *boot_level = i;
2684 result = 0;
2685 }
2686 }
2687 return result;
2688}
2689
2690static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
2691 struct SMU73_Discrete_DpmTable *table)
2692{
2693 int result = 0;
2694 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2695
2696 table->GraphicsBootLevel = 0;
2697 table->MemoryBootLevel = 0;
2698
2699 /* find boot level from dpm table */
2700 result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
2701 data->vbios_boot_state.sclk_bootup_value,
2702 (uint32_t *)&(table->GraphicsBootLevel));
2703
2704 result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
2705 data->vbios_boot_state.mclk_bootup_value,
2706 (uint32_t *)&(table->MemoryBootLevel));
2707
2708 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
2709 VOLTAGE_SCALE;
2710 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
2711 VOLTAGE_SCALE;
2712 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
2713 VOLTAGE_SCALE;
2714
2715 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
2716 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
2717 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
2718
2719 return 0;
2720}
2721
/* Record the boot (initial) sclk and mclk DPM levels in the SMC state table
 * by picking the first dependency-table entry whose clock is at or above the
 * VBIOS boot-up clock. If no entry qualifies, the level is left unchanged.
 * Always returns 0. (The "initailial" typo is kept: callers use this name.) */
static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint8_t count, level;

	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
	for (level = 0; level < count; level++) {
		if(table_info->vdd_dep_on_sclk->entries[level].clk >=
				data->vbios_boot_state.sclk_bootup_value) {
			data->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
	for (level = 0; level < count; level++) {
		if(table_info->vdd_dep_on_mclk->entries[level].clk >=
				data->vbios_boot_state.mclk_bootup_value) {
			data->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}

	return 0;
}
2749
/* Configure the SMC clock-stretcher (CKS) tables.
 * Reads fused ring-oscillator (RO) data to classify the part, computes a
 * per-sclk CKS voltage offset, programs the PWR_CKS_ENABLE/PWR_CKS_CNTL
 * registers, and fills the CKS lookup table and clock-stretcher data table.
 * Returns -EINVAL for an unsupported stretch amount, 0 otherwise. */
static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	/* RO value lives in bits 31:24 of efuse word 146; the selector in
	 * the low nibble of word 148 picks which linear decode applies. */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type 0 = fast part (FF), type 1 = slow part (SS); selects the
	 * row of fiji_clock_stretcher_ddt_table used below */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	data->smc_state_table.ClockStretcherAmount = stretch_amount;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* empirical voltage models with and without clock stretching
		 * (coefficients from characterization; clk is in 10KHz units) */
		volt_without_cks = (uint32_t)((14041 *
			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
		volt_with_cks = (uint32_t)((13946 *
			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		/* NOTE(review): when the model yields no saving, the offset
		 * from the previous entry is reused (volt_offset is not
		 * reset) — confirm this carry-over is intended. */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* pulse masterReset with staticEnable set to latch the CKS config */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* clear the CKS_CNTL fields programmed below, keep the rest */
	value &= 0xFFC2FF87;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
	/* highest graphics level's sclk, back in host order, in MHz */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (fiji_clock_stretch_amount_conversion
				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
				[stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
		data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 * Sclk's endianness has already been converted,
			 * and it's in 10Khz unit,
			 * as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >=
					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
				cks_setting |= 0x2;
				if (clock_freq <
						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
					cks_setting |= 0x1;
			}
			/* two setting bits per graphics level */
			data->smc_state_table.ClockStretcherDataTable.
					ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* clear bit 0 of PWR_CKS_CNTL (per the mask; field name not visible here) */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
2906
2907/**
2908* Populates the SMC VRConfig field in DPM table.
2909*
2910* @param hwmgr the address of the hardware manager
2911* @param table the SMC DPM table structure to be populated
2912* @return always 0
2913*/
2914static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
2915 struct SMU73_Discrete_DpmTable *table)
2916{
2917 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2918 uint16_t config;
2919
2920 config = VR_MERGED_WITH_VDDC;
2921 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
2922
2923 /* Set Vddc Voltage Controller */
2924 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
2925 config = VR_SVI2_PLANE_1;
2926 table->VRConfig |= config;
2927 } else {
2928 PP_ASSERT_WITH_CODE(false,
2929 "VDDC should be on SVI2 control in merged mode!",);
2930 }
2931 /* Set Vddci Voltage Controller */
2932 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
2933 config = VR_SVI2_PLANE_2; /* only in merged mode */
2934 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2935 } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
2936 config = VR_SMIO_PATTERN_1;
2937 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2938 } else {
2939 config = VR_STATIC_VOLTAGE;
2940 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
2941 }
2942 /* Set Mvdd Voltage Controller */
2943 if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
2944 config = VR_SVI2_PLANE_2;
2945 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2946 } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
2947 config = VR_SMIO_PATTERN_2;
2948 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2949 } else {
2950 config = VR_STATIC_VOLTAGE;
2951 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
2952 }
2953
2954 return 0;
2955}
2956
2957/**
2958* Initializes the SMC table and uploads it
2959*
2960* @param hwmgr the address of the powerplay hardware manager.
2961* @param pInput the pointer to input data (PowerState)
2962* @return always 0
2963*/
2964static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2965{
2966 int result;
2967 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
2968 struct phm_ppt_v1_information *table_info =
2969 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2970 struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
2971 const struct fiji_ulv_parm *ulv = &(data->ulv);
2972 uint8_t i;
2973 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
2974
2975 result = fiji_setup_default_dpm_tables(hwmgr);
2976 PP_ASSERT_WITH_CODE(0 == result,
2977 "Failed to setup default DPM tables!", return result);
2978
2979 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
2980 fiji_populate_smc_voltage_tables(hwmgr, table);
2981
2982 table->SystemFlags = 0;
2983
2984 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2985 PHM_PlatformCaps_AutomaticDCTransition))
2986 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2987
2988 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2989 PHM_PlatformCaps_StepVddc))
2990 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2991
2992 if (data->is_memory_gddr5)
2993 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2994
2995 if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
2996 result = fiji_populate_ulv_state(hwmgr, table);
2997 PP_ASSERT_WITH_CODE(0 == result,
2998 "Failed to initialize ULV state!", return result);
2999 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3000 ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3001 }
3002
3003 result = fiji_populate_smc_link_level(hwmgr, table);
3004 PP_ASSERT_WITH_CODE(0 == result,
3005 "Failed to initialize Link Level!", return result);
3006
3007 result = fiji_populate_all_graphic_levels(hwmgr);
3008 PP_ASSERT_WITH_CODE(0 == result,
3009 "Failed to initialize Graphics Level!", return result);
3010
3011 result = fiji_populate_all_memory_levels(hwmgr);
3012 PP_ASSERT_WITH_CODE(0 == result,
3013 "Failed to initialize Memory Level!", return result);
3014
3015 result = fiji_populate_smc_acpi_level(hwmgr, table);
3016 PP_ASSERT_WITH_CODE(0 == result,
3017 "Failed to initialize ACPI Level!", return result);
3018
3019 result = fiji_populate_smc_vce_level(hwmgr, table);
3020 PP_ASSERT_WITH_CODE(0 == result,
3021 "Failed to initialize VCE Level!", return result);
3022
3023 result = fiji_populate_smc_acp_level(hwmgr, table);
3024 PP_ASSERT_WITH_CODE(0 == result,
3025 "Failed to initialize ACP Level!", return result);
3026
3027 result = fiji_populate_smc_samu_level(hwmgr, table);
3028 PP_ASSERT_WITH_CODE(0 == result,
3029 "Failed to initialize SAMU Level!", return result);
3030
3031 /* Since only the initial state is completely set up at this point
3032 * (the other states are just copies of the boot state) we only
3033 * need to populate the ARB settings for the initial state.
3034 */
3035 result = fiji_program_memory_timing_parameters(hwmgr);
3036 PP_ASSERT_WITH_CODE(0 == result,
3037 "Failed to Write ARB settings for the initial state.", return result);
3038
3039 result = fiji_populate_smc_uvd_level(hwmgr, table);
3040 PP_ASSERT_WITH_CODE(0 == result,
3041 "Failed to initialize UVD Level!", return result);
3042
3043 result = fiji_populate_smc_boot_level(hwmgr, table);
3044 PP_ASSERT_WITH_CODE(0 == result,
3045 "Failed to initialize Boot Level!", return result);
3046
3047 result = fiji_populate_smc_initailial_state(hwmgr);
3048 PP_ASSERT_WITH_CODE(0 == result,
3049 "Failed to initialize Boot State!", return result);
3050
3051 result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
3052 PP_ASSERT_WITH_CODE(0 == result,
3053 "Failed to populate BAPM Parameters!", return result);
3054
3055 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3056 PHM_PlatformCaps_ClockStretcher)) {
3057 result = fiji_populate_clock_stretcher_data_table(hwmgr);
3058 PP_ASSERT_WITH_CODE(0 == result,
3059 "Failed to populate Clock Stretcher Data Table!",
3060 return result);
3061 }
3062
3063 table->GraphicsVoltageChangeEnable = 1;
3064 table->GraphicsThermThrottleEnable = 1;
3065 table->GraphicsInterval = 1;
3066 table->VoltageInterval = 1;
3067 table->ThermalInterval = 1;
3068 table->TemperatureLimitHigh =
3069 table_info->cac_dtp_table->usTargetOperatingTemp *
3070 FIJI_Q88_FORMAT_CONVERSION_UNIT;
3071 table->TemperatureLimitLow =
3072 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
3073 FIJI_Q88_FORMAT_CONVERSION_UNIT;
3074 table->MemoryVoltageChangeEnable = 1;
3075 table->MemoryInterval = 1;
3076 table->VoltageResponseTime = 0;
3077 table->PhaseResponseTime = 0;
3078 table->MemoryThermThrottleEnable = 1;
3079 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
3080 table->PCIeGenInterval = 1;
3081 table->VRConfig = 0;
3082
3083 result = fiji_populate_vr_config(hwmgr, table);
3084 PP_ASSERT_WITH_CODE(0 == result,
3085 "Failed to populate VRConfig setting!", return result);
3086
3087 table->ThermGpio = 17;
3088 table->SclkStepSize = 0x4000;
3089
3090 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
3091 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
3092 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3093 PHM_PlatformCaps_RegulatorHot);
3094 } else {
3095 table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
3096 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3097 PHM_PlatformCaps_RegulatorHot);
3098 }
3099
3100 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3101 &gpio_pin)) {
3102 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
3103 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3104 PHM_PlatformCaps_AutomaticDCTransition);
3105 } else {
3106 table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
3107 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3108 PHM_PlatformCaps_AutomaticDCTransition);
3109 }
3110
3111 /* Thermal Output GPIO */
3112 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
3113 &gpio_pin)) {
3114 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3115 PHM_PlatformCaps_ThermalOutGPIO);
3116
3117 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
3118
3119 /* For porlarity read GPIOPAD_A with assigned Gpio pin
3120 * since VBIOS will program this register to set 'inactive state',
3121 * driver can then determine 'active state' from this and
3122 * program SMU with correct polarity
3123 */
3124 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
3125 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
3126 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
3127
3128 /* if required, combine VRHot/PCC with thermal out GPIO */
3129 if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3130 PHM_PlatformCaps_RegulatorHot) &&
3131 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3132 PHM_PlatformCaps_CombinePCCWithThermalSignal))
3133 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
3134 } else {
3135 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3136 PHM_PlatformCaps_ThermalOutGPIO);
3137 table->ThermOutGpio = 17;
3138 table->ThermOutPolarity = 1;
3139 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
3140 }
3141
3142 for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
3143 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
3144
3145 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
3146 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
3147 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
3148 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
3149 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
3150 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
3151 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
3152 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
3153 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
3154
3155 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
3156 result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
3157 data->dpm_table_start +
3158 offsetof(SMU73_Discrete_DpmTable, SystemFlags),
3159 (uint8_t *)&(table->SystemFlags),
3160 sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
3161 data->sram_end);
3162 PP_ASSERT_WITH_CODE(0 == result,
3163 "Failed to upload dpm data to SMC memory!", return result);
3164
3165 return 0;
3166}
3167
3168/**
3169* Initialize the ARB DRAM timing table's index field.
3170*
3171* @param hwmgr the address of the powerplay hardware manager.
3172* @return always 0
3173*/
3174static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
3175{
3176 const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3177 uint32_t tmp;
3178 int result;
3179
3180 /* This is a read-modify-write on the first byte of the ARB table.
3181 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
3182 * is the field 'current'.
3183 * This solution is ugly, but we never write the whole table only
3184 * individual fields in it.
3185 * In reality this field should not be in that structure
3186 * but in a soft register.
3187 */
3188 result = fiji_read_smc_sram_dword(hwmgr->smumgr,
3189 data->arb_table_start, &tmp, data->sram_end);
3190
3191 if (result)
3192 return result;
3193
3194 tmp &= 0x00FFFFFF;
3195 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
3196
3197 return fiji_write_smc_sram_dword(hwmgr->smumgr,
3198 data->arb_table_start, tmp, data->sram_end);
3199}
3200
3201static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
3202{
3203 if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3204 PHM_PlatformCaps_RegulatorHot))
3205 return smum_send_msg_to_smc(hwmgr->smumgr,
3206 PPSMC_MSG_EnableVRHotGPIOInterrupt);
3207
3208 return 0;
3209}
3210
/* Allow SCLK power management to run by clearing the SCLK_PWRMGT_OFF bit
 * in SCLK_PWRMGT_CNTL. Always returns 0.
 */
static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
3217
3218static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
3219{
3220 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3221 struct fiji_ulv_parm *ulv = &(data->ulv);
3222
3223 if (ulv->ulv_supported)
3224 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
3225
3226 return 0;
3227}
3228
3229static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
3230{
3231 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3232 struct fiji_ulv_parm *ulv = &(data->ulv);
3233
3234 if (ulv->ulv_supported)
3235 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
3236
3237 return 0;
3238}
3239
3240static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3241{
3242 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3243 PHM_PlatformCaps_SclkDeepSleep)) {
3244 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
3245 PP_ASSERT_WITH_CODE(false,
3246 "Attempt to enable Master Deep Sleep switch failed!",
3247 return -1);
3248 } else {
3249 if (smum_send_msg_to_smc(hwmgr->smumgr,
3250 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3251 PP_ASSERT_WITH_CODE(false,
3252 "Attempt to disable Master Deep Sleep switch failed!",
3253 return -1);
3254 }
3255 }
3256
3257 return 0;
3258}
3259
3260static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3261{
3262 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3263 PHM_PlatformCaps_SclkDeepSleep)) {
3264 if (smum_send_msg_to_smc(hwmgr->smumgr,
3265 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3266 PP_ASSERT_WITH_CODE(false,
3267 "Attempt to disable Master Deep Sleep switch failed!",
3268 return -1);
3269 }
3270 }
3271
3272 return 0;
3273}
3274
/* Enable SCLK and MCLK DPM in the SMC.
 *
 * For MCLK this also probes which of the 8 MCD tiles actually have memory
 * channels attached, programs the per-tile LCAC thresholds and the CPL
 * control, enables CAC accounting per tile, and finally sends
 * PPSMC_MSG_MCLKDPM_Enable.
 *
 * Returns 0 on success, -1 on an SMC message failure, -EINVAL when no MCQ
 * is enabled (via PP_ASSERT_WITH_CODE).
 */
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t val, val0, val2;
	uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;

	/* enable SCLK dpm */
	if(!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -1);

	/* enable MCLK dpm */
	if(0 == data->mclk_dpm_key_disabled) {
		cpl_threshold = 0;
		mc_threshold = 0;

		/* Read per MCD tile (0 - 7) */
		for (i = 0; i < 8; i++) {
			PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
			/* A top nibble of 0xf means no channel enabled on this tile. */
			val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
			if (0xf0000000 != val) {
				/* count number of MCQ that has channel(s) enabled */
				cpl_threshold++;
				/* only harvest 3 or full 4 supported */
				mc_threshold = val ? 3 : 4;
			}
		}
		PP_ASSERT_WITH_CODE(0 != cpl_threshold,
				"Number of MCQ is zero!", return -EINVAL;);

		/* NOTE(review): the value is AND-ed with the register mask
		 * *before* being shifted into the field position. Confirm the
		 * intended ordering (mask-then-shift vs shift-then-mask) against
		 * the LCAC_MC0_CNTL / LCAC_CPL_CNTL register layout. */
		mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
				LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
				LCAC_MC0_CNTL__MC0_ENABLE_MASK;
		cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
				LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
				LCAC_CPL_CNTL__CPL_ENABLE_MASK;
		cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
		/* MC0/MC1 always get thresholds; MC2..MC7 only when all eight
		 * tiles reported enabled channels (cpl_threshold == 8). */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		udelay(5);

		/* Second pass: same registers again with the SIGNAL_ID bit set. */
		mc_threshold = mc_threshold |
				(1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
		cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC0_CNTL, mc_threshold);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_MC1_CNTL, mc_threshold);
		if (8 == cpl_threshold) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC2_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC3_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC4_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC5_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC6_CNTL, mc_threshold);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixLCAC_MC7_CNTL, mc_threshold);
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixLCAC_CPL_CNTL, cpl_cntl);

		/* Program CAC_EN per MCD (0-7) Tile */
		val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
		val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
				MC_CONFIG_MCD__MC_RD_ENABLE_MASK);

		for (i = 0; i < 8; i++) {
			/* Enable MCD i Tile read & write */
			val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
					(1 << i));
			cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
			/* Enbale CAC_ON MCD i Tile */
			val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
			val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
			cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
		}
		/* Set MC_CONFIG_MCD back to its default setting val0 */
		cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);
	}
	return 0;
}
3394
/* Kick off DPM: turn on global power management and SCLK deep sleep,
 * prepare the PCIe DPM path, then enable voltage control, SCLK/MCLK DPM
 * and (optionally) PCIe DPM in the SMC.
 * Returns 0 on success, -1 on any failure.
 */
static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/*enable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);
	/* prepare for PCIE DPM: set the voltage-change timeout soft register
	 * and release the PCIe link controller reset. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + offsetof(SMU73_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -1);

	if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
		return -1;
	}

	/* enable PCIE dpm */
	if(!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -1);
	}

	return 0;
}
3434
/* Disable SCLK and MCLK DPM in the SMC. MCLK is first forced down to DPM
 * level 0 before being disabled. Returns 0 on success, -1 on any failure.
 */
static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_DPM_Disable) == 0),
				"Failed to disable SCLK DPM!",
				return -1);

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		/* Force MCLK to the lowest level (mask with only bit 0 set)
		 * before turning MCLK DPM off. */
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
				"Failed to force MCLK DPM0!",
				return -1);

		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Disable) == 0),
				"Failed to disable MCLK DPM!",
				return -1);
	}

	return 0;
}
3464
/* Stop DPM: turn off global power management and SCLK deep sleep, then
 * disable PCIe DPM, SCLK/MCLK DPM and finally voltage control in the SMC
 * (roughly the reverse of fiji_start_dpm).
 * Returns 0 on success, -1 on any failure.
 */
static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -1);
	}

	if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
		return -1;
	}

	PP_ASSERT_WITH_CODE(
			(smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Disable) == 0),
			"Failed to disable voltage DPM during DPM Stop Function!",
			return -1);

	return 0;
}
3498
/* Program the throttling event source(s) into CG_THERMAL_CTRL and
 * enable/disable thermal protection accordingly.
 *
 * @param sources bit mask of PHM_AutoThrottleSource bits; only the four
 *        combinations handled below are recognized — anything else logs
 *        an error and falls back to "no protection".
 */
static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
		uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	/* Unknown masks deliberately fall through to the "disabled" case. */
	default:
		printk(KERN_ERR "Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		/* THERMAL_PROTECTION_DIS is active-low protection: disable it
		 * only when a thermal controller capability is present. */
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}
3539
3540static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3541 PHM_AutoThrottleSource source)
3542{
3543 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3544
3545 if (!(data->active_auto_throttle_sources & (1 << source))) {
3546 data->active_auto_throttle_sources |= 1 << source;
3547 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3548 }
3549 return 0;
3550}
3551
/* Convenience wrapper: enable the thermal auto-throttle source. */
static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
3556
3557static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3558 PHM_AutoThrottleSource source)
3559{
3560 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3561
3562 if (data->active_auto_throttle_sources & (1 << source)) {
3563 data->active_auto_throttle_sources &= ~(1 << source);
3564 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3565 }
3566 return 0;
3567}
3568
/* Convenience wrapper: disable the thermal auto-throttle source. */
static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
3573
3574static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3575{
3576 int tmp_result, result = 0;
3577
3578 tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1;
3579 PP_ASSERT_WITH_CODE(result == 0,
3580 "DPM is already running right now, no need to enable DPM!",
3581 return 0);
3582
3583 if (fiji_voltage_control(hwmgr)) {
3584 tmp_result = fiji_enable_voltage_control(hwmgr);
3585 PP_ASSERT_WITH_CODE(tmp_result == 0,
3586 "Failed to enable voltage control!",
3587 result = tmp_result);
3588 }
3589
3590 if (fiji_voltage_control(hwmgr)) {
3591 tmp_result = fiji_construct_voltage_tables(hwmgr);
3592 PP_ASSERT_WITH_CODE((0 == tmp_result),
3593 "Failed to contruct voltage tables!",
3594 result = tmp_result);
3595 }
3596
3597 tmp_result = fiji_initialize_mc_reg_table(hwmgr);
3598 PP_ASSERT_WITH_CODE((0 == tmp_result),
3599 "Failed to initialize MC reg table!", result = tmp_result);
3600
3601 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3602 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
3603 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3604 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
3605
3606 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3607 PHM_PlatformCaps_ThermalController))
3608 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3609 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
3610
3611 tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
3612 PP_ASSERT_WITH_CODE((0 == tmp_result),
3613 "Failed to program static screen threshold parameters!",
3614 result = tmp_result);
3615
3616 tmp_result = fiji_enable_display_gap(hwmgr);
3617 PP_ASSERT_WITH_CODE((0 == tmp_result),
3618 "Failed to enable display gap!", result = tmp_result);
3619
3620 tmp_result = fiji_program_voting_clients(hwmgr);
3621 PP_ASSERT_WITH_CODE((0 == tmp_result),
3622 "Failed to program voting clients!", result = tmp_result);
3623
3624 tmp_result = fiji_process_firmware_header(hwmgr);
3625 PP_ASSERT_WITH_CODE((0 == tmp_result),
3626 "Failed to process firmware header!", result = tmp_result);
3627
3628 tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
3629 PP_ASSERT_WITH_CODE((0 == tmp_result),
3630 "Failed to initialize switch from ArbF0 to F1!",
3631 result = tmp_result);
3632
3633 tmp_result = fiji_init_smc_table(hwmgr);
3634 PP_ASSERT_WITH_CODE((0 == tmp_result),
3635 "Failed to initialize SMC table!", result = tmp_result);
3636
3637 tmp_result = fiji_init_arb_table_index(hwmgr);
3638 PP_ASSERT_WITH_CODE((0 == tmp_result),
3639 "Failed to initialize ARB table index!", result = tmp_result);
3640
3641 tmp_result = fiji_populate_pm_fuses(hwmgr);
3642 PP_ASSERT_WITH_CODE((0 == tmp_result),
3643 "Failed to populate PM fuses!", result = tmp_result);
3644
3645 tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
3646 PP_ASSERT_WITH_CODE((0 == tmp_result),
3647 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
3648
3649 tmp_result = tonga_notify_smc_display_change(hwmgr, false);
3650 PP_ASSERT_WITH_CODE((0 == tmp_result),
3651 "Failed to notify no display!", result = tmp_result);
3652
3653 tmp_result = fiji_enable_sclk_control(hwmgr);
3654 PP_ASSERT_WITH_CODE((0 == tmp_result),
3655 "Failed to enable SCLK control!", result = tmp_result);
3656
3657 tmp_result = fiji_enable_ulv(hwmgr);
3658 PP_ASSERT_WITH_CODE((0 == tmp_result),
3659 "Failed to enable ULV!", result = tmp_result);
3660
3661 tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
3662 PP_ASSERT_WITH_CODE((0 == tmp_result),
3663 "Failed to enable deep sleep master switch!", result = tmp_result);
3664
3665 tmp_result = fiji_start_dpm(hwmgr);
3666 PP_ASSERT_WITH_CODE((0 == tmp_result),
3667 "Failed to start DPM!", result = tmp_result);
3668
3669 tmp_result = fiji_enable_smc_cac(hwmgr);
3670 PP_ASSERT_WITH_CODE((0 == tmp_result),
3671 "Failed to enable SMC CAC!", result = tmp_result);
3672
3673 tmp_result = fiji_enable_power_containment(hwmgr);
3674 PP_ASSERT_WITH_CODE((0 == tmp_result),
3675 "Failed to enable power containment!", result = tmp_result);
3676
3677 tmp_result = fiji_power_control_set_level(hwmgr);
3678 PP_ASSERT_WITH_CODE((0 == tmp_result),
3679 "Failed to power control set level!", result = tmp_result);
3680
3681 tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
3682 PP_ASSERT_WITH_CODE((0 == tmp_result),
3683 "Failed to enable thermal auto throttle!", result = tmp_result);
3684
3685 return result;
3686}
3687
/* Tear down the DPM stack in roughly the reverse order of enable: power
 * containment, CAC, spread spectrum, throttling, DPM itself, deep sleep,
 * ULV, voting clients, and finally a reset back to defaults / ArbF0.
 * Each step latches its error into 'result' but the sequence continues.
 * Returns 0 on success or the last step's error code.
 */
static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Nothing to do when DPM is not running. */
	tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = fiji_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = fiji_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	/* Turn off engine spread spectrum. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = fiji_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = fiji_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = fiji_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = fiji_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = fiji_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
3745
3746static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
3747{
3748 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3749 uint32_t level, tmp;
3750
3751 if (!data->sclk_dpm_key_disabled) {
3752 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3753 level = 0;
3754 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3755 while (tmp >>= 1)
3756 level++;
3757 if (level)
3758 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3759 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3760 (1 << level));
3761 }
3762 }
3763
3764 if (!data->mclk_dpm_key_disabled) {
3765 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3766 level = 0;
3767 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3768 while (tmp >>= 1)
3769 level++;
3770 if (level)
3771 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3772 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3773 (1 << level));
3774 }
3775 }
3776
3777 if (!data->pcie_dpm_key_disabled) {
3778 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3779 level = 0;
3780 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3781 while (tmp >>= 1)
3782 level++;
3783 if (level)
3784 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3785 PPSMC_MSG_PCIeDPM_ForceLevel,
3786 (1 << level));
3787 }
3788 }
3789 return 0;
3790}
3791
3792static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
3793{
3794 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3795
3796 phm_apply_dal_min_voltage_request(hwmgr);
3797
3798 if (!data->sclk_dpm_key_disabled) {
3799 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3800 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3801 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3802 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3803 }
3804 return 0;
3805}
3806
3807static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3808{
3809 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3810
3811 if (!fiji_is_dpm_running(hwmgr))
3812 return -EINVAL;
3813
3814 if (!data->pcie_dpm_key_disabled) {
3815 smum_send_msg_to_smc(hwmgr->smumgr,
3816 PPSMC_MSG_PCIeDPM_UnForceLevel);
3817 }
3818
3819 return fiji_upload_dpmlevel_enable_mask(hwmgr);
3820}
3821
/**
* Find the lowest enabled DPM level in a level bit mask.
*
* @param hwmgr the address of the powerplay hardware manager (unused).
* @param mask bit mask of enabled levels (bit i set == level i enabled).
* @return index of the lowest set bit, or 0 when the mask is empty.
*/
static uint32_t fiji_get_lowest_enabled_level(
		struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	/* Robustness fix: an empty mask made the original scan loop forever
	 * and eventually shift 1 past bit 31 (undefined behavior). Existing
	 * callers only pass non-zero masks, so returning 0 here is safe. */
	if (mask == 0)
		return 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}
3832
3833static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3834{
3835 struct fiji_hwmgr *data =
3836 (struct fiji_hwmgr *)(hwmgr->backend);
3837 uint32_t level;
3838
3839 if (!data->sclk_dpm_key_disabled)
3840 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3841 level = fiji_get_lowest_enabled_level(hwmgr,
3842 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3843 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3844 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3845 (1 << level));
3846
3847 }
3848
3849 if (!data->mclk_dpm_key_disabled) {
3850 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3851 level = fiji_get_lowest_enabled_level(hwmgr,
3852 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3853 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3854 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3855 (1 << level));
3856 }
3857 }
3858
3859 if (!data->pcie_dpm_key_disabled) {
3860 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3861 level = fiji_get_lowest_enabled_level(hwmgr,
3862 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3863 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3864 PPSMC_MSG_PCIeDPM_ForceLevel,
3865 (1 << level));
3866 }
3867 }
3868
3869 return 0;
3870
3871}
3872static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3873 enum amd_dpm_forced_level level)
3874{
3875 int ret = 0;
3876
3877 switch (level) {
3878 case AMD_DPM_FORCED_LEVEL_HIGH:
3879 ret = fiji_force_dpm_highest(hwmgr);
3880 if (ret)
3881 return ret;
3882 break;
3883 case AMD_DPM_FORCED_LEVEL_LOW:
3884 ret = fiji_force_dpm_lowest(hwmgr);
3885 if (ret)
3886 return ret;
3887 break;
3888 case AMD_DPM_FORCED_LEVEL_AUTO:
3889 ret = fiji_unforce_dpm_levels(hwmgr);
3890 if (ret)
3891 return ret;
3892 break;
3893 default:
3894 break;
3895 }
3896
3897 hwmgr->dpm_level = level;
3898
3899 return ret;
3900}
3901
/* Size in bytes of this asic's hardware power-state representation; the
 * powerplay core uses it to size pp_power_state allocations. */
static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct fiji_power_state);
}
3906
3907static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3908 void *state, struct pp_power_state *power_state,
3909 void *pp_table, uint32_t classification_flag)
3910{
3911 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3912 struct fiji_power_state *fiji_power_state =
3913 (struct fiji_power_state *)(&(power_state->hardware));
3914 struct fiji_performance_level *performance_level;
3915 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3916 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3917 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3918 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
3919 (ATOM_Tonga_SCLK_Dependency_Table *)
3920 (((unsigned long)powerplay_table) +
3921 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3922 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3923 (ATOM_Tonga_MCLK_Dependency_Table *)
3924 (((unsigned long)powerplay_table) +
3925 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3926
3927 /* The following fields are not initialized here: id orderedList allStatesList */
3928 power_state->classification.ui_label =
3929 (le16_to_cpu(state_entry->usClassification) &
3930 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3931 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3932 power_state->classification.flags = classification_flag;
3933 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3934
3935 power_state->classification.temporary_state = false;
3936 power_state->classification.to_be_deleted = false;
3937
3938 power_state->validation.disallowOnDC =
3939 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3940 ATOM_Tonga_DISALLOW_ON_DC));
3941
3942 power_state->pcie.lanes = 0;
3943
3944 power_state->display.disableFrameModulation = false;
3945 power_state->display.limitRefreshrate = false;
3946 power_state->display.enableVariBright =
3947 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3948 ATOM_Tonga_ENABLE_VARIBRIGHT));
3949
3950 power_state->validation.supportedPowerLevels = 0;
3951 power_state->uvd_clocks.VCLK = 0;
3952 power_state->uvd_clocks.DCLK = 0;
3953 power_state->temperatures.min = 0;
3954 power_state->temperatures.max = 0;
3955
3956 performance_level = &(fiji_power_state->performance_levels
3957 [fiji_power_state->performance_level_count++]);
3958
3959 PP_ASSERT_WITH_CODE(
3960 (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
3961 "Performance levels exceeds SMC limit!",
3962 return -1);
3963
3964 PP_ASSERT_WITH_CODE(
3965 (fiji_power_state->performance_level_count <=
3966 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3967 "Performance levels exceeds Driver limit!",
3968 return -1);
3969
3970 /* Performance levels are arranged from low to high. */
3971 performance_level->memory_clock = mclk_dep_table->entries
3972 [state_entry->ucMemoryClockIndexLow].ulMclk;
3973 performance_level->engine_clock = sclk_dep_table->entries
3974 [state_entry->ucEngineClockIndexLow].ulSclk;
3975 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3976 state_entry->ucPCIEGenLow);
3977 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3978 state_entry->ucPCIELaneHigh);
3979
3980 performance_level = &(fiji_power_state->performance_levels
3981 [fiji_power_state->performance_level_count++]);
3982 performance_level->memory_clock = mclk_dep_table->entries
3983 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3984 performance_level->engine_clock = sclk_dep_table->entries
3985 [state_entry->ucEngineClockIndexHigh].ulSclk;
3986 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3987 state_entry->ucPCIEGenHigh);
3988 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3989 state_entry->ucPCIELaneHigh);
3990
3991 return 0;
3992}
3993
3994static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3995 unsigned long entry_index, struct pp_power_state *state)
3996{
3997 int result;
3998 struct fiji_power_state *ps;
3999 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4000 struct phm_ppt_v1_information *table_info =
4001 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4002 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
4003 table_info->vdd_dep_on_mclk;
4004
4005 state->hardware.magic = PHM_VIslands_Magic;
4006
4007 ps = (struct fiji_power_state *)(&state->hardware);
4008
4009 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
4010 fiji_get_pp_table_entry_callback_func);
4011
4012 /* This is the earliest time we have all the dependency table and the VBIOS boot state
4013 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
4014 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
4015 */
4016 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
4017 if (dep_mclk_table->entries[0].clk !=
4018 data->vbios_boot_state.mclk_bootup_value)
4019 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
4020 "does not match VBIOS boot MCLK level");
4021 if (dep_mclk_table->entries[0].vddci !=
4022 data->vbios_boot_state.vddci_bootup_value)
4023 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
4024 "does not match VBIOS boot VDDCI level");
4025 }
4026
4027 /* set DC compatible flag if this state supports DC */
4028 if (!state->validation.disallowOnDC)
4029 ps->dc_compatible = true;
4030
4031 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
4032 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
4033
4034 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
4035 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
4036
4037 if (!result) {
4038 uint32_t i;
4039
4040 switch (state->classification.ui_label) {
4041 case PP_StateUILabel_Performance:
4042 data->use_pcie_performance_levels = true;
4043
4044 for (i = 0; i < ps->performance_level_count; i++) {
4045 if (data->pcie_gen_performance.max <
4046 ps->performance_levels[i].pcie_gen)
4047 data->pcie_gen_performance.max =
4048 ps->performance_levels[i].pcie_gen;
4049
4050 if (data->pcie_gen_performance.min >
4051 ps->performance_levels[i].pcie_gen)
4052 data->pcie_gen_performance.min =
4053 ps->performance_levels[i].pcie_gen;
4054
4055 if (data->pcie_lane_performance.max <
4056 ps->performance_levels[i].pcie_lane)
4057 data->pcie_lane_performance.max =
4058 ps->performance_levels[i].pcie_lane;
4059
4060 if (data->pcie_lane_performance.min >
4061 ps->performance_levels[i].pcie_lane)
4062 data->pcie_lane_performance.min =
4063 ps->performance_levels[i].pcie_lane;
4064 }
4065 break;
4066 case PP_StateUILabel_Battery:
4067 data->use_pcie_power_saving_levels = true;
4068
4069 for (i = 0; i < ps->performance_level_count; i++) {
4070 if (data->pcie_gen_power_saving.max <
4071 ps->performance_levels[i].pcie_gen)
4072 data->pcie_gen_power_saving.max =
4073 ps->performance_levels[i].pcie_gen;
4074
4075 if (data->pcie_gen_power_saving.min >
4076 ps->performance_levels[i].pcie_gen)
4077 data->pcie_gen_power_saving.min =
4078 ps->performance_levels[i].pcie_gen;
4079
4080 if (data->pcie_lane_power_saving.max <
4081 ps->performance_levels[i].pcie_lane)
4082 data->pcie_lane_power_saving.max =
4083 ps->performance_levels[i].pcie_lane;
4084
4085 if (data->pcie_lane_power_saving.min >
4086 ps->performance_levels[i].pcie_lane)
4087 data->pcie_lane_power_saving.min =
4088 ps->performance_levels[i].pcie_lane;
4089 }
4090 break;
4091 default:
4092 break;
4093 }
4094 }
4095 return 0;
4096}
4097
4098static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
4099 struct pp_power_state *request_ps,
4100 const struct pp_power_state *current_ps)
4101{
4102 struct fiji_power_state *fiji_ps =
4103 cast_phw_fiji_power_state(&request_ps->hardware);
4104 uint32_t sclk;
4105 uint32_t mclk;
4106 struct PP_Clocks minimum_clocks = {0};
4107 bool disable_mclk_switching;
4108 bool disable_mclk_switching_for_frame_lock;
4109 struct cgs_display_info info = {0};
4110 const struct phm_clock_and_voltage_limits *max_limits;
4111 uint32_t i;
4112 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4113 struct phm_ppt_v1_information *table_info =
4114 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4115 int32_t count;
4116 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
4117
4118 data->battery_state = (PP_StateUILabel_Battery ==
4119 request_ps->classification.ui_label);
4120
4121 PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
4122 "VI should always have 2 performance levels",);
4123
4124 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
4125 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
4126 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
4127
4128 /* Cap clock DPM tables at DC MAX if it is in DC. */
4129 if (PP_PowerSource_DC == hwmgr->power_source) {
4130 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4131 if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
4132 fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
4133 if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
4134 fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
4135 }
4136 }
4137
4138 fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
4139 fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
4140
4141 fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
4142
4143 cgs_get_active_displays_info(hwmgr->device, &info);
4144
4145 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
4146
4147 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
4148
4149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4150 PHM_PlatformCaps_StablePState)) {
4151 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
4152 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
4153
4154 for (count = table_info->vdd_dep_on_sclk->count - 1;
4155 count >= 0; count--) {
4156 if (stable_pstate_sclk >=
4157 table_info->vdd_dep_on_sclk->entries[count].clk) {
4158 stable_pstate_sclk =
4159 table_info->vdd_dep_on_sclk->entries[count].clk;
4160 break;
4161 }
4162 }
4163
4164 if (count < 0)
4165 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
4166
4167 stable_pstate_mclk = max_limits->mclk;
4168
4169 minimum_clocks.engineClock = stable_pstate_sclk;
4170 minimum_clocks.memoryClock = stable_pstate_mclk;
4171 }
4172
4173 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
4174 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
4175
4176 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
4177 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
4178
4179 fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
4180
4181 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
4182 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
4183 hwmgr->platform_descriptor.overdriveLimit.engineClock),
4184 "Overdrive sclk exceeds limit",
4185 hwmgr->gfx_arbiter.sclk_over_drive =
4186 hwmgr->platform_descriptor.overdriveLimit.engineClock);
4187
4188 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
4189 fiji_ps->performance_levels[1].engine_clock =
4190 hwmgr->gfx_arbiter.sclk_over_drive;
4191 }
4192
4193 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
4194 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
4195 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
4196 "Overdrive mclk exceeds limit",
4197 hwmgr->gfx_arbiter.mclk_over_drive =
4198 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4199
4200 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
4201 fiji_ps->performance_levels[1].memory_clock =
4202 hwmgr->gfx_arbiter.mclk_over_drive;
4203 }
4204
4205 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
4206 hwmgr->platform_descriptor.platformCaps,
4207 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
4208
4209 disable_mclk_switching = (1 < info.display_count) ||
4210 disable_mclk_switching_for_frame_lock;
4211
4212 sclk = fiji_ps->performance_levels[0].engine_clock;
4213 mclk = fiji_ps->performance_levels[0].memory_clock;
4214
4215 if (disable_mclk_switching)
4216 mclk = fiji_ps->performance_levels
4217 [fiji_ps->performance_level_count - 1].memory_clock;
4218
4219 if (sclk < minimum_clocks.engineClock)
4220 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
4221 max_limits->sclk : minimum_clocks.engineClock;
4222
4223 if (mclk < minimum_clocks.memoryClock)
4224 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
4225 max_limits->mclk : minimum_clocks.memoryClock;
4226
4227 fiji_ps->performance_levels[0].engine_clock = sclk;
4228 fiji_ps->performance_levels[0].memory_clock = mclk;
4229
4230 fiji_ps->performance_levels[1].engine_clock =
4231 (fiji_ps->performance_levels[1].engine_clock >=
4232 fiji_ps->performance_levels[0].engine_clock) ?
4233 fiji_ps->performance_levels[1].engine_clock :
4234 fiji_ps->performance_levels[0].engine_clock;
4235
4236 if (disable_mclk_switching) {
4237 if (mclk < fiji_ps->performance_levels[1].memory_clock)
4238 mclk = fiji_ps->performance_levels[1].memory_clock;
4239
4240 fiji_ps->performance_levels[0].memory_clock = mclk;
4241 fiji_ps->performance_levels[1].memory_clock = mclk;
4242 } else {
4243 if (fiji_ps->performance_levels[1].memory_clock <
4244 fiji_ps->performance_levels[0].memory_clock)
4245 fiji_ps->performance_levels[1].memory_clock =
4246 fiji_ps->performance_levels[0].memory_clock;
4247 }
4248
4249 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4250 PHM_PlatformCaps_StablePState)) {
4251 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4252 fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
4253 fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
4254 fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
4255 fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
4256 }
4257 }
4258
4259 return 0;
4260}
4261
4262static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4263{
4264 const struct phm_set_power_state_input *states =
4265 (const struct phm_set_power_state_input *)input;
4266 const struct fiji_power_state *fiji_ps =
4267 cast_const_phw_fiji_power_state(states->pnew_state);
4268 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4269 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4270 uint32_t sclk = fiji_ps->performance_levels
4271 [fiji_ps->performance_level_count - 1].engine_clock;
4272 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4273 uint32_t mclk = fiji_ps->performance_levels
4274 [fiji_ps->performance_level_count - 1].memory_clock;
4275 uint32_t i;
4276 struct cgs_display_info info = {0};
4277
4278 data->need_update_smu7_dpm_table = 0;
4279
4280 for (i = 0; i < sclk_table->count; i++) {
4281 if (sclk == sclk_table->dpm_levels[i].value)
4282 break;
4283 }
4284
4285 if (i >= sclk_table->count)
4286 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4287 else {
4288 if(data->display_timing.min_clock_in_sr !=
4289 hwmgr->display_config.min_core_set_clock_in_sr)
4290 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4291 }
4292
4293 for (i = 0; i < mclk_table->count; i++) {
4294 if (mclk == mclk_table->dpm_levels[i].value)
4295 break;
4296 }
4297
4298 if (i >= mclk_table->count)
4299 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4300
4301 cgs_get_active_displays_info(hwmgr->device, &info);
4302
4303 if (data->display_timing.num_existing_displays != info.display_count)
4304 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4305
4306 return 0;
4307}
4308
4309static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4310 const struct fiji_power_state *fiji_ps)
4311{
4312 uint32_t i;
4313 uint32_t sclk, max_sclk = 0;
4314 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4315 struct fiji_dpm_table *dpm_table = &data->dpm_table;
4316
4317 for (i = 0; i < fiji_ps->performance_level_count; i++) {
4318 sclk = fiji_ps->performance_levels[i].engine_clock;
4319 if (max_sclk < sclk)
4320 max_sclk = sclk;
4321 }
4322
4323 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4324 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4325 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4326 dpm_table->pcie_speed_table.dpm_levels
4327 [dpm_table->pcie_speed_table.count - 1].value :
4328 dpm_table->pcie_speed_table.dpm_levels[i].value);
4329 }
4330
4331 return 0;
4332}
4333
/*
 * fiji_request_link_speed_change_before_state_change - raise PCIe link speed
 *
 * If the new state needs a faster link than the current one, ask the
 * platform (via ACPI) for the higher speed before switching states,
 * degrading gracefully: Gen3 request -> Gen2 request -> keep current speed.
 * A pending downgrade is only noted (pspp_notify_required) for after the
 * state change.  data->force_pcie_gen records a fallback speed when an ACPI
 * request fails.
 */
static int fiji_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	const struct fiji_power_state *fiji_nps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	const struct fiji_power_state *fiji_cps =
			cast_const_phw_fiji_power_state(states->pcurrent_state);

	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
	uint16_t current_link_speed;

	/* A previously forced gen overrides the speed derived from the
	 * current state. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: fall back to Gen2. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through - try a Gen2 request next */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request failed, keep current speed */
		default:
			data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4377
4378static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4379{
4380 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4381
4382 if (0 == data->need_update_smu7_dpm_table)
4383 return 0;
4384
4385 if ((0 == data->sclk_dpm_key_disabled) &&
4386 (data->need_update_smu7_dpm_table &
4387 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4388 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4389 "Trying to freeze SCLK DPM when DPM is disabled",
4390 );
4391 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4392 PPSMC_MSG_SCLKDPM_FreezeLevel),
4393 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4394 return -1);
4395 }
4396
4397 if ((0 == data->mclk_dpm_key_disabled) &&
4398 (data->need_update_smu7_dpm_table &
4399 DPMTABLE_OD_UPDATE_MCLK)) {
4400 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4401 "Trying to freeze MCLK DPM when DPM is disabled",
4402 );
4403 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4404 PPSMC_MSG_MCLKDPM_FreezeLevel),
4405 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4406 return -1);
4407 }
4408
4409 return 0;
4410}
4411
/*
 * fiji_populate_and_upload_sclk_mclk_dpm_levels - rebuild DPM tables for OD
 *
 * When the new state's top SCLK/MCLK falls outside the current DPM tables
 * (overdrive), writes the new clock into the table's last level and, when
 * OD6Plus is supported, rescales the intermediate levels proportionally to
 * the golden (default) table before re-uploading the graphics/memory levels
 * to the SMC.  Returns 0 on success or the populate result on failure.
 */
static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct fiji_power_state *fiji_ps =
			cast_const_phw_fiji_power_state(states->pnew_state);
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t sclk = fiji_ps->performance_levels
			[fiji_ps->performance_level_count - 1].engine_clock;
	uint32_t mclk = fiji_ps->performance_levels
			[fiji_ps->performance_level_count - 1].memory_clock;
	struct fiji_dpm_table *dpm_table = &data->dpm_table;

	struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* Overdrive SCLK goes into the top DPM level. */
		dpm_table->sclk_table.dpm_levels
				[dpm_table->sclk_table.count - 1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count - 1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = dpm_table->sclk_table.count < 2 ?
					0 : dpm_table->sclk_table.count - 2;
			/* NOTE(review): levels 0 and 1 are deliberately left
			 * untouched by this loop (i > 1) — confirm intended. */
			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count-1].value) {
					/* Scale intermediate levels up by the same
					 * percentage the top level gained. */
					clock_percent =
						((sclk - golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value) * 100) /
						golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
									clock_percent)/100;

				} else if (golden_dpm_table->sclk_table.dpm_levels
						[dpm_table->sclk_table.count-1].value > sclk) {
					/* NOTE(review): this condition indexes the golden
					 * table with dpm_table's count; the sibling branch
					 * and the percent math below use the golden count.
					 * Likely a copy/paste slip — confirm before changing. */
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count - 1].value - sclk) *
						100) /
						golden_dpm_table->sclk_table.dpm_levels
							[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* Same procedure for the memory clock table. */
		dpm_table->mclk_table.dpm_levels
				[dpm_table->mclk_table.count - 1].value = mclk;
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
					(golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value != 0),
					"Divide by 0!",
					return -1);
			dpm_count = dpm_table->mclk_table.count < 2 ?
					0 : dpm_table->mclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (mclk > golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value) {
					clock_percent = ((mclk -
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value) * 100) /
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value +
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;

				} else if (golden_dpm_table->mclk_table.dpm_levels
						[dpm_table->mclk_table.count-1].value > mclk) {
					/* NOTE(review): same golden/dpm count index mix as
					 * the SCLK branch above — confirm. */
					clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
							golden_dpm_table->mclk_table.dpm_levels
							[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = fiji_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = fiji_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}
4552
4553static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4554 struct fiji_single_dpm_table * dpm_table,
4555 uint32_t low_limit, uint32_t high_limit)
4556{
4557 uint32_t i;
4558
4559 for (i = 0; i < dpm_table->count; i++) {
4560 if ((dpm_table->dpm_levels[i].value < low_limit) ||
4561 (dpm_table->dpm_levels[i].value > high_limit))
4562 dpm_table->dpm_levels[i].enabled = false;
4563 else
4564 dpm_table->dpm_levels[i].enabled = true;
4565 }
4566 return 0;
4567}
4568
4569static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
4570 const struct fiji_power_state *fiji_ps)
4571{
4572 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4573 uint32_t high_limit_count;
4574
4575 PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
4576 "power state did not have any performance level",
4577 return -1);
4578
4579 high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
4580
4581 fiji_trim_single_dpm_states(hwmgr,
4582 &(data->dpm_table.sclk_table),
4583 fiji_ps->performance_levels[0].engine_clock,
4584 fiji_ps->performance_levels[high_limit_count].engine_clock);
4585
4586 fiji_trim_single_dpm_states(hwmgr,
4587 &(data->dpm_table.mclk_table),
4588 fiji_ps->performance_levels[0].memory_clock,
4589 fiji_ps->performance_levels[high_limit_count].memory_clock);
4590
4591 return 0;
4592}
4593
4594static int fiji_generate_dpm_level_enable_mask(
4595 struct pp_hwmgr *hwmgr, const void *input)
4596{
4597 int result;
4598 const struct phm_set_power_state_input *states =
4599 (const struct phm_set_power_state_input *)input;
4600 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4601 const struct fiji_power_state *fiji_ps =
4602 cast_const_phw_fiji_power_state(states->pnew_state);
4603
4604 result = fiji_trim_dpm_states(hwmgr, fiji_ps);
4605 if (result)
4606 return result;
4607
4608 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4609 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4610 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4611 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4612 data->last_mclk_dpm_enable_mask =
4613 data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4614
4615 if (data->uvd_enabled) {
4616 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4617 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4618 }
4619
4620 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4621 fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4622
4623 return 0;
4624}
4625
4626static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4627{
4628 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
4629 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
4630 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
4631}
4632
4633int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
4634{
4635 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4636 PPSMC_MSG_VCEDPM_Enable :
4637 PPSMC_MSG_VCEDPM_Disable);
4638}
4639
4640static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
4641{
4642 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4643 PPSMC_MSG_SAMUDPM_Enable :
4644 PPSMC_MSG_SAMUDPM_Disable);
4645}
4646
4647static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
4648{
4649 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4650 PPSMC_MSG_ACPDPM_Enable :
4651 PPSMC_MSG_ACPDPM_Disable);
4652}
4653
/*
 * fiji_update_uvd_dpm - program UVD boot level and (un)gate UVD DPM
 * @hwmgr: hardware manager handle
 * @bgate: true to gate (disable) UVD DPM, false to enable it
 *
 * When enabling, writes the UVD boot level (highest multimedia dependency
 * entry) into the SMC DPM table and, depending on platform caps, restricts
 * the enabled-level mask to that level.  Finally toggles UVD DPM in the SMC.
 */
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot at the last mm dependency entry, or 0 if the table is empty. */
		data->smc_state_table.UvdBootLevel = 0;
		if (table_info->mm_dep_table->count > 0)
			data->smc_state_table.UvdBootLevel =
					(uint8_t) (table_info->mm_dep_table->count - 1);
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
		/* Round the byte offset down to a 4-byte boundary —
		 * presumably required by the dword-wide indirect SMC access. */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		/* UvdBootLevel occupies bits 31:24 of that dword. */
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4688
4689int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
4690{
4691 const struct phm_set_power_state_input *states =
4692 (const struct phm_set_power_state_input *)input;
4693 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4694 const struct fiji_power_state *fiji_nps =
4695 cast_const_phw_fiji_power_state(states->pnew_state);
4696 const struct fiji_power_state *fiji_cps =
4697 cast_const_phw_fiji_power_state(states->pcurrent_state);
4698
4699 uint32_t mm_boot_level_offset, mm_boot_level_value;
4700 struct phm_ppt_v1_information *table_info =
4701 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4702
4703 if (fiji_nps->vce_clks.evclk >0 &&
4704 (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
4705 data->smc_state_table.VceBootLevel =
4706 (uint8_t) (table_info->mm_dep_table->count - 1);
4707
4708 mm_boot_level_offset = data->dpm_table_start +
4709 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
4710 mm_boot_level_offset /= 4;
4711 mm_boot_level_offset *= 4;
4712 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4713 CGS_IND_REG__SMC, mm_boot_level_offset);
4714 mm_boot_level_value &= 0xFF00FFFF;
4715 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
4716 cgs_write_ind_register(hwmgr->device,
4717 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4718
4719 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4720 PHM_PlatformCaps_StablePState)) {
4721 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4722 PPSMC_MSG_VCEDPM_SetEnabledMask,
4723 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4724
4725 fiji_enable_disable_vce_dpm(hwmgr, true);
4726 } else if (fiji_nps->vce_clks.evclk == 0 &&
4727 fiji_cps != NULL &&
4728 fiji_cps->vce_clks.evclk > 0)
4729 fiji_enable_disable_vce_dpm(hwmgr, false);
4730 }
4731
4732 return 0;
4733}
4734
/*
 * fiji_update_samu_dpm - program SAMU boot level and (un)gate SAMU DPM
 * @hwmgr: hardware manager handle
 * @bgate: true to gate (disable) SAMU DPM, false to enable it
 *
 * Mirror of fiji_update_uvd_dpm() for the SAMU block.
 */
int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot at the last multimedia dependency entry. */
		data->smc_state_table.SamuBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
		/* Align the byte offset down to the containing dword. */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		/* SamuBootLevel occupies bits 7:0 of that dword. */
		mm_boot_level_value &= 0xFFFFFF00;
		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SAMUDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
	}

	return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
}
4765
/*
 * fiji_update_acp_dpm - program ACP boot level and (un)gate ACP DPM
 * @hwmgr: hardware manager handle
 * @bgate: true to gate (disable) ACP DPM, false to enable it
 *
 * Mirror of fiji_update_uvd_dpm() for the ACP block.
 */
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot at the last multimedia dependency entry. */
		data->smc_state_table.AcpBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
		/* Align the byte offset down to the containing dword. */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		/* AcpBootLevel occupies bits 15:8 of that dword. */
		mm_boot_level_value &= 0xFFFF00FF;
		mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_ACPDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.AcpBootLevel));
	}

	return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
}
4796
4797static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
4798{
4799 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4800
4801 int result = 0;
4802 uint32_t low_sclk_interrupt_threshold = 0;
4803
4804 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4805 PHM_PlatformCaps_SclkThrottleLowNotification)
4806 && (hwmgr->gfx_arbiter.sclk_threshold !=
4807 data->low_sclk_interrupt_threshold)) {
4808 data->low_sclk_interrupt_threshold =
4809 hwmgr->gfx_arbiter.sclk_threshold;
4810 low_sclk_interrupt_threshold =
4811 data->low_sclk_interrupt_threshold;
4812
4813 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
4814
4815 result = fiji_copy_bytes_to_smc(
4816 hwmgr->smumgr,
4817 data->dpm_table_start +
4818 offsetof(SMU73_Discrete_DpmTable,
4819 LowSclkInterruptThreshold),
4820 (uint8_t *)&low_sclk_interrupt_threshold,
4821 sizeof(uint32_t),
4822 data->sram_end);
4823 }
4824
4825 return result;
4826}
4827
4828static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4829{
4830 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4831
4832 if (data->need_update_smu7_dpm_table &
4833 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4834 return fiji_program_memory_timing_parameters(hwmgr);
4835
4836 return 0;
4837}
4838
4839static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4840{
4841 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4842
4843 if (0 == data->need_update_smu7_dpm_table)
4844 return 0;
4845
4846 if ((0 == data->sclk_dpm_key_disabled) &&
4847 (data->need_update_smu7_dpm_table &
4848 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4849
4850 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4851 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4852 );
4853 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4854 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4855 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4856 return -1);
4857 }
4858
4859 if ((0 == data->mclk_dpm_key_disabled) &&
4860 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4861
4862 PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
4863 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4864 );
4865 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4866 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4867 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4868 return -1);
4869 }
4870
4871 data->need_update_smu7_dpm_table = 0;
4872
4873 return 0;
4874}
4875
/* Look up the voltage matching DAL's requested level and forward it to
 * the SMC.  Intentionally a no-op on Fiji: nothing is implemented here,
 * only the hook is kept so the common upload path can call it. */
static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
}
4883
4884static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
4885{
4886 int result;
4887 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4888
4889 /* Apply minimum voltage based on DAL's request level */
4890 fiji_apply_dal_minimum_voltage_request(hwmgr);
4891
4892 if (0 == data->sclk_dpm_key_disabled) {
4893 /* Checking if DPM is running. If we discover hang because of this,
4894 * we should skip this message.
4895 */
4896 if (!fiji_is_dpm_running(hwmgr))
4897 printk(KERN_ERR "[ powerplay ] "
4898 "Trying to set Enable Mask when DPM is disabled \n");
4899
4900 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4901 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4902 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4903 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4904 PP_ASSERT_WITH_CODE((0 == result),
4905 "Set Sclk Dpm enable Mask failed", return -1);
4906 }
4907 }
4908
4909 if (0 == data->mclk_dpm_key_disabled) {
4910 /* Checking if DPM is running. If we discover hang because of this,
4911 * we should skip this message.
4912 */
4913 if (!fiji_is_dpm_running(hwmgr))
4914 printk(KERN_ERR "[ powerplay ]"
4915 " Trying to set Enable Mask when DPM is disabled \n");
4916
4917 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4918 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4919 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4920 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
4921 PP_ASSERT_WITH_CODE((0 == result),
4922 "Set Mclk Dpm enable Mask failed", return -1);
4923 }
4924 }
4925
4926 return 0;
4927}
4928
4929static int fiji_notify_link_speed_change_after_state_change(
4930 struct pp_hwmgr *hwmgr, const void *input)
4931{
4932 const struct phm_set_power_state_input *states =
4933 (const struct phm_set_power_state_input *)input;
4934 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
4935 const struct fiji_power_state *fiji_ps =
4936 cast_const_phw_fiji_power_state(states->pnew_state);
4937 uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
4938 uint8_t request;
4939
4940 if (data->pspp_notify_required) {
4941 if (target_link_speed == PP_PCIEGen3)
4942 request = PCIE_PERF_REQ_GEN3;
4943 else if (target_link_speed == PP_PCIEGen2)
4944 request = PCIE_PERF_REQ_GEN2;
4945 else
4946 request = PCIE_PERF_REQ_GEN1;
4947
4948 if(request == PCIE_PERF_REQ_GEN1 &&
4949 fiji_get_current_pcie_speed(hwmgr) > 0)
4950 return 0;
4951
4952 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
4953 if (PP_PCIEGen2 == target_link_speed)
4954 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
4955 else
4956 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
4957 }
4958 }
4959
4960 return 0;
4961}
4962
/* Top-level power-state transition sequence (hwmgr_func->power_state_set).
 *
 * Runs the ordered steps needed to move the hardware to the new power
 * state in @input: locate the requested clocks in the DPM tables, request
 * a PCIe link-speed change (when the platform supports it), freeze DPM,
 * upload the new SCLK/MCLK levels, regenerate the enable masks, update
 * VCE DPM and the SCLK threshold, reprogram memory timings, unfreeze DPM,
 * upload the enable masks and finally send the PCIe link-speed
 * notification.
 *
 * Error handling: PP_ASSERT_WITH_CODE only logs and latches the error
 * into @result, so the sequence always runs to completion and the code of
 * the last failing step is what gets returned (0 if all succeed).
 */
static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;

	tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* PCIe requests come in pairs: ask before the transition... */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	/* DPM stays frozen for the table uploads below and is unfrozen
	 * further down, after the memory timing reprogramming. */
	tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = fiji_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update VCE DPM!",
			result = tmp_result);

	tmp_result = fiji_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = fiji_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program memory timing parameters!",
			result = tmp_result);

	tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* ...and notify once the new state is in effect. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			fiji_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}

	return result;
}
5032
5033static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
5034{
5035 struct pp_power_state *ps;
5036 struct fiji_power_state *fiji_ps;
5037
5038 if (hwmgr == NULL)
5039 return -EINVAL;
5040
5041 ps = hwmgr->request_ps;
5042
5043 if (ps == NULL)
5044 return -EINVAL;
5045
5046 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5047
5048 if (low)
5049 return fiji_ps->performance_levels[0].engine_clock;
5050 else
5051 return fiji_ps->performance_levels
5052 [fiji_ps->performance_level_count-1].engine_clock;
5053}
5054
5055static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
5056{
5057 struct pp_power_state *ps;
5058 struct fiji_power_state *fiji_ps;
5059
5060 if (hwmgr == NULL)
5061 return -EINVAL;
5062
5063 ps = hwmgr->request_ps;
5064
5065 if (ps == NULL)
5066 return -EINVAL;
5067
5068 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5069
5070 if (low)
5071 return fiji_ps->performance_levels[0].memory_clock;
5072 else
5073 return fiji_ps->performance_levels
5074 [fiji_ps->performance_level_count-1].memory_clock;
5075}
5076
/* Debugfs dump (hwmgr_func->print_current_perforce_level — the
 * "perforce" spelling matches the pp_hwmgr_func member name): print the
 * current SCLK/MCLK, GPU load and UVD/VCE power-gate state into @m.
 */
static void fiji_print_current_perforce_level(
		struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent = 0;
	uint32_t offset;
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);

	/* Query the SMC; each reply is read back from SMC_MSG_ARG_0. */
	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);

	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	/* /100 converts to MHz — assumes the SMC reports in 10 kHz units;
	 * TODO confirm against the SMU73 firmware interface. */
	seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
			mclk / 100, sclk / 100);

	/* AverageGraphicsActivity is treated as a value in 1/256 units:
	 * +0x80 rounds, >>8 divides by 256 to get an integer percentage. */
	offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	activity_percent += 0x80;
	activity_percent >>= 8;

	/* Clamp in case the firmware reports more than 100%. */
	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);

	seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");

	seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
5105
/* Program the display-gap registers from the current display topology.
 *
 * Selects vblank-or-watermark gating when at least one display is active
 * (gap ignored otherwise), derives the pre-VBI time from the refresh
 * rate and vblank time, programs CG_DISPLAY_GAP_CNTL2 and the SMC soft
 * registers (PreVBlankGap, VBlankTimeout), and notifies the SMC when
 * exactly one display is active.  Always returns 0.
 */
static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* Gap gating only makes sense while a display is active. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, (num_active_displays > 0)?
			DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Fall back to 60 Hz when no refresh rate is reported. */
	if (refresh_rate == 0)
		refresh_rate = 60;

	/* Frame period in microseconds. */
	frame_time_in_us = 1000000 / refresh_rate;

	/* Time before vertical blank, minus a 200 us margin
	 * (NOTE(review): margin value taken as-is — confirm vs. SMU spec). */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	/* 0x64 == 100; units presumably match PreVBlankGap's expectation —
	 * TODO confirm against the SMU73 soft-register definitions. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			offsetof(SMU73_SoftRegisters, VBlankTimeout),
			(frame_time_in_us - pre_vbi_time_in_us));

	/* Single-display case: shared with the Tonga implementation. */
	if (num_active_displays == 1)
		tonga_notify_smc_display_change(hwmgr, true);

	return 0;
}
5159
/* Handler for display-configuration change events
 * (hwmgr_func->display_config_changed): reprogram the display gap to
 * match the new set of active displays. */
static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return fiji_program_display_gap(hwmgr);
}
5164
5165static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
5166 uint16_t us_max_fan_pwm)
5167{
5168 hwmgr->thermal_controller.
5169 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5170
5171 if (phm_is_hw_access_blocked(hwmgr))
5172 return 0;
5173
5174 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5175 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
5176}
5177
5178static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
5179 uint16_t us_max_fan_rpm)
5180{
5181 hwmgr->thermal_controller.
5182 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
5183
5184 if (phm_is_hw_access_blocked(hwmgr))
5185 return 0;
5186
5187 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5188 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
5189}
5190
5191static int fiji_dpm_set_interrupt_state(void *private_data,
5192 unsigned src_id, unsigned type,
5193 int enabled)
5194{
5195 uint32_t cg_thermal_int;
5196 struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
5197
5198 if (hwmgr == NULL)
5199 return -EINVAL;
5200
5201 switch (type) {
5202 case AMD_THERMAL_IRQ_LOW_TO_HIGH:
5203 if (enabled) {
5204 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5205 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5206 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5207 cgs_write_ind_register(hwmgr->device,
5208 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5209 } else {
5210 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5211 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5212 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5213 cgs_write_ind_register(hwmgr->device,
5214 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5215 }
5216 break;
5217
5218 case AMD_THERMAL_IRQ_HIGH_TO_LOW:
5219 if (enabled) {
5220 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5221 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5222 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5223 cgs_write_ind_register(hwmgr->device,
5224 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5225 } else {
5226 cg_thermal_int = cgs_read_ind_register(hwmgr->device,
5227 CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5228 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
5229 cgs_write_ind_register(hwmgr->device,
5230 CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5231 }
5232 break;
5233 default:
5234 break;
5235 }
5236 return 0;
5237}
5238
5239static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
5240 const void *thermal_interrupt_info)
5241{
5242 int result;
5243 const struct pp_interrupt_registration_info *info =
5244 (const struct pp_interrupt_registration_info *)
5245 thermal_interrupt_info;
5246
5247 if (info == NULL)
5248 return -EINVAL;
5249
5250 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
5251 fiji_dpm_set_interrupt_state,
5252 info->call_back, info->context);
5253
5254 if (result)
5255 return -EINVAL;
5256
5257 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
5258 fiji_dpm_set_interrupt_state,
5259 info->call_back, info->context);
5260
5261 if (result)
5262 return -EINVAL;
5263
5264 return 0;
5265}
5266
5267static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5268{
5269 if (mode) {
5270 /* stop auto-manage */
5271 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5272 PHM_PlatformCaps_MicrocodeFanControl))
5273 fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
5274 fiji_fan_ctrl_set_static_mode(hwmgr, mode);
5275 } else
5276 /* restart auto-manage */
5277 fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5278
5279 return 0;
5280}
5281
5282static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5283{
5284 if (hwmgr->fan_ctrl_is_in_default_mode)
5285 return hwmgr->fan_ctrl_default_mode;
5286 else
5287 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5288 CG_FDO_CTRL2, FDO_PWM_MODE);
5289}
5290
5291static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
5292 enum pp_clock_type type, uint32_t mask)
5293{
5294 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5295
5296 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5297 return -EINVAL;
5298
5299 switch (type) {
5300 case PP_SCLK:
5301 if (!data->sclk_dpm_key_disabled)
5302 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5303 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5304 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5305 break;
5306
5307 case PP_MCLK:
5308 if (!data->mclk_dpm_key_disabled)
5309 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5310 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5311 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5312 break;
5313
5314 case PP_PCIE:
5315 {
5316 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5317 uint32_t level = 0;
5318
5319 while (tmp >>= 1)
5320 level++;
5321
5322 if (!data->pcie_dpm_key_disabled)
5323 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5324 PPSMC_MSG_PCIeDPM_ForceLevel,
5325 level);
5326 break;
5327 }
5328 default:
5329 break;
5330 }
5331
5332 return 0;
5333}
5334
/* sysfs pp_dpm_sclk / pp_dpm_mclk / pp_dpm_pcie backend: print one line
 * per DPM level into @buf, marking with '*' the level whose value is the
 * first not below the current clock (or the current PCIe speed).
 * Returns the number of bytes written (0 for unknown types).
 */
static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
	struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Current clock comes back from the SMC in SMC_MSG_ARG_0. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* First level whose value is >= the current clock. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* /100 to MHz — assumes 10 kHz units; TODO confirm. */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = fiji_get_current_pcie_speed(hwmgr);
		/* Exact match against the table; 'now' may land past the end
		 * if the current speed is not listed (then nothing is starred). */
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Level values 0/1/2 map to Gen1 x1 / Gen2 x16 / Gen3 x16. */
		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
5399
5400static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
5401 const struct fiji_performance_level *pl2)
5402{
5403 return ((pl1->memory_clock == pl2->memory_clock) &&
5404 (pl1->engine_clock == pl2->engine_clock) &&
5405 (pl1->pcie_gen == pl2->pcie_gen) &&
5406 (pl1->pcie_lane == pl2->pcie_lane));
5407}
5408
5409static int
5410fiji_check_states_equal(struct pp_hwmgr *hwmgr,
5411 const struct pp_hw_power_state *pstate1,
5412 const struct pp_hw_power_state *pstate2, bool *equal)
5413{
5414 const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
5415 const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
5416 int i;
5417
5418 if (equal == NULL || psa == NULL || psb == NULL)
5419 return -EINVAL;
5420
5421 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5422 if (psa->performance_level_count != psb->performance_level_count) {
5423 *equal = false;
5424 return 0;
5425 }
5426
5427 for (i = 0; i < psa->performance_level_count; i++) {
5428 if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5429 /* If we have found even one performance level pair that is different the states are different. */
5430 *equal = false;
5431 return 0;
5432 }
5433 }
5434
5435 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5436 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
5437 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
5438 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5439 *equal &= (psa->acp_clk == psb->acp_clk);
5440
5441 return 0;
5442}
5443
5444static bool
5445fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5446{
5447 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5448 bool is_update_required = false;
5449 struct cgs_display_info info = {0,0,NULL};
5450
5451 cgs_get_active_displays_info(hwmgr->device, &info);
5452
5453 if (data->display_timing.num_existing_displays != info.display_count)
5454 is_update_required = true;
5455
5456 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5457 if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
5458 is_update_required = true;
5459 }
5460
5461 return is_update_required;
5462}
5463
5464static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
5465{
5466 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5467 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5468 struct fiji_single_dpm_table *golden_sclk_table =
5469 &(data->golden_dpm_table.sclk_table);
5470 int value;
5471
5472 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5473 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5474 100 /
5475 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5476
5477 return value;
5478}
5479
5480static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5481{
5482 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5483 struct fiji_single_dpm_table *golden_sclk_table =
5484 &(data->golden_dpm_table.sclk_table);
5485 struct pp_power_state *ps;
5486 struct fiji_power_state *fiji_ps;
5487
5488 if (value > 20)
5489 value = 20;
5490
5491 ps = hwmgr->request_ps;
5492
5493 if (ps == NULL)
5494 return -EINVAL;
5495
5496 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5497
5498 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
5499 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5500 value / 100 +
5501 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5502
5503 return 0;
5504}
5505
5506static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
5507{
5508 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5509 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5510 struct fiji_single_dpm_table *golden_mclk_table =
5511 &(data->golden_dpm_table.mclk_table);
5512 int value;
5513
5514 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5515 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5516 100 /
5517 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5518
5519 return value;
5520}
5521
5522static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5523{
5524 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5525 struct fiji_single_dpm_table *golden_mclk_table =
5526 &(data->golden_dpm_table.mclk_table);
5527 struct pp_power_state *ps;
5528 struct fiji_power_state *fiji_ps;
5529
5530 if (value > 20)
5531 value = 20;
5532
5533 ps = hwmgr->request_ps;
5534
5535 if (ps == NULL)
5536 return -EINVAL;
5537
5538 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5539
5540 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
5541 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5542 value / 100 +
5543 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5544
5545 return 0;
5546}
5547
5548static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5549 .backend_init = &fiji_hwmgr_backend_init,
5550 .backend_fini = &fiji_hwmgr_backend_fini,
5551 .asic_setup = &fiji_setup_asic_task,
5552 .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
5553 .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
5554 .force_dpm_level = &fiji_dpm_force_dpm_level,
5555 .get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
5556 .get_power_state_size = &fiji_get_power_state_size,
5557 .get_pp_table_entry = &fiji_get_pp_table_entry,
5558 .patch_boot_state = &fiji_patch_boot_state,
5559 .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
5560 .power_state_set = &fiji_set_power_state_tasks,
5561 .get_sclk = &fiji_dpm_get_sclk,
5562 .get_mclk = &fiji_dpm_get_mclk,
5563 .print_current_perforce_level = &fiji_print_current_perforce_level,
5564 .powergate_uvd = &fiji_phm_powergate_uvd,
5565 .powergate_vce = &fiji_phm_powergate_vce,
5566 .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
5567 .notify_smc_display_config_after_ps_adjustment =
5568 &tonga_notify_smc_display_config_after_ps_adjustment,
5569 .display_config_changed = &fiji_display_configuration_changed_task,
5570 .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
5571 .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
5572 .get_temperature = fiji_thermal_get_temperature,
5573 .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
5574 .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
5575 .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
5576 .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
5577 .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
5578 .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
5579 .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
5580 .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
5581 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
5582 .set_fan_control_mode = fiji_set_fan_control_mode,
5583 .get_fan_control_mode = fiji_get_fan_control_mode,
5584 .check_states_equal = fiji_check_states_equal,
5585 .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
5586 .force_clock_level = fiji_force_clock_level,
5587 .print_clock_levels = fiji_print_clock_levels,
5588 .get_sclk_od = fiji_get_sclk_od,
5589 .set_sclk_od = fiji_set_sclk_od,
5590 .get_mclk_od = fiji_get_mclk_od,
5591 .set_mclk_od = fiji_set_mclk_od,
5592};
5593
/* Public init entry point for Fiji: wire up the hwmgr function table,
 * the v1.0 powerplay-table parser and the thermal controller hooks on
 * @hwmgr.  Always returns 0. */
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_v1_0_funcs;
	pp_fiji_thermal_initialize(hwmgr);
	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a92c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _FIJI_HWMGR_H_
25#define _FIJI_HWMGR_H_
26
27#include "hwmgr.h"
28#include "smu73.h"
29#include "smu73_discrete.h"
30#include "ppatomctrl.h"
31#include "fiji_ppsmc.h"
32#include "pp_endian.h"
33
34#define FIJI_MAX_HARDWARE_POWERLEVELS 2
35#define FIJI_AT_DFLT 30
36
37#define FIJI_VOLTAGE_CONTROL_NONE 0x0
38#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1
39#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2
40#define FIJI_VOLTAGE_CONTROL_MERGED 0x3
41
42#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
43#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
44#define DPMTABLE_UPDATE_SCLK 0x00000004
45#define DPMTABLE_UPDATE_MCLK 0x00000008
46
47struct fiji_performance_level {
48 uint32_t memory_clock;
49 uint32_t engine_clock;
50 uint16_t pcie_gen;
51 uint16_t pcie_lane;
52};
53
54struct fiji_uvd_clocks {
55 uint32_t vclk;
56 uint32_t dclk;
57};
58
59struct fiji_vce_clocks {
60 uint32_t evclk;
61 uint32_t ecclk;
62};
63
64struct fiji_power_state {
65 uint32_t magic;
66 struct fiji_uvd_clocks uvd_clks;
67 struct fiji_vce_clocks vce_clks;
68 uint32_t sam_clk;
69 uint32_t acp_clk;
70 uint16_t performance_level_count;
71 bool dc_compatible;
72 uint32_t sclk_threshold;
73 struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
74};
75
76struct fiji_dpm_level {
77 bool enabled;
78 uint32_t value;
79 uint32_t param1;
80};
81
82#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
83#define MAX_REGULAR_DPM_NUMBER 8
84#define FIJI_MINIMUM_ENGINE_CLOCK 2500
85
86struct fiji_single_dpm_table {
87 uint32_t count;
88 struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
89};
90
91struct fiji_dpm_table {
92 struct fiji_single_dpm_table sclk_table;
93 struct fiji_single_dpm_table mclk_table;
94 struct fiji_single_dpm_table pcie_speed_table;
95 struct fiji_single_dpm_table vddc_table;
96 struct fiji_single_dpm_table vddci_table;
97 struct fiji_single_dpm_table mvdd_table;
98};
99
100struct fiji_clock_registers {
101 uint32_t vCG_SPLL_FUNC_CNTL;
102 uint32_t vCG_SPLL_FUNC_CNTL_2;
103 uint32_t vCG_SPLL_FUNC_CNTL_3;
104 uint32_t vCG_SPLL_FUNC_CNTL_4;
105 uint32_t vCG_SPLL_SPREAD_SPECTRUM;
106 uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
107 uint32_t vDLL_CNTL;
108 uint32_t vMCLK_PWRMGT_CNTL;
109 uint32_t vMPLL_AD_FUNC_CNTL;
110 uint32_t vMPLL_DQ_FUNC_CNTL;
111 uint32_t vMPLL_FUNC_CNTL;
112 uint32_t vMPLL_FUNC_CNTL_1;
113 uint32_t vMPLL_FUNC_CNTL_2;
114 uint32_t vMPLL_SS1;
115 uint32_t vMPLL_SS2;
116};
117
118struct fiji_voltage_smio_registers {
119 uint32_t vS0_VID_LOWER_SMIO_CNTL;
120};
121
122#define FIJI_MAX_LEAKAGE_COUNT 8
123struct fiji_leakage_voltage {
124 uint16_t count;
125 uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
126 uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
127};
128
129struct fiji_vbios_boot_state {
130 uint16_t mvdd_bootup_value;
131 uint16_t vddc_bootup_value;
132 uint16_t vddci_bootup_value;
133 uint32_t sclk_bootup_value;
134 uint32_t mclk_bootup_value;
135 uint16_t pcie_gen_bootup_value;
136 uint16_t pcie_lane_bootup_value;
137};
138
139struct fiji_bacos {
140 uint32_t best_match;
141 uint32_t baco_flags;
142 struct fiji_performance_level performance_level;
143};
144
145/* Ultra Low Voltage parameter structure */
146struct fiji_ulv_parm {
147 bool ulv_supported;
148 uint32_t cg_ulv_parameter;
149 uint32_t ulv_volt_change_delay;
150 struct fiji_performance_level ulv_power_level;
151};
152
153struct fiji_display_timing {
154 uint32_t min_clock_in_sr;
155 uint32_t num_existing_displays;
156};
157
158struct fiji_dpmlevel_enable_mask {
159 uint32_t uvd_dpm_enable_mask;
160 uint32_t vce_dpm_enable_mask;
161 uint32_t acp_dpm_enable_mask;
162 uint32_t samu_dpm_enable_mask;
163 uint32_t sclk_dpm_enable_mask;
164 uint32_t mclk_dpm_enable_mask;
165 uint32_t pcie_dpm_enable_mask;
166};
167
168struct fiji_pcie_perf_range {
169 uint16_t max;
170 uint16_t min;
171};
172
173struct fiji_hwmgr {
174 struct fiji_dpm_table dpm_table;
175 struct fiji_dpm_table golden_dpm_table;
176
177 uint32_t voting_rights_clients0;
178 uint32_t voting_rights_clients1;
179 uint32_t voting_rights_clients2;
180 uint32_t voting_rights_clients3;
181 uint32_t voting_rights_clients4;
182 uint32_t voting_rights_clients5;
183 uint32_t voting_rights_clients6;
184 uint32_t voting_rights_clients7;
185 uint32_t static_screen_threshold_unit;
186 uint32_t static_screen_threshold;
187 uint32_t voltage_control;
188 uint32_t vddc_vddci_delta;
189
190 uint32_t active_auto_throttle_sources;
191
192 struct fiji_clock_registers clock_registers;
193 struct fiji_voltage_smio_registers voltage_smio_registers;
194
195 bool is_memory_gddr5;
196 uint16_t acpi_vddc;
197 bool pspp_notify_required;
198 uint16_t force_pcie_gen;
199 uint16_t acpi_pcie_gen;
200 uint32_t pcie_gen_cap;
201 uint32_t pcie_lane_cap;
202 uint32_t pcie_spc_cap;
203 struct fiji_leakage_voltage vddc_leakage;
204 struct fiji_leakage_voltage Vddci_leakage;
205
206 uint32_t mvdd_control;
207 uint32_t vddc_mask_low;
208 uint32_t mvdd_mask_low;
209 uint16_t max_vddc_in_pptable;
210 uint16_t min_vddc_in_pptable;
211 uint16_t max_vddci_in_pptable;
212 uint16_t min_vddci_in_pptable;
213 uint32_t mclk_strobe_mode_threshold;
214 uint32_t mclk_stutter_mode_threshold;
215 uint32_t mclk_edc_enable_threshold;
216 uint32_t mclk_edcwr_enable_threshold;
217 bool is_uvd_enabled;
218 struct fiji_vbios_boot_state vbios_boot_state;
219
220 bool battery_state;
221 bool is_tlu_enabled;
222
223 /* ---- SMC SRAM Address of firmware header tables ---- */
224 uint32_t sram_end;
225 uint32_t dpm_table_start;
226 uint32_t soft_regs_start;
227 uint32_t mc_reg_table_start;
228 uint32_t fan_table_start;
229 uint32_t arb_table_start;
230 struct SMU73_Discrete_DpmTable smc_state_table;
231 struct SMU73_Discrete_Ulv ulv_setting;
232
233 /* ---- Stuff originally coming from Evergreen ---- */
234 uint32_t vddci_control;
235 struct pp_atomctrl_voltage_table vddc_voltage_table;
236 struct pp_atomctrl_voltage_table vddci_voltage_table;
237 struct pp_atomctrl_voltage_table mvdd_voltage_table;
238
239 uint32_t mgcg_cgtt_local2;
240 uint32_t mgcg_cgtt_local3;
241 uint32_t gpio_debug;
242 uint32_t mc_micro_code_feature;
243 uint32_t highest_mclk;
244 uint16_t acpi_vddci;
245 uint8_t mvdd_high_index;
246 uint8_t mvdd_low_index;
247 bool dll_default_on;
248 bool performance_request_registered;
249
250 /* ---- Low Power Features ---- */
251 struct fiji_bacos bacos;
252 struct fiji_ulv_parm ulv;
253
254 /* ---- CAC Stuff ---- */
255 uint32_t cac_table_start;
256 bool cac_configuration_required;
257 bool driver_calculate_cac_leakage;
258 bool cac_enabled;
259
260 /* ---- DPM2 Parameters ---- */
261 uint32_t power_containment_features;
262 bool enable_dte_feature;
263 bool enable_tdc_limit_feature;
264 bool enable_pkg_pwr_tracking_feature;
265 bool disable_uvd_power_tune_feature;
266 const struct fiji_pt_defaults *power_tune_defaults;
267 struct SMU73_Discrete_PmFuses power_tune_table;
268 uint32_t dte_tj_offset;
269 uint32_t fast_watermark_threshold;
270
271 /* ---- Phase Shedding ---- */
272 bool vddc_phase_shed_control;
273
274 /* ---- DI/DT ---- */
275 struct fiji_display_timing display_timing;
276
277 /* ---- Thermal Temperature Setting ---- */
278 struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
279 uint32_t need_update_smu7_dpm_table;
280 uint32_t sclk_dpm_key_disabled;
281 uint32_t mclk_dpm_key_disabled;
282 uint32_t pcie_dpm_key_disabled;
283 uint32_t min_engine_clocks;
284 struct fiji_pcie_perf_range pcie_gen_performance;
285 struct fiji_pcie_perf_range pcie_lane_performance;
286 struct fiji_pcie_perf_range pcie_gen_power_saving;
287 struct fiji_pcie_perf_range pcie_lane_power_saving;
288 bool use_pcie_performance_levels;
289 bool use_pcie_power_saving_levels;
290 uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
291 uint32_t mclk_activity_target;
292 uint32_t mclk_dpm0_activity_target;
293 uint32_t low_sclk_interrupt_threshold;
294 uint32_t last_mclk_dpm_enable_mask;
295 bool uvd_enabled;
296
297 /* ---- Power Gating States ---- */
298 bool uvd_power_gated;
299 bool vce_power_gated;
300 bool samu_power_gated;
301 bool acp_power_gated;
302 bool pg_acp_init;
303 bool frtc_enabled;
304 bool frtc_status_changed;
305};
306
307/* To convert to Q8.8 format for firmware */
308#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256
309
310enum Fiji_I2CLineID {
311 Fiji_I2CLineID_DDC1 = 0x90,
312 Fiji_I2CLineID_DDC2 = 0x91,
313 Fiji_I2CLineID_DDC3 = 0x92,
314 Fiji_I2CLineID_DDC4 = 0x93,
315 Fiji_I2CLineID_DDC5 = 0x94,
316 Fiji_I2CLineID_DDC6 = 0x95,
317 Fiji_I2CLineID_SCLSDA = 0x96,
318 Fiji_I2CLineID_DDCVGA = 0x97
319};
320
321#define Fiji_I2C_DDC1DATA 0
322#define Fiji_I2C_DDC1CLK 1
323#define Fiji_I2C_DDC2DATA 2
324#define Fiji_I2C_DDC2CLK 3
325#define Fiji_I2C_DDC3DATA 4
326#define Fiji_I2C_DDC3CLK 5
327#define Fiji_I2C_SDA 40
328#define Fiji_I2C_SCL 41
329#define Fiji_I2C_DDC4DATA 65
330#define Fiji_I2C_DDC4CLK 66
331#define Fiji_I2C_DDC5DATA 0x48
332#define Fiji_I2C_DDC5CLK 0x49
333#define Fiji_I2C_DDC6DATA 0x4a
334#define Fiji_I2C_DDC6CLK 0x4b
335#define Fiji_I2C_DDCVGADATA 0x4c
336#define Fiji_I2C_DDCVGACLK 0x4d
337
338#define FIJI_UNUSED_GPIO_PIN 0x7F
339
340extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
341extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
342extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
343extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
344int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
345int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
346int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
347int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
348int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
349
350#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index f5992ea0c56f..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,610 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "fiji_hwmgr.h"
27#include "fiji_powertune.h"
28#include "fiji_smumgr.h"
29#include "smu73_discrete.h"
30#include "pp_debug.h"
31
32#define VOLTAGE_SCALE 4
33#define POWERTUNE_DEFAULT_SET_MAX 1
34
35const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
36 /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
37 {1, 0xF, 0xFD,
38 /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
39 0x19, 5, 45}
40};
41
42void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
43{
44 struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
45 struct phm_ppt_v1_information *table_info =
46 (struct phm_ppt_v1_information *)(hwmgr->pptable);
47 uint32_t tmp = 0;
48
49 if(table_info &&
50 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
51 table_info->cac_dtp_table->usPowerTuneDataSetID)
52 fiji_hwmgr->power_tune_defaults =
53 &fiji_power_tune_data_set_array
54 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
55 else
56 fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
57
58 /* Assume disabled */
59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
60 PHM_PlatformCaps_CAC);
61 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
62 PHM_PlatformCaps_SQRamping);
63 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
64 PHM_PlatformCaps_DBRamping);
65 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
66 PHM_PlatformCaps_TDRamping);
67 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
68 PHM_PlatformCaps_TCPRamping);
69
70 fiji_hwmgr->dte_tj_offset = tmp;
71
72 if (!tmp) {
73 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
74 PHM_PlatformCaps_CAC);
75
76 fiji_hwmgr->fast_watermark_threshold = 100;
77
78 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
79 PHM_PlatformCaps_PowerContainment)) {
80 tmp = 1;
81 fiji_hwmgr->enable_dte_feature = tmp ? false : true;
82 fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
83 fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
84 }
85 }
86}
87
88/* PPGen has the gain setting generated in x * 100 unit
89 * This function is to convert the unit to x * 4096(0x1000) unit.
90 * This is the unit expected by SMC firmware
91 */
92static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
93{
94 uint32_t tmp;
95 tmp = raw_setting * 4096 / 100;
96 return (uint16_t)tmp;
97}
98
99static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
100{
101 switch (line) {
102 case Fiji_I2CLineID_DDC1 :
103 *scl = Fiji_I2C_DDC1CLK;
104 *sda = Fiji_I2C_DDC1DATA;
105 break;
106 case Fiji_I2CLineID_DDC2 :
107 *scl = Fiji_I2C_DDC2CLK;
108 *sda = Fiji_I2C_DDC2DATA;
109 break;
110 case Fiji_I2CLineID_DDC3 :
111 *scl = Fiji_I2C_DDC3CLK;
112 *sda = Fiji_I2C_DDC3DATA;
113 break;
114 case Fiji_I2CLineID_DDC4 :
115 *scl = Fiji_I2C_DDC4CLK;
116 *sda = Fiji_I2C_DDC4DATA;
117 break;
118 case Fiji_I2CLineID_DDC5 :
119 *scl = Fiji_I2C_DDC5CLK;
120 *sda = Fiji_I2C_DDC5DATA;
121 break;
122 case Fiji_I2CLineID_DDC6 :
123 *scl = Fiji_I2C_DDC6CLK;
124 *sda = Fiji_I2C_DDC6DATA;
125 break;
126 case Fiji_I2CLineID_SCLSDA :
127 *scl = Fiji_I2C_SCL;
128 *sda = Fiji_I2C_SDA;
129 break;
130 case Fiji_I2CLineID_DDCVGA :
131 *scl = Fiji_I2C_DDCVGACLK;
132 *sda = Fiji_I2C_DDCVGADATA;
133 break;
134 default:
135 *scl = 0;
136 *sda = 0;
137 break;
138 }
139}
140
141int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
142{
143 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
144 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
145 SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
146 struct phm_ppt_v1_information *table_info =
147 (struct phm_ppt_v1_information *)(hwmgr->pptable);
148 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
149 struct pp_advance_fan_control_parameters *fan_table=
150 &hwmgr->thermal_controller.advanceFanControlParameters;
151 uint8_t uc_scl, uc_sda;
152
153 /* TDP number of fraction bits are changed from 8 to 7 for Fiji
154 * as requested by SMC team
155 */
156 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
157 (uint16_t)(cac_dtp_table->usTDP * 128));
158 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
159 (uint16_t)(cac_dtp_table->usTDP * 128));
160
161 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
162 "Target Operating Temp is out of Range!",);
163
164 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
165 dpm_table->GpuTjHyst = 8;
166
167 dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
168
169 /* The following are for new Fiji Multi-input fan/thermal control */
170 dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
171 cac_dtp_table->usTargetOperatingTemp * 256);
172 dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
173 cac_dtp_table->usTemperatureLimitHotspot * 256);
174 dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
175 cac_dtp_table->usTemperatureLimitLiquid1 * 256);
176 dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
177 cac_dtp_table->usTemperatureLimitLiquid2 * 256);
178 dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
179 cac_dtp_table->usTemperatureLimitVrVddc * 256);
180 dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
181 cac_dtp_table->usTemperatureLimitVrMvdd * 256);
182 dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
183 cac_dtp_table->usTemperatureLimitPlx * 256);
184
185 dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
186 scale_fan_gain_settings(fan_table->usFanGainEdge));
187 dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
188 scale_fan_gain_settings(fan_table->usFanGainHotspot));
189 dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
190 scale_fan_gain_settings(fan_table->usFanGainLiquid));
191 dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
192 scale_fan_gain_settings(fan_table->usFanGainVrVddc));
193 dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
194 scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
195 dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
196 scale_fan_gain_settings(fan_table->usFanGainPlx));
197 dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
198 scale_fan_gain_settings(fan_table->usFanGainHbm));
199
200 dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
201 dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
202 dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
203 dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
204
205 get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
206 dpm_table->Liquid_I2C_LineSCL = uc_scl;
207 dpm_table->Liquid_I2C_LineSDA = uc_sda;
208
209 get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
210 dpm_table->Vr_I2C_LineSCL = uc_scl;
211 dpm_table->Vr_I2C_LineSDA = uc_sda;
212
213 get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
214 dpm_table->Plx_I2C_LineSCL = uc_scl;
215 dpm_table->Plx_I2C_LineSDA = uc_sda;
216
217 return 0;
218}
219
220static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
221{
222 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
223 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
224
225 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
226 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
227 data->power_tune_table.SviLoadLineTrimVddC = 3;
228 data->power_tune_table.SviLoadLineOffsetVddC = 0;
229
230 return 0;
231}
232
233static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
234{
235 uint16_t tdc_limit;
236 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
237 struct phm_ppt_v1_information *table_info =
238 (struct phm_ppt_v1_information *)(hwmgr->pptable);
239 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
240
241 /* TDC number of fraction bits are changed from 8 to 7
242 * for Fiji as requested by SMC team
243 */
244 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
245 data->power_tune_table.TDC_VDDC_PkgLimit =
246 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
247 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
248 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
249 data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
250
251 return 0;
252}
253
254static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
255{
256 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
257 const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
258 uint32_t temp;
259
260 if (fiji_read_smc_sram_dword(hwmgr->smumgr,
261 fuse_table_offset +
262 offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
263 (uint32_t *)&temp, data->sram_end))
264 PP_ASSERT_WITH_CODE(false,
265 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
266 return -EINVAL);
267 else {
268 data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
269 data->power_tune_table.LPMLTemperatureMin =
270 (uint8_t)((temp >> 16) & 0xff);
271 data->power_tune_table.LPMLTemperatureMax =
272 (uint8_t)((temp >> 8) & 0xff);
273 data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
274 }
275 return 0;
276}
277
278static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
279{
280 int i;
281 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
282
283 /* Currently not used. Set all to zero. */
284 for (i = 0; i < 16; i++)
285 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
286
287 return 0;
288}
289
290static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
291{
292 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
293
294 if( (hwmgr->thermal_controller.advanceFanControlParameters.
295 usFanOutputSensitivity & (1 << 15)) ||
296 0 == hwmgr->thermal_controller.advanceFanControlParameters.
297 usFanOutputSensitivity )
298 hwmgr->thermal_controller.advanceFanControlParameters.
299 usFanOutputSensitivity = hwmgr->thermal_controller.
300 advanceFanControlParameters.usDefaultFanOutputSensitivity;
301
302 data->power_tune_table.FuzzyFan_PwmSetDelta =
303 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
304 advanceFanControlParameters.usFanOutputSensitivity);
305 return 0;
306}
307
308static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
309{
310 int i;
311 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
312
313 /* Currently not used. Set all to zero. */
314 for (i = 0; i < 16; i++)
315 data->power_tune_table.GnbLPML[i] = 0;
316
317 return 0;
318}
319
320static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
321{
322 /* int i, min, max;
323 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
324 uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
325 uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
326
327 min = max = pHiVID[0];
328 for (i = 0; i < 8; i++) {
329 if (0 != pHiVID[i]) {
330 if (min > pHiVID[i])
331 min = pHiVID[i];
332 if (max < pHiVID[i])
333 max = pHiVID[i];
334 }
335
336 if (0 != pLoVID[i]) {
337 if (min > pLoVID[i])
338 min = pLoVID[i];
339 if (max < pLoVID[i])
340 max = pLoVID[i];
341 }
342 }
343
344 PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
345 data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
346 data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
347*/
348 return 0;
349}
350
351static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
352{
353 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
354 struct phm_ppt_v1_information *table_info =
355 (struct phm_ppt_v1_information *)(hwmgr->pptable);
356 uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
357 uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
358 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
359
360 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
361 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
362
363 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
364 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
365 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
366 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
367
368 return 0;
369}
370
371int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
372{
373 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
374 uint32_t pm_fuse_table_offset;
375
376 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
377 PHM_PlatformCaps_PowerContainment)) {
378 if (fiji_read_smc_sram_dword(hwmgr->smumgr,
379 SMU7_FIRMWARE_HEADER_LOCATION +
380 offsetof(SMU73_Firmware_Header, PmFuseTable),
381 &pm_fuse_table_offset, data->sram_end))
382 PP_ASSERT_WITH_CODE(false,
383 "Attempt to get pm_fuse_table_offset Failed!",
384 return -EINVAL);
385
386 /* DW6 */
387 if (fiji_populate_svi_load_line(hwmgr))
388 PP_ASSERT_WITH_CODE(false,
389 "Attempt to populate SviLoadLine Failed!",
390 return -EINVAL);
391 /* DW7 */
392 if (fiji_populate_tdc_limit(hwmgr))
393 PP_ASSERT_WITH_CODE(false,
394 "Attempt to populate TDCLimit Failed!", return -EINVAL);
395 /* DW8 */
396 if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
397 PP_ASSERT_WITH_CODE(false,
398 "Attempt to populate TdcWaterfallCtl, "
399 "LPMLTemperature Min and Max Failed!",
400 return -EINVAL);
401
402 /* DW9-DW12 */
403 if (0 != fiji_populate_temperature_scaler(hwmgr))
404 PP_ASSERT_WITH_CODE(false,
405 "Attempt to populate LPMLTemperatureScaler Failed!",
406 return -EINVAL);
407
408 /* DW13-DW14 */
409 if(fiji_populate_fuzzy_fan(hwmgr))
410 PP_ASSERT_WITH_CODE(false,
411 "Attempt to populate Fuzzy Fan Control parameters Failed!",
412 return -EINVAL);
413
414 /* DW15-DW18 */
415 if (fiji_populate_gnb_lpml(hwmgr))
416 PP_ASSERT_WITH_CODE(false,
417 "Attempt to populate GnbLPML Failed!",
418 return -EINVAL);
419
420 /* DW19 */
421 if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
422 PP_ASSERT_WITH_CODE(false,
423 "Attempt to populate GnbLPML Min and Max Vid Failed!",
424 return -EINVAL);
425
426 /* DW20 */
427 if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
428 PP_ASSERT_WITH_CODE(false,
429 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
430 "Sidd Failed!", return -EINVAL);
431
432 if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
433 (uint8_t *)&data->power_tune_table,
434 sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
435 PP_ASSERT_WITH_CODE(false,
436 "Attempt to download PmFuseTable Failed!",
437 return -EINVAL);
438 }
439 return 0;
440}
441
442int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
443{
444 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
445 int result = 0;
446
447 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
448 PHM_PlatformCaps_CAC)) {
449 int smc_result;
450 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
451 (uint16_t)(PPSMC_MSG_EnableCac));
452 PP_ASSERT_WITH_CODE((0 == smc_result),
453 "Failed to enable CAC in SMC.", result = -1);
454
455 data->cac_enabled = (0 == smc_result) ? true : false;
456 }
457 return result;
458}
459
460int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
461{
462 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
463 int result = 0;
464
465 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
466 PHM_PlatformCaps_CAC) && data->cac_enabled) {
467 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
468 (uint16_t)(PPSMC_MSG_DisableCac));
469 PP_ASSERT_WITH_CODE((smc_result == 0),
470 "Failed to disable CAC in SMC.", result = -1);
471
472 data->cac_enabled = false;
473 }
474 return result;
475}
476
477int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
478{
479 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
480
481 if(data->power_containment_features &
482 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
483 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
484 PPSMC_MSG_PkgPwrSetLimit, n);
485 return 0;
486}
487
488static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
489{
490 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
491 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
492}
493
494int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
495{
496 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
497 struct phm_ppt_v1_information *table_info =
498 (struct phm_ppt_v1_information *)(hwmgr->pptable);
499 int smc_result;
500 int result = 0;
501
502 data->power_containment_features = 0;
503 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
504 PHM_PlatformCaps_PowerContainment)) {
505 if (data->enable_dte_feature) {
506 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
507 (uint16_t)(PPSMC_MSG_EnableDTE));
508 PP_ASSERT_WITH_CODE((0 == smc_result),
509 "Failed to enable DTE in SMC.", result = -1;);
510 if (0 == smc_result)
511 data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
512 }
513
514 if (data->enable_tdc_limit_feature) {
515 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
516 (uint16_t)(PPSMC_MSG_TDCLimitEnable));
517 PP_ASSERT_WITH_CODE((0 == smc_result),
518 "Failed to enable TDCLimit in SMC.", result = -1;);
519 if (0 == smc_result)
520 data->power_containment_features |=
521 POWERCONTAINMENT_FEATURE_TDCLimit;
522 }
523
524 if (data->enable_pkg_pwr_tracking_feature) {
525 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
526 (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
527 PP_ASSERT_WITH_CODE((0 == smc_result),
528 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
529 if (0 == smc_result) {
530 struct phm_cac_tdp_table *cac_table =
531 table_info->cac_dtp_table;
532 uint32_t default_limit =
533 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
534
535 data->power_containment_features |=
536 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
537
538 if (fiji_set_power_limit(hwmgr, default_limit))
539 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
540 }
541 }
542 }
543 return result;
544}
545
546int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
547{
548 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
549 int result = 0;
550
551 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
552 PHM_PlatformCaps_PowerContainment) &&
553 data->power_containment_features) {
554 int smc_result;
555
556 if (data->power_containment_features &
557 POWERCONTAINMENT_FEATURE_TDCLimit) {
558 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
559 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
560 PP_ASSERT_WITH_CODE((smc_result == 0),
561 "Failed to disable TDCLimit in SMC.",
562 result = smc_result);
563 }
564
565 if (data->power_containment_features &
566 POWERCONTAINMENT_FEATURE_DTE) {
567 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
568 (uint16_t)(PPSMC_MSG_DisableDTE));
569 PP_ASSERT_WITH_CODE((smc_result == 0),
570 "Failed to disable DTE in SMC.",
571 result = smc_result);
572 }
573
574 if (data->power_containment_features &
575 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
576 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
577 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
578 PP_ASSERT_WITH_CODE((smc_result == 0),
579 "Failed to disable PkgPwrTracking in SMC.",
580 result = smc_result);
581 }
582 data->power_containment_features = 0;
583 }
584
585 return result;
586}
587
588int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
589{
590 struct phm_ppt_v1_information *table_info =
591 (struct phm_ppt_v1_information *)(hwmgr->pptable);
592 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
593 int adjust_percent, target_tdp;
594 int result = 0;
595
596 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
597 PHM_PlatformCaps_PowerContainment)) {
598 /* adjustment percentage has already been validated */
599 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
600 hwmgr->platform_descriptor.TDPAdjustment :
601 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
602 /* SMC requested that target_tdp to be 7 bit fraction in DPM table
603 * but message to be 8 bit fraction for messages
604 */
605 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
606 result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
607 }
608
609 return result;
610}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec772421733..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef FIJI_POWERTUNE_H
24#define FIJI_POWERTUNE_H
25
26enum fiji_pt_config_reg_type {
27 FIJI_CONFIGREG_MMR = 0,
28 FIJI_CONFIGREG_SMC_IND,
29 FIJI_CONFIGREG_DIDT_IND,
30 FIJI_CONFIGREG_CACHE,
31 FIJI_CONFIGREG_MAX
32};
33
34/* PowerContainment Features */
35#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
36#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
37#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
38
39#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
40#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
41#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
42#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
43#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
44#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
45#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
46#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
47#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
48#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
49#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
50#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
51
52struct fiji_pt_config_reg {
53 uint32_t offset;
54 uint32_t mask;
55 uint32_t shift;
56 uint32_t value;
57 enum fiji_pt_config_reg_type type;
58};
59
60struct fiji_pt_defaults
61{
62 uint8_t SviLoadLineEn;
63 uint8_t SviLoadLineVddC;
64 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
65 uint8_t TDC_MAWt;
66 uint8_t TdcWaterfallCtl;
67 uint8_t DTEAmbientTempBase;
68};
69
70void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
71int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
72int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
73int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
74int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
75int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
76int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
77int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
78int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
79
80#endif /* FIJI_POWERTUNE_H */
81
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493b8574..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef FIJI_THERMAL_H
25#define FIJI_THERMAL_H
26
27#include "hwmgr.h"
28
29#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
30#define FIJI_THERMAL_LOW_ALERT_MASK 0x2
31
32#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
33#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255
34
35#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
36#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255
37
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41
42extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
43extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
44extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
45
46extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
47extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
48extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
49extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
50extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
51extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
52extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
53extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
54extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
55extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
56extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
57extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
58extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
59extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
60
61#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 524d0dd4f0e9..1167205057b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -36,13 +36,13 @@
36#include "amd_acpi.h" 36#include "amd_acpi.h"
37 37
38extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); 38extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
39extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
40extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
41extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
42extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
43 39
40static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
44static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); 41static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
45static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); 42static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
43static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
44static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
45static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
46 46
47uint8_t convert_to_vid(uint16_t vddc) 47uint8_t convert_to_vid(uint16_t vddc)
48{ 48{
@@ -79,21 +79,32 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
79 case AMDGPU_FAMILY_VI: 79 case AMDGPU_FAMILY_VI:
80 switch (hwmgr->chip_id) { 80 switch (hwmgr->chip_id) {
81 case CHIP_TOPAZ: 81 case CHIP_TOPAZ:
82 iceland_hwmgr_init(hwmgr); 82 topaz_set_asic_special_caps(hwmgr);
83 hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
84 PP_VBI_TIME_SUPPORT_MASK |
85 PP_ENABLE_GFX_CG_THRU_SMU);
86 hwmgr->pp_table_version = PP_TABLE_V0;
83 break; 87 break;
84 case CHIP_TONGA: 88 case CHIP_TONGA:
85 tonga_hwmgr_init(hwmgr); 89 tonga_set_asic_special_caps(hwmgr);
90 hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
91 PP_VBI_TIME_SUPPORT_MASK);
86 break; 92 break;
87 case CHIP_FIJI: 93 case CHIP_FIJI:
88 fiji_hwmgr_init(hwmgr); 94 fiji_set_asic_special_caps(hwmgr);
95 hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
96 PP_VBI_TIME_SUPPORT_MASK |
97 PP_ENABLE_GFX_CG_THRU_SMU);
89 break; 98 break;
90 case CHIP_POLARIS11: 99 case CHIP_POLARIS11:
91 case CHIP_POLARIS10: 100 case CHIP_POLARIS10:
92 polaris10_hwmgr_init(hwmgr); 101 polaris_set_asic_special_caps(hwmgr);
102 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
93 break; 103 break;
94 default: 104 default:
95 return -EINVAL; 105 return -EINVAL;
96 } 106 }
107 smu7_hwmgr_init(hwmgr);
97 break; 108 break;
98 default: 109 default:
99 return -EINVAL; 110 return -EINVAL;
@@ -388,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
388 399
389 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; 400 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
390 401
391 PP_ASSERT_WITH_CODE(count <= max, 402 dpm_table->count = count > max ? max : count;
392 "Fatal error, can not set up single DPM table entries to exceed max number!",
393 );
394 403
395 dpm_table->count = count; 404 for (i = 0; i < dpm_table->count; i++)
396 for (i = 0; i < max; i++)
397 dpm_table->dpm_level[i].enabled = false; 405 dpm_table->dpm_level[i].enabled = false;
398 406
399 return 0; 407 return 0;
@@ -713,3 +721,95 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
713 return ret; 721 return ret;
714} 722}
715 723
724int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
725{
726 /* power tune caps Assume disabled */
727 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
728 PHM_PlatformCaps_SQRamping);
729 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
730 PHM_PlatformCaps_DBRamping);
731 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
732 PHM_PlatformCaps_TDRamping);
733 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
734 PHM_PlatformCaps_TCPRamping);
735
736 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
737 PHM_PlatformCaps_CAC);
738
739 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
740 PHM_PlatformCaps_RegulatorHot);
741
742 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
743 PHM_PlatformCaps_AutomaticDCTransition);
744
745 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
746 PHM_PlatformCaps_TablelessHardwareInterface);
747
748 if (hwmgr->chip_id == CHIP_POLARIS11)
749 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
750 PHM_PlatformCaps_SPLLShutdownSupport);
751 return 0;
752}
753
754int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
755{
756 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
757 PHM_PlatformCaps_SQRamping);
758 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
759 PHM_PlatformCaps_DBRamping);
760 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
761 PHM_PlatformCaps_TDRamping);
762 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
763 PHM_PlatformCaps_TCPRamping);
764
765 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
766 PHM_PlatformCaps_TablelessHardwareInterface);
767
768 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
769 PHM_PlatformCaps_CAC);
770 return 0;
771}
772
773int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
774{
775 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
776 PHM_PlatformCaps_SQRamping);
777 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
778 PHM_PlatformCaps_DBRamping);
779 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
780 PHM_PlatformCaps_TDRamping);
781 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
782 PHM_PlatformCaps_TCPRamping);
783
784 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
785 PHM_PlatformCaps_UVDPowerGating);
786 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
787 PHM_PlatformCaps_VCEPowerGating);
788
789 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
790 PHM_PlatformCaps_TablelessHardwareInterface);
791
792 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
793 PHM_PlatformCaps_CAC);
794
795 return 0;
796}
797
798int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
799{
800 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
801 PHM_PlatformCaps_SQRamping);
802 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
803 PHM_PlatformCaps_DBRamping);
804 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
805 PHM_PlatformCaps_TDRamping);
806 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
807 PHM_PlatformCaps_TCPRamping);
808 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
809 PHM_PlatformCaps_TablelessHardwareInterface);
810 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
811 PHM_PlatformCaps_CAC);
812 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
813 PHM_PlatformCaps_EVV);
814 return 0;
815}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
deleted file mode 100644
index 47949f5cd073..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
26#include "hwmgr.h"
27#include "iceland_clockpowergating.h"
28#include "ppsmc.h"
29#include "iceland_hwmgr.h"
30
31int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
32{
33 /* iceland does not have MM hardware block */
34 return 0;
35}
36
37static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
38{
39 /* iceland does not have MM hardware block */
40 return 0;
41}
42
43static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
44{
45 /* iceland does not have MM hardware block */
46 return 0;
47}
48
49static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr)
50{
51 /* iceland does not have MM hardware block */
52 return 0;
53}
54
55int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum
56 PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
57{
58 int ret = 0;
59
60 switch (block) {
61 case PHM_AsicBlock_UVD_MVC:
62 case PHM_AsicBlock_UVD:
63 case PHM_AsicBlock_UVD_HD:
64 case PHM_AsicBlock_UVD_SD:
65 if (gating == PHM_ClockGateSetting_StaticOff)
66 ret = iceland_phm_powerdown_uvd(hwmgr);
67 else
68 ret = iceland_phm_powerup_uvd(hwmgr);
69 break;
70 case PHM_AsicBlock_GFX:
71 default:
72 break;
73 }
74
75 return ret;
76}
77
78int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
79{
80 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
81
82 data->uvd_power_gated = false;
83 data->vce_power_gated = false;
84
85 iceland_phm_powerup_uvd(hwmgr);
86 iceland_phm_powerup_vce(hwmgr);
87
88 return 0;
89}
90
91int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
92{
93 if (bgate) {
94 iceland_update_uvd_dpm(hwmgr, true);
95 iceland_phm_powerdown_uvd(hwmgr);
96 } else {
97 iceland_phm_powerup_uvd(hwmgr);
98 iceland_update_uvd_dpm(hwmgr, false);
99 }
100
101 return 0;
102}
103
104int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
105{
106 if (bgate)
107 return iceland_phm_powerdown_vce(hwmgr);
108 else
109 return iceland_phm_powerup_vce(hwmgr);
110
111 return 0;
112}
113
114int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
115 const uint32_t *msg_id)
116{
117 /* iceland does not have MM hardware block */
118 return 0;
119}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
deleted file mode 100644
index ff5ef00c7c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
26#ifndef _ICELAND_CLOCK_POWER_GATING_H_
27#define _ICELAND_CLOCK_POWER_GATING_H_
28
29#include "iceland_hwmgr.h"
30#include "pp_asicblocks.h"
31
32extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
33extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
34extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
35extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
36extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
37extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
38#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
deleted file mode 100644
index a7b4bc6caea2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef ICELAND_DYN_DEFAULTS_H
2#define ICELAND_DYN_DEFAULTS_H
3
4enum ICELANDdpm_TrendDetection
5{
6 ICELANDdpm_TrendDetection_AUTO,
7 ICELANDdpm_TrendDetection_UP,
8 ICELANDdpm_TrendDetection_DOWN
9};
10typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;
11
12
13#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
14#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
15#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
16#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
17#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
18#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
19#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
20#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
21
22
23#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200
24
25#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0
26
27#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8
28
29#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
30
31#define PPICELAND_REFERENCEDIVIDER_DFLT 4
32
33#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687
34
35#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035
36#define PPICELAND_CGULVCONTROL_DFLT 0x00007450
37#define PPICELAND_TARGETACTIVITY_DFLT 30
38#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10
39
40#endif
41
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
deleted file mode 100644
index 5abe43360ec0..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ /dev/null
@@ -1,5684 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/fb.h>
28#include "linux/delay.h"
29#include "pp_acpi.h"
30#include "hwmgr.h"
31#include <atombios.h>
32#include "iceland_hwmgr.h"
33#include "pptable.h"
34#include "processpptables.h"
35#include "pp_debug.h"
36#include "ppsmc.h"
37#include "cgs_common.h"
38#include "pppcielanes.h"
39#include "iceland_dyn_defaults.h"
40#include "smumgr.h"
41#include "iceland_smumgr.h"
42#include "iceland_clockpowergating.h"
43#include "iceland_thermal.h"
44#include "iceland_powertune.h"
45
46#include "gmc/gmc_8_1_d.h"
47#include "gmc/gmc_8_1_sh_mask.h"
48
49#include "bif/bif_5_0_d.h"
50#include "bif/bif_5_0_sh_mask.h"
51
52#include "smu/smu_7_1_1_d.h"
53#include "smu/smu_7_1_1_sh_mask.h"
54
55#include "cgs_linux.h"
56#include "eventmgr.h"
57#include "amd_pcie_helpers.h"
58
59#define MC_CG_ARB_FREQ_F0 0x0a
60#define MC_CG_ARB_FREQ_F1 0x0b
61#define MC_CG_ARB_FREQ_F2 0x0c
62#define MC_CG_ARB_FREQ_F3 0x0d
63
64#define MC_CG_SEQ_DRAMCONF_S0 0x05
65#define MC_CG_SEQ_DRAMCONF_S1 0x06
66#define MC_CG_SEQ_YCLK_SUSPEND 0x04
67#define MC_CG_SEQ_YCLK_RESUME 0x0a
68
69#define PCIE_BUS_CLK 10000
70#define TCLK (PCIE_BUS_CLK / 10)
71
72#define SMC_RAM_END 0x40000
73#define SMC_CG_IND_START 0xc0030000
74#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
75
76#define VOLTAGE_SCALE 4
77#define VOLTAGE_VID_OFFSET_SCALE1 625
78#define VOLTAGE_VID_OFFSET_SCALE2 100
79
80const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic);
81
82#define MC_SEQ_MISC0_GDDR5_SHIFT 28
83#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
84#define MC_SEQ_MISC0_GDDR5_VALUE 5
85
86/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
87enum DPM_EVENT_SRC {
88 DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
89 DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
90 DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
91 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
92 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
93};
94
95static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr)
96{
97 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
98
99 data->clock_registers.vCG_SPLL_FUNC_CNTL =
100 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
101 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
102 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
103 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
104 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
105 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
106 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
107 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
108 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
109 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
110 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
111 data->clock_registers.vDLL_CNTL =
112 cgs_read_register(hwmgr->device, mmDLL_CNTL);
113 data->clock_registers.vMCLK_PWRMGT_CNTL =
114 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
115 data->clock_registers.vMPLL_AD_FUNC_CNTL =
116 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
117 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
118 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
119 data->clock_registers.vMPLL_FUNC_CNTL =
120 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
121 data->clock_registers.vMPLL_FUNC_CNTL_1 =
122 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
123 data->clock_registers.vMPLL_FUNC_CNTL_2 =
124 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
125 data->clock_registers.vMPLL_SS1 =
126 cgs_read_register(hwmgr->device, mmMPLL_SS1);
127 data->clock_registers.vMPLL_SS2 =
128 cgs_read_register(hwmgr->device, mmMPLL_SS2);
129
130 return 0;
131}
132
133/**
134 * Find out if memory is GDDR5.
135 *
136 * @param hwmgr the address of the powerplay hardware manager.
137 * @return always 0
138 */
139int iceland_get_memory_type(struct pp_hwmgr *hwmgr)
140{
141 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
142 uint32_t temp;
143
144 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
145
146 data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
147 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
148 MC_SEQ_MISC0_GDDR5_SHIFT));
149
150 return 0;
151}
152
153int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
154{
155 /* iceland does not have MM hardware blocks */
156 return 0;
157}
158
159/**
160 * Enables Dynamic Power Management by SMC
161 *
162 * @param hwmgr the address of the powerplay hardware manager.
163 * @return always 0
164 */
165int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
166{
167 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
168
169 return 0;
170}
171
172/**
173 * Find the MC microcode version and store it in the HwMgr struct
174 *
175 * @param hwmgr the address of the powerplay hardware manager.
176 * @return always 0
177 */
178int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
179{
180 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
181
182 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
183
184 return 0;
185}
186
187static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr)
188{
189 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
190
191 data->low_sclk_interrupt_threshold = 0;
192
193 return 0;
194}
195
196
197static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr)
198{
199 int tmp_result, result = 0;
200
201 tmp_result = iceland_read_clock_registers(hwmgr);
202 PP_ASSERT_WITH_CODE((0 == tmp_result),
203 "Failed to read clock registers!", result = tmp_result);
204
205 tmp_result = iceland_get_memory_type(hwmgr);
206 PP_ASSERT_WITH_CODE((0 == tmp_result),
207 "Failed to get memory type!", result = tmp_result);
208
209 tmp_result = iceland_enable_acpi_power_management(hwmgr);
210 PP_ASSERT_WITH_CODE((0 == tmp_result),
211 "Failed to enable ACPI power management!", result = tmp_result);
212
213 tmp_result = iceland_get_mc_microcode_version(hwmgr);
214 PP_ASSERT_WITH_CODE((0 == tmp_result),
215 "Failed to get MC microcode version!", result = tmp_result);
216
217 tmp_result = iceland_init_sclk_threshold(hwmgr);
218 PP_ASSERT_WITH_CODE((0 == tmp_result),
219 "Failed to init sclk threshold!", result = tmp_result);
220
221 return result;
222}
223
224static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr)
225{
226 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
227
228 return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control;
229}
230
231/*
232 * -------------- Voltage Tables ----------------------
233 * If the voltage table would be bigger than what will fit into the
234 * state table on the SMC keep only the higher entries.
235 */
236
237static void iceland_trim_voltage_table_to_fit_state_table(
238 struct pp_hwmgr *hwmgr,
239 uint32_t max_voltage_steps,
240 pp_atomctrl_voltage_table *voltage_table)
241{
242 unsigned int i, diff;
243
244 if (voltage_table->count <= max_voltage_steps) {
245 return;
246 }
247
248 diff = voltage_table->count - max_voltage_steps;
249
250 for (i = 0; i < max_voltage_steps; i++) {
251 voltage_table->entries[i] = voltage_table->entries[i + diff];
252 }
253
254 voltage_table->count = max_voltage_steps;
255
256 return;
257}
258
259/**
260 * Enable voltage control
261 *
262 * @param hwmgr the address of the powerplay hardware manager.
263 * @return always 0
264 */
265int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr)
266{
267 /* enable voltage control */
268 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
269
270 return 0;
271}
272
273static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr,
274 struct phm_clock_voltage_dependency_table *voltage_dependency_table,
275 pp_atomctrl_voltage_table *voltage_table)
276{
277 uint32_t i;
278
279 PP_ASSERT_WITH_CODE((NULL != voltage_table),
280 "Voltage Dependency Table empty.", return -EINVAL;);
281
282 voltage_table->mask_low = 0;
283 voltage_table->phase_delay = 0;
284 voltage_table->count = voltage_dependency_table->count;
285
286 for (i = 0; i < voltage_dependency_table->count; i++) {
287 voltage_table->entries[i].value =
288 voltage_dependency_table->entries[i].v;
289 voltage_table->entries[i].smio_low = 0;
290 }
291
292 return 0;
293}
294
295/**
296 * Create Voltage Tables.
297 *
298 * @param hwmgr the address of the powerplay hardware manager.
299 * @return always 0
300 */
301int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr)
302{
303 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
304 int result;
305
306 /* GPIO voltage */
307 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
308 result = atomctrl_get_voltage_table_v3(hwmgr,
309 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
310 &data->vddc_voltage_table);
311 PP_ASSERT_WITH_CODE((0 == result),
312 "Failed to retrieve VDDC table.", return result;);
313 } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
314 /* SVI2 VDDC voltage */
315 result = iceland_get_svi2_voltage_table(hwmgr,
316 hwmgr->dyn_state.vddc_dependency_on_mclk,
317 &data->vddc_voltage_table);
318 PP_ASSERT_WITH_CODE((0 == result),
319 "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
320 }
321
322 PP_ASSERT_WITH_CODE(
323 (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)),
324 "Too many voltage values for VDDC. Trimming to fit state table.",
325 iceland_trim_voltage_table_to_fit_state_table(hwmgr,
326 SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
327 );
328
329 /* GPIO */
330 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
331 result = atomctrl_get_voltage_table_v3(hwmgr,
332 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
333 PP_ASSERT_WITH_CODE((0 == result),
334 "Failed to retrieve VDDCI table.", return result;);
335 }
336
337 /* SVI2 VDDCI voltage */
338 if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
339 result = iceland_get_svi2_voltage_table(hwmgr,
340 hwmgr->dyn_state.vddci_dependency_on_mclk,
341 &data->vddci_voltage_table);
342 PP_ASSERT_WITH_CODE((0 == result),
343 "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
344 }
345
346 PP_ASSERT_WITH_CODE(
347 (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)),
348 "Too many voltage values for VDDCI. Trimming to fit state table.",
349 iceland_trim_voltage_table_to_fit_state_table(hwmgr,
350 SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
351 );
352
353
354 /* GPIO */
355 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
356 result = atomctrl_get_voltage_table_v3(hwmgr,
357 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
358 PP_ASSERT_WITH_CODE((0 == result),
359 "Failed to retrieve table.", return result;);
360 }
361
362 /* SVI2 voltage control */
363 if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
364 result = iceland_get_svi2_voltage_table(hwmgr,
365 hwmgr->dyn_state.mvdd_dependency_on_mclk,
366 &data->mvdd_voltage_table);
367 PP_ASSERT_WITH_CODE((0 == result),
368 "Failed to retrieve SVI2 MVDD table from dependancy table.", return result;);
369 }
370
371 PP_ASSERT_WITH_CODE(
372 (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)),
373 "Too many voltage values for MVDD. Trimming to fit state table.",
374 iceland_trim_voltage_table_to_fit_state_table(hwmgr,
375 SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
376 );
377
378 return 0;
379}
380
381/*---------------------------MC----------------------------*/
382
383uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
384{
385 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
386}
387
388bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
389{
390 bool result = true;
391
392 switch (inReg) {
393 case mmMC_SEQ_RAS_TIMING:
394 *outReg = mmMC_SEQ_RAS_TIMING_LP;
395 break;
396
397 case mmMC_SEQ_DLL_STBY:
398 *outReg = mmMC_SEQ_DLL_STBY_LP;
399 break;
400
401 case mmMC_SEQ_G5PDX_CMD0:
402 *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
403 break;
404
405 case mmMC_SEQ_G5PDX_CMD1:
406 *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
407 break;
408
409 case mmMC_SEQ_G5PDX_CTRL:
410 *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
411 break;
412
413 case mmMC_SEQ_CAS_TIMING:
414 *outReg = mmMC_SEQ_CAS_TIMING_LP;
415 break;
416
417 case mmMC_SEQ_MISC_TIMING:
418 *outReg = mmMC_SEQ_MISC_TIMING_LP;
419 break;
420
421 case mmMC_SEQ_MISC_TIMING2:
422 *outReg = mmMC_SEQ_MISC_TIMING2_LP;
423 break;
424
425 case mmMC_SEQ_PMG_DVS_CMD:
426 *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
427 break;
428
429 case mmMC_SEQ_PMG_DVS_CTL:
430 *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
431 break;
432
433 case mmMC_SEQ_RD_CTL_D0:
434 *outReg = mmMC_SEQ_RD_CTL_D0_LP;
435 break;
436
437 case mmMC_SEQ_RD_CTL_D1:
438 *outReg = mmMC_SEQ_RD_CTL_D1_LP;
439 break;
440
441 case mmMC_SEQ_WR_CTL_D0:
442 *outReg = mmMC_SEQ_WR_CTL_D0_LP;
443 break;
444
445 case mmMC_SEQ_WR_CTL_D1:
446 *outReg = mmMC_SEQ_WR_CTL_D1_LP;
447 break;
448
449 case mmMC_PMG_CMD_EMRS:
450 *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
451 break;
452
453 case mmMC_PMG_CMD_MRS:
454 *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
455 break;
456
457 case mmMC_PMG_CMD_MRS1:
458 *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
459 break;
460
461 case mmMC_SEQ_PMG_TIMING:
462 *outReg = mmMC_SEQ_PMG_TIMING_LP;
463 break;
464
465 case mmMC_PMG_CMD_MRS2:
466 *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
467 break;
468
469 case mmMC_SEQ_WR_CTL_2:
470 *outReg = mmMC_SEQ_WR_CTL_2_LP;
471 break;
472
473 default:
474 result = false;
475 break;
476 }
477
478 return result;
479}
480
481int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table)
482{
483 uint32_t i;
484 uint16_t address;
485
486 for (i = 0; i < table->last; i++) {
487 table->mc_reg_address[i].s0 =
488 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
489 ? address : table->mc_reg_address[i].s1;
490 }
491 return 0;
492}
493
494int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table)
495{
496 uint8_t i, j;
497
498 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
499 "Invalid VramInfo table.", return -1);
500 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
501 "Invalid VramInfo table.", return -1);
502
503 for (i = 0; i < table->last; i++) {
504 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
505 }
506 ni_table->last = table->last;
507
508 for (i = 0; i < table->num_entries; i++) {
509 ni_table->mc_reg_table_entry[i].mclk_max =
510 table->mc_reg_table_entry[i].mclk_max;
511 for (j = 0; j < table->last; j++) {
512 ni_table->mc_reg_table_entry[i].mc_data[j] =
513 table->mc_reg_table_entry[i].mc_data[j];
514 }
515 }
516
517 ni_table->num_entries = table->num_entries;
518
519 return 0;
520}
521
522/**
523 * VBIOS omits some information to reduce size, we need to recover them here.
524 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
525 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
526 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
527 * 3. need to set these data for each clock range
528 *
529 * @param hwmgr the address of the powerplay hardware manager.
530 * @param table the address of MCRegTable
531 * @return always 0
532 */
533static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table)
534{
535 uint8_t i, j, k;
536 uint32_t temp_reg;
537 const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
538
539 for (i = 0, j = table->last; i < table->last; i++) {
540 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
541 "Invalid VramInfo table.", return -1);
542 switch (table->mc_reg_address[i].s1) {
543 /*
544 * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write
545 * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS, need
546 * to be update mmMC_PMG_CMD_MRS/_LP[15:0]
547 */
548 case mmMC_SEQ_MISC1:
549 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
550 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
551 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
552 for (k = 0; k < table->num_entries; k++) {
553 table->mc_reg_table_entry[k].mc_data[j] =
554 ((temp_reg & 0xffff0000)) |
555 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
556 }
557 j++;
558 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
559 "Invalid VramInfo table.", return -1);
560
561 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
562 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
563 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
564 for (k = 0; k < table->num_entries; k++) {
565 table->mc_reg_table_entry[k].mc_data[j] =
566 (temp_reg & 0xffff0000) |
567 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
568
569 if (!data->is_memory_GDDR5) {
570 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
571 }
572 }
573 j++;
574 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
575 "Invalid VramInfo table.", return -1);
576
577 if (!data->is_memory_GDDR5) {
578 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
579 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
580 for (k = 0; k < table->num_entries; k++) {
581 table->mc_reg_table_entry[k].mc_data[j] =
582 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
583 }
584 j++;
585 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
586 "Invalid VramInfo table.", return -1);
587 }
588
589 break;
590
591 case mmMC_SEQ_RESERVE_M:
592 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
593 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
594 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
595 for (k = 0; k < table->num_entries; k++) {
596 table->mc_reg_table_entry[k].mc_data[j] =
597 (temp_reg & 0xffff0000) |
598 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
599 }
600 j++;
601 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
602 "Invalid VramInfo table.", return -1);
603 break;
604
605 default:
606 break;
607 }
608
609 }
610
611 table->last = j;
612
613 return 0;
614}
615
616
617static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table)
618{
619 uint8_t i, j;
620 for (i = 0; i < table->last; i++) {
621 for (j = 1; j < table->num_entries; j++) {
622 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
623 table->mc_reg_table_entry[j].mc_data[i]) {
624 table->validflag |= (1<<i);
625 break;
626 }
627 }
628 }
629
630 return 0;
631}
632
633static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
634{
635 int result;
636 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
637 pp_atomctrl_mc_reg_table *table;
638 phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table;
639 uint8_t module_index = iceland_get_memory_module_index(hwmgr);
640
641 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
642
643 if (NULL == table)
644 return -ENOMEM;
645
646 /* Program additional LP registers that are no longer programmed by VBIOS */
647 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
648 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
649 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
650 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
651 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
652 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
653 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
654 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
655 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
656 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
657 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
658 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
659 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
660 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
661 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
662 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
663 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
664 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
665 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
666 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
667
668 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
669
670 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
671
672 if (0 == result)
673 result = iceland_copy_vbios_smc_reg_table(table, ni_table);
674
675 if (0 == result) {
676 iceland_set_s0_mc_reg_index(ni_table);
677 result = iceland_set_mc_special_registers(hwmgr, ni_table);
678 }
679
680 if (0 == result)
681 iceland_set_valid_flag(ni_table);
682
683 kfree(table);
684 return result;
685}
686
687/**
688 * Programs static screed detection parameters
689 *
690 * @param hwmgr the address of the powerplay hardware manager.
691 * @return always 0
692 */
693int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
694{
695 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
696
697 /* Set static screen threshold unit*/
698 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
699 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
700 data->static_screen_threshold_unit);
701 /* Set static screen threshold*/
702 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
703 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
704 data->static_screen_threshold);
705
706 return 0;
707}
708
709/**
710 * Setup display gap for glitch free memory clock switching.
711 *
712 * @param hwmgr the address of the powerplay hardware manager.
713 * @return always 0
714 */
715int iceland_enable_display_gap(struct pp_hwmgr *hwmgr)
716{
717 uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
718 CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
719
720 display_gap = PHM_SET_FIELD(display_gap,
721 CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
722
723 display_gap = PHM_SET_FIELD(display_gap,
724 CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
725
726 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
727 ixCG_DISPLAY_GAP_CNTL, display_gap);
728
729 return 0;
730}
731
732/**
733 * Programs activity state transition voting clients
734 *
735 * @param hwmgr the address of the powerplay hardware manager.
736 * @return always 0
737 */
738int iceland_program_voting_clients(struct pp_hwmgr *hwmgr)
739{
740 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
741
742 /* Clear reset for voting clients before enabling DPM */
743 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
744 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
745 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
746 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
747
748 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
749 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
750 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
751 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
752 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
753 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
754 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
755 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
756 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
757 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
758 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
759 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
760 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
761 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
762 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
763 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
764
765 return 0;
766}
767
768static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
769{
770 int ret = 0;
771
772 if (!iceland_is_smc_ram_running(hwmgr->smumgr))
773 ret = iceland_smu_upload_firmware_image(hwmgr->smumgr);
774
775 return ret;
776}
777
778/**
779 * Get the location of various tables inside the FW image.
780 *
781 * @param hwmgr the address of the powerplay hardware manager.
782 * @return always 0
783 */
784static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
785{
786 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
787
788 uint32_t tmp;
789 int result;
790 bool error = 0;
791
792 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
793 SMU71_FIRMWARE_HEADER_LOCATION +
794 offsetof(SMU71_Firmware_Header, DpmTable),
795 &tmp, data->sram_end);
796
797 if (0 == result) {
798 data->dpm_table_start = tmp;
799 }
800
801 error |= (0 != result);
802
803 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
804 SMU71_FIRMWARE_HEADER_LOCATION +
805 offsetof(SMU71_Firmware_Header, SoftRegisters),
806 &tmp, data->sram_end);
807
808 if (0 == result) {
809 data->soft_regs_start = tmp;
810 }
811
812 error |= (0 != result);
813
814
815 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
816 SMU71_FIRMWARE_HEADER_LOCATION +
817 offsetof(SMU71_Firmware_Header, mcRegisterTable),
818 &tmp, data->sram_end);
819
820 if (0 == result) {
821 data->mc_reg_table_start = tmp;
822 }
823
824 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
825 SMU71_FIRMWARE_HEADER_LOCATION +
826 offsetof(SMU71_Firmware_Header, FanTable),
827 &tmp, data->sram_end);
828
829 if (0 == result) {
830 data->fan_table_start = tmp;
831 }
832
833 error |= (0 != result);
834
835 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
836 SMU71_FIRMWARE_HEADER_LOCATION +
837 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
838 &tmp, data->sram_end);
839
840 if (0 == result) {
841 data->arb_table_start = tmp;
842 }
843
844 error |= (0 != result);
845
846
847 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
848 SMU71_FIRMWARE_HEADER_LOCATION +
849 offsetof(SMU71_Firmware_Header, Version),
850 &tmp, data->sram_end);
851
852 if (0 == result) {
853 hwmgr->microcode_version_info.SMC = tmp;
854 }
855
856 error |= (0 != result);
857
858 result = iceland_read_smc_sram_dword(hwmgr->smumgr,
859 SMU71_FIRMWARE_HEADER_LOCATION +
860 offsetof(SMU71_Firmware_Header, UlvSettings),
861 &tmp, data->sram_end);
862
863 if (0 == result) {
864 data->ulv_settings_start = tmp;
865 }
866
867 error |= (0 != result);
868
869 return error ? 1 : 0;
870}
871
872/*
873* Copy one arb setting to another and then switch the active set.
874* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
875*/
876int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
877 uint32_t arbFreqSrc, uint32_t arbFreqDest)
878{
879 uint32_t mc_arb_dram_timing;
880 uint32_t mc_arb_dram_timing2;
881 uint32_t burst_time;
882 uint32_t mc_cg_config;
883
884 switch (arbFreqSrc) {
885 case MC_CG_ARB_FREQ_F0:
886 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
887 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
888 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
889 break;
890
891 case MC_CG_ARB_FREQ_F1:
892 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
893 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
894 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
895 break;
896
897 default:
898 return -1;
899 }
900
901 switch (arbFreqDest) {
902 case MC_CG_ARB_FREQ_F0:
903 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
904 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
905 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
906 break;
907
908 case MC_CG_ARB_FREQ_F1:
909 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
910 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
911 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
912 break;
913
914 default:
915 return -1;
916 }
917
918 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
919 mc_cg_config |= 0x0000000F;
920 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
921 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
922
923 return 0;
924}
925
926/**
927 * Initial switch from ARB F0->F1
928 *
929 * @param hwmgr the address of the powerplay hardware manager.
930 * @return always 0
931 * This function is to be called from the SetPowerState table.
932 */
933int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
934{
935 return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
936}
937
938/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
939
940
941static int iceland_reset_single_dpm_table(
942 struct pp_hwmgr *hwmgr,
943 struct iceland_single_dpm_table *dpm_table,
944 uint32_t count)
945{
946 uint32_t i;
947 if (!(count <= MAX_REGULAR_DPM_NUMBER))
948 printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
949 table entries to exceed max number! \n");
950
951 dpm_table->count = count;
952 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
953 dpm_table->dpm_levels[i].enabled = 0;
954 }
955
956 return 0;
957}
958
959static void iceland_setup_pcie_table_entry(
960 struct iceland_single_dpm_table *dpm_table,
961 uint32_t index, uint32_t pcie_gen,
962 uint32_t pcie_lanes)
963{
964 dpm_table->dpm_levels[index].value = pcie_gen;
965 dpm_table->dpm_levels[index].param1 = pcie_lanes;
966 dpm_table->dpm_levels[index].enabled = 1;
967}
968
969/*
970 * Set up the PCIe DPM table as follows:
971 *
972 * A = Performance State, Max, Gen Speed
973 * C = Performance State, Min, Gen Speed
974 * 1 = Performance State, Max, Lane #
975 * 3 = Performance State, Min, Lane #
976 *
977 * B = Power Saving State, Max, Gen Speed
978 * D = Power Saving State, Min, Gen Speed
979 * 2 = Power Saving State, Max, Lane #
980 * 4 = Power Saving State, Min, Lane #
981 *
982 *
983 * DPM Index Gen Speed Lane #
984 * 5 A 1
985 * 4 B 2
986 * 3 C 1
987 * 2 D 2
988 * 1 C 3
989 * 0 D 4
990 *
991 */
992static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
993{
994 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
995
996 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
997 data->use_pcie_power_saving_levels),
998 "No pcie performance levels!", return -EINVAL);
999
1000 if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
1001 data->pcie_gen_power_saving = data->pcie_gen_performance;
1002 data->pcie_lane_power_saving = data->pcie_lane_performance;
1003 } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
1004 data->pcie_gen_performance = data->pcie_gen_power_saving;
1005 data->pcie_lane_performance = data->pcie_lane_power_saving;
1006 }
1007
1008 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK);
1009
1010 /* Hardcode Pcie Table */
1011 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
1012 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
1013 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1014 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
1015 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
1016 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1017 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
1018 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1019 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1020 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
1021 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1022 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1023 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
1024 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1025 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1026 iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
1027 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
1028 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
1029 data->dpm_table.pcie_speed_table.count = 6;
1030
1031 return 0;
1032
1033}
1034
1035
1036/*
1037 * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
1038 * Dynamic state patching function will then trim these state tables to the allowed range based
1039 * on the power policy or external client requests, such as UVD request, etc.
1040 */
1041static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1042{
1043 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1044 uint32_t i;
1045
1046 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
1047 hwmgr->dyn_state.vddc_dependency_on_sclk;
1048 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
1049 hwmgr->dyn_state.vddc_dependency_on_mclk;
1050 struct phm_cac_leakage_table *std_voltage_table =
1051 hwmgr->dyn_state.cac_leakage_table;
1052
1053 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
1054 "SCLK dependency table is missing. This table is mandatory", return -1);
1055 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
1056 "SCLK dependency table has to have is missing. This table is mandatory", return -1);
1057
1058 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
1059 "MCLK dependency table is missing. This table is mandatory", return -1);
1060 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
1061 "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
1062
1063 /* clear the state table to reset everything to default */
1064 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
1065 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS);
1066 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY);
1067 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC);
1068 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI);
1069 iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD);
1070
1071 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
1072 "SCLK dependency table is missing. This table is mandatory", return -1);
1073 /* Initialize Sclk DPM table based on allow Sclk values*/
1074 data->dpm_table.sclk_table.count = 0;
1075
1076 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
1077 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
1078 allowed_vdd_sclk_table->entries[i].clk) {
1079 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
1080 allowed_vdd_sclk_table->entries[i].clk;
1081 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
1082 data->dpm_table.sclk_table.count++;
1083 }
1084 }
1085
1086 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
1087 "MCLK dependency table is missing. This table is mandatory", return -1);
1088 /* Initialize Mclk DPM table based on allow Mclk values */
1089 data->dpm_table.mclk_table.count = 0;
1090 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1091 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
1092 allowed_vdd_mclk_table->entries[i].clk) {
1093 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
1094 allowed_vdd_mclk_table->entries[i].clk;
1095 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
1096 data->dpm_table.mclk_table.count++;
1097 }
1098 }
1099
1100 /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
1101 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
1102 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1103 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
1104 /* param1 is for corresponding std voltage */
1105 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
1106 }
1107
1108 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
1109 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
1110
1111 if (NULL != allowed_vdd_mclk_table) {
1112 /* Initialize Vddci DPM table based on allow Mclk values */
1113 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1114 data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1115 data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
1116 }
1117 data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
1118 }
1119
1120 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
1121
1122 if (NULL != allowed_vdd_mclk_table) {
1123 /*
1124 * Initialize MVDD DPM table based on allow Mclk
1125 * values
1126 */
1127 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
1128 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
1129 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
1130 }
1131 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
1132 }
1133
1134 /* setup PCIE gen speed levels*/
1135 iceland_setup_default_pcie_tables(hwmgr);
1136
1137 /* save a copy of the default DPM table*/
1138 memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table));
1139
1140 return 0;
1141}
1142
1143/**
1144 * @brief PhwIceland_GetVoltageOrder
1145 * Returns index of requested voltage record in lookup(table)
1146 * @param hwmgr - pointer to hardware manager
1147 * @param lookutab - lookup list to search in
1148 * @param voltage - voltage to look for
1149 * @return 0 on success
1150 */
1151uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
1152 uint16_t voltage)
1153{
1154 uint8_t count = (uint8_t) (look_up_table->count);
1155 uint8_t i;
1156
1157 PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
1158 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
1159
1160 for (i = 0; i < count; i++) {
1161 /* find first voltage equal or bigger than requested */
1162 if (look_up_table->entries[i].us_vdd >= voltage)
1163 return i;
1164 }
1165
1166 /* voltage is bigger than max voltage in the table */
1167 return i-1;
1168}
1169
1170
/*
 * Look up the standardized (SIDD) hi/lo voltage pair for the voltage
 * table entry @tab.
 *
 * Both outputs default to the raw table voltage scaled by VOLTAGE_SCALE.
 * If a CAC leakage table exists, the entry matching @tab->value in the
 * SCLK/VDDC dependency table (exact match first, then first entry >= the
 * requested voltage) selects the Vddc/Leakage pair used instead; an
 * out-of-range index clamps to the last CAC-table entry.
 *
 * Returns 0 on success (including all vddc fall-back cases), -EINVAL if
 * the SCLK/VDDC dependency table is missing.
 */
static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
	pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
	uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	/* fall-back: raw table voltage in SMC units */
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	/* SCLK/VDDC Dependency Table has to exist. */
	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warning("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	/*
	 * Since voltage in the sclk/vddc dependency table is not
	 * necessarily in ascending order because of ELB voltage
	 * patching, loop through entire list to find exact voltage.
	 */
	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			/* same index selects the CAC leakage entry; clamp if CAC table is shorter */
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	/*
	 * If voltage is not found in the first pass, loop again to
	 * find the best match, equal or higher value.
	 */
	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}
1236
1237static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
1238 pp_atomctrl_voltage_table_entry *tab,
1239 SMU71_Discrete_VoltageLevel *smc_voltage_tab) {
1240 int result;
1241
1242
1243 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
1244 &smc_voltage_tab->StdVoltageHiSidd,
1245 &smc_voltage_tab->StdVoltageLoSidd);
1246 if (0 != result) {
1247 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
1248 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
1249 }
1250
1251 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
1252 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
1253 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
1254
1255 return 0;
1256}
1257
1258/**
1259 * Vddc table preparation for SMC.
1260 *
1261 * @param hwmgr the address of the hardware manager
1262 * @param table the SMC DPM table structure to be populated
1263 * @return always 0
1264 */
1265static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
1266 SMU71_Discrete_DpmTable *table)
1267{
1268 unsigned int count;
1269 int result;
1270
1271 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1272
1273 table->VddcLevelCount = data->vddc_voltage_table.count;
1274 for (count = 0; count < table->VddcLevelCount; count++) {
1275 result = iceland_populate_smc_voltage_table(hwmgr,
1276 &data->vddc_voltage_table.entries[count],
1277 &table->VddcLevel[count]);
1278 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
1279
1280 /* GPIO voltage control */
1281 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
1282 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
1283 else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1284 table->VddcLevel[count].Smio = 0;
1285 }
1286
1287 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1288
1289 return 0;
1290}
1291
1292/**
1293 * Vddci table preparation for SMC.
1294 *
1295 * @param *hwmgr The address of the hardware manager.
1296 * @param *table The SMC DPM table structure to be populated.
1297 * @return 0
1298 */
1299static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
1300 SMU71_Discrete_DpmTable *table)
1301{
1302 int result;
1303 uint32_t count;
1304 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1305
1306 table->VddciLevelCount = data->vddci_voltage_table.count;
1307 for (count = 0; count < table->VddciLevelCount; count++) {
1308 result = iceland_populate_smc_voltage_table(hwmgr,
1309 &data->vddci_voltage_table.entries[count],
1310 &table->VddciLevel[count]);
1311 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL);
1312
1313 /* GPIO voltage control */
1314 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control)
1315 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
1316 else
1317 table->VddciLevel[count].Smio = 0;
1318 }
1319
1320 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1321
1322 return 0;
1323}
1324
1325/**
1326 * Mvdd table preparation for SMC.
1327 *
1328 * @param *hwmgr The address of the hardware manager.
1329 * @param *table The SMC DPM table structure to be populated.
1330 * @return 0
1331 */
1332static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1333 SMU71_Discrete_DpmTable *table)
1334{
1335 int result;
1336 uint32_t count;
1337 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1338
1339 table->MvddLevelCount = data->mvdd_voltage_table.count;
1340 for (count = 0; count < table->MvddLevelCount; count++) {
1341 result = iceland_populate_smc_voltage_table(hwmgr,
1342 &data->mvdd_voltage_table.entries[count],
1343 &table->MvddLevel[count]);
1344 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL);
1345
1346 /* GPIO voltage control */
1347 if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
1348 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
1349 else
1350 table->MvddLevel[count].Smio = 0;
1351 }
1352
1353 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1354
1355 return 0;
1356}
1357
1358int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
1359{
1360 int i;
1361 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
1362 uint8_t * hi_vid = data->power_tune_table.BapmVddCVidHiSidd;
1363 uint8_t * lo_vid = data->power_tune_table.BapmVddCVidLoSidd;
1364
1365 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
1366 "The CAC Leakage table does not exist!", return -EINVAL);
1367 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
1368 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
1369 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
1370 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
1371
1372 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
1373 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
1374 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
1375 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
1376 }
1377 } else {
1378 PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
1379 }
1380
1381 return 0;
1382}
1383
1384int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
1385{
1386 int i;
1387 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
1388 uint8_t *vid = data->power_tune_table.VddCVid;
1389
1390 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
1391 "There should never be more than 8 entries for VddcVid!!!",
1392 return -EINVAL);
1393
1394 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
1395 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
1396 }
1397
1398 return 0;
1399}
1400
1401/**
1402 * Preparation of voltage tables for SMC.
1403 *
1404 * @param hwmgr the address of the hardware manager
1405 * @param table the SMC DPM table structure to be populated
1406 * @return always 0
1407 */
1408
1409int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1410 SMU71_Discrete_DpmTable *table)
1411{
1412 int result;
1413
1414 result = iceland_populate_smc_vddc_table(hwmgr, table);
1415 PP_ASSERT_WITH_CODE(0 == result,
1416 "can not populate VDDC voltage table to SMC", return -1);
1417
1418 result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
1419 PP_ASSERT_WITH_CODE(0 == result,
1420 "can not populate VDDCI voltage table to SMC", return -1);
1421
1422 result = iceland_populate_smc_mvdd_table(hwmgr, table);
1423 PP_ASSERT_WITH_CODE(0 == result,
1424 "can not populate MVDD voltage table to SMC", return -1);
1425
1426 return 0;
1427}
1428
1429
1430/**
1431 * Re-generate the DPM level mask value
1432 * @param hwmgr the address of the hardware manager
1433 */
1434static uint32_t iceland_get_dpm_level_enable_mask_value(
1435 struct iceland_single_dpm_table * dpm_table)
1436{
1437 uint32_t i;
1438 uint32_t mask_value = 0;
1439
1440 for (i = dpm_table->count; i > 0; i--) {
1441 mask_value = mask_value << 1;
1442
1443 if (dpm_table->dpm_levels[i-1].enabled)
1444 mask_value |= 0x1;
1445 else
1446 mask_value &= 0xFFFFFFFE;
1447 }
1448 return mask_value;
1449}
1450
/*
 * Compute one MC ARB DRAM timing table entry for an engine/memory clock
 * pair.  Asks the VBIOS (via atomctrl) to program DRAM timings for the
 * pair, then reads the resulting MC_ARB registers back and stores them
 * in @arb_regs (the 32-bit timings byte-swapped for the SMC).
 *
 * Returns 0 on success, or the atomctrl error code.
 */
int iceland_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	/* have the VBIOS program the DRAM timings for this clock pair */
	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* read back what the VBIOS programmed */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1479
1480/**
1481 * Setup parameters for the MC ARB.
1482 *
1483 * @param hwmgr the address of the powerplay hardware manager.
1484 * @return always 0
1485 * This function is to be called from the SetPowerState table.
1486 */
1487int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1488{
1489 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1490 int result = 0;
1491 SMU71_Discrete_MCArbDramTimingTable arb_regs;
1492 uint32_t i, j;
1493
1494 memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
1495
1496 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1497 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1498 result = iceland_populate_memory_timing_parameters
1499 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1500 data->dpm_table.mclk_table.dpm_levels[j].value,
1501 &arb_regs.entries[i][j]);
1502
1503 if (0 != result) {
1504 break;
1505 }
1506 }
1507 }
1508
1509 if (0 == result) {
1510 result = iceland_copy_bytes_to_smc(
1511 hwmgr->smumgr,
1512 data->arb_table_start,
1513 (uint8_t *)&arb_regs,
1514 sizeof(SMU71_Discrete_MCArbDramTimingTable),
1515 data->sram_end
1516 );
1517 }
1518
1519 return result;
1520}
1521
1522static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
1523{
1524 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1525 struct iceland_dpm_table *dpm_table = &data->dpm_table;
1526 uint32_t i;
1527
1528 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
1529 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1530 table->LinkLevel[i].PcieGenSpeed =
1531 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1532 table->LinkLevel[i].PcieLaneCount =
1533 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1534 table->LinkLevel[i].EnabledForActivity =
1535 1;
1536 table->LinkLevel[i].SPC =
1537 (uint8_t)(data->pcie_spc_cap & 0xff);
1538 table->LinkLevel[i].DownThreshold =
1539 PP_HOST_TO_SMC_UL(5);
1540 table->LinkLevel[i].UpThreshold =
1541 PP_HOST_TO_SMC_UL(30);
1542 }
1543
1544 data->smc_state_table.LinkLevelCount =
1545 (uint8_t)dpm_table->pcie_speed_table.count;
1546 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1547 iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1548
1549 return 0;
1550}
1551
/* Intentional no-op: no UVD DPM levels are written into the SMC table
 * here — presumably UVD is handled elsewhere on Iceland; TODO confirm. */
static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
					SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1557
1558uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
1559 uint32_t voltage)
1560{
1561 uint8_t count = (uint8_t) (voltage_table->count);
1562 uint8_t i = 0;
1563
1564 PP_ASSERT_WITH_CODE((NULL != voltage_table),
1565 "Voltage Table empty.", return 0;);
1566 PP_ASSERT_WITH_CODE((0 != count),
1567 "Voltage Table empty.", return 0;);
1568
1569 for (i = 0; i < count; i++) {
1570 /* find first voltage bigger than requested */
1571 if (voltage_table->entries[i].value >= voltage)
1572 return i;
1573 }
1574
1575 /* voltage is bigger than max voltage in the table */
1576 return i - 1;
1577}
1578
/* Intentional no-op: no VCE DPM levels are written into the SMC table
 * here — presumably VCE is handled elsewhere on Iceland; TODO confirm. */
static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1584
/* Intentional no-op: no ACP DPM levels are written into the SMC table
 * on Iceland. */
static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
		SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1590
/* Intentional no-op: no SAMU DPM levels are written into the SMC table
 * on Iceland. */
static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
		SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1596
1597
1598static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1599 SMU71_Discrete_DpmTable *tab)
1600{
1601 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1602
1603 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1604 tab->SVI2Enable |= VDDC_ON_SVI2;
1605
1606 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control)
1607 tab->SVI2Enable |= VDDCI_ON_SVI2;
1608 else
1609 tab->MergedVddci = 1;
1610
1611 if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1612 tab->SVI2Enable |= MVDD_ON_SVI2;
1613
1614 PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1615 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1616
1617 return 0;
1618}
1619
1620static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
1621 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
1622 uint32_t clock, uint32_t *vol)
1623{
1624 uint32_t i = 0;
1625
1626 /* clock - voltage dependency table is empty table */
1627 if (allowed_clock_voltage_table->count == 0)
1628 return -EINVAL;
1629
1630 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1631 /* find first sclk bigger than request */
1632 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1633 *vol = allowed_clock_voltage_table->entries[i].v;
1634 return 0;
1635 }
1636 }
1637
1638 /* sclk is bigger than max sclk in the dependence table */
1639 *vol = allowed_clock_voltage_table->entries[i - 1].v;
1640
1641 return 0;
1642}
1643
/*
 * Map a memory clock to its 4-bit MCLK frequency-ratio index.  The
 * bucket parameters differ between strobe and non-strobe mode; clocks
 * below the window return 0 and clocks above it saturate at 0x0f.
 */
static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t lo_clip, hi_clip, base, step;

	if (strobe_mode) {
		lo_clip = 12500;
		hi_clip = 47500;
		base = 10000;
		step = 2500;
	} else {
		lo_clip = 65000;
		hi_clip = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < lo_clip)
		return 0x00;
	if (memory_clock > hi_clip)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1669
/*
 * Map a DDR3 memory clock to its 4-bit frequency-ratio index: 0 below
 * 10000, saturating at 0x0f from 80000 up, linear 5000-wide buckets
 * (offset by one) in between.
 */
static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1684
1685static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1686 uint32_t sclk, uint32_t *p_shed)
1687{
1688 unsigned int i;
1689
1690 /* use the minimum phase shedding */
1691 *p_shed = 1;
1692
1693 /*
1694 * PPGen ensures the phase shedding limits table is sorted
1695 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
1696 * VBIOS ensures the phase shedding masks table is sorted from
1697 * least phases enabled (phase shedding on) to most phases
1698 * enabled (phase shedding off).
1699 */
1700 for (i = 0; i < pl->count; i++) {
1701 if (sclk < pl->entries[i].Sclk) {
1702 /* Enable phase shedding */
1703 *p_shed = i;
1704 break;
1705 }
1706 }
1707
1708 return 0;
1709}
1710
1711static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1712 uint32_t memory_clock, uint32_t *p_shed)
1713{
1714 unsigned int i;
1715
1716 /* use the minimum phase shedding */
1717 *p_shed = 1;
1718
1719 /*
1720 * PPGen ensures the phase shedding limits table is sorted
1721 * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
1722 * VBIOS ensures the phase shedding masks table is sorted from
1723 * least phases enabled (phase shedding on) to most phases
1724 * enabled (phase shedding off).
1725 */
1726 for (i = 0; i < pl->count; i++) {
1727 if (memory_clock < pl->entries[i].Mclk) {
1728 /* Enable phase shedding */
1729 *p_shed = i;
1730 break;
1731 }
1732 }
1733
1734 return 0;
1735}
1736
1737/**
1738 * Populates the SMC MCLK structure using the provided memory clock
1739 *
1740 * @param hwmgr the address of the hardware manager
1741 * @param memory_clock the memory clock to use to populate the structure
1742 * @param sclk the SMC SCLK structure to be populated
1743 */
1744static int iceland_calculate_mclk_params(
1745 struct pp_hwmgr *hwmgr,
1746 uint32_t memory_clock,
1747 SMU71_Discrete_MemoryLevel *mclk,
1748 bool strobe_mode,
1749 bool dllStateOn
1750 )
1751{
1752 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
1753 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1754 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1755 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1756 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1757 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1758 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1759 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1760 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1761 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1762
1763 pp_atomctrl_memory_clock_param mpll_param;
1764 int result;
1765
1766 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1767 memory_clock, &mpll_param, strobe_mode);
1768 PP_ASSERT_WITH_CODE(0 == result,
1769 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1770
1771 /* MPLL_FUNC_CNTL setup*/
1772 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1773
1774 /* MPLL_FUNC_CNTL_1 setup*/
1775 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1776 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1777 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1778 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1779 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1780 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1781
1782 /* MPLL_AD_FUNC_CNTL setup*/
1783 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1784 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1785
1786 if (data->is_memory_GDDR5) {
1787 /* MPLL_DQ_FUNC_CNTL setup*/
1788 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1789 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1790 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1791 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1792 }
1793
1794 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1795 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1796 /*
1797 ************************************
1798 Fref = Reference Frequency
1799 NF = Feedback divider ratio
1800 NR = Reference divider ratio
1801 Fnom = Nominal VCO output frequency = Fref * NF / NR
1802 Fs = Spreading Rate
1803 D = Percentage down-spread / 2
1804 Fint = Reference input frequency to PFD = Fref / NR
1805 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1806 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1807 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1808 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1809 *************************************
1810 */
1811 pp_atomctrl_internal_ss_info ss_info;
1812 uint32_t freq_nom;
1813 uint32_t tmp;
1814 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1815
1816 /* for GDDR5 for all modes and DDR3 */
1817 if (1 == mpll_param.qdr)
1818 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1819 else
1820 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1821
1822 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1823 tmp = (freq_nom / reference_clock);
1824 tmp = tmp * tmp;
1825
1826 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1827 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
1828 /* ss.Info.speed_spectrum_rate -- in unit of khz */
1829 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1830 /* = reference_clock * 5 / speed_spectrum_rate */
1831 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1832
1833 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1834 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1835 uint32_t clkv =
1836 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1837 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1838
1839 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1840 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1841 }
1842 }
1843
1844 /* MCLK_PWRMGT_CNTL setup */
1845 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1846 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1847 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1848 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1849 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1850 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1851
1852
1853 /* Save the result data to outpupt memory level structure */
1854 mclk->MclkFrequency = memory_clock;
1855 mclk->MpllFuncCntl = mpll_func_cntl;
1856 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1857 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1858 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1859 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1860 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1861 mclk->DllCntl = dll_cntl;
1862 mclk->MpllSs1 = mpll_ss1;
1863 mclk->MpllSs2 = mpll_ss2;
1864
1865 return 0;
1866}
1867
/*
 * Fill one SMU71_Discrete_MemoryLevel for @memory_clock: minimum
 * voltages from the dependency tables, activity/hysteresis defaults,
 * strobe/EDC decisions, the MPLL register image (via
 * iceland_calculate_mclk_params), and finally the byte swaps the SMC
 * expects.
 *
 * Returns 0 on success, or the first error from a dependency lookup or
 * the MPLL parameter calculation.
 */
static int iceland_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *memory_level
		)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dllStateOn;
	struct cgs_display_info info = {0};


	/* minimum VDDC for this memory clock */
	if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* VDDCI either tracks VDDC (no separate control) or comes from its own table */
	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) {
		memory_level->MinVddci = memory_level->MinVddc;
	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	/* minimum MVDD for this memory clock */
	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
			hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	/* refine the phase count from the shedding limits table when enabled */
	if (data->vddc_phase_shed_control) {
		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	/* fixed defaults for throttle/activity/hysteresis behaviour */
	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* mclk stutter mode is deliberately left disabled (see StutterEnable = 0 above) */
	//if ((data->mclk_stutter_mode_threshold != 0) &&
	//    (memory_clock <= data->mclk_stutter_mode_threshold) &&
	//    (data->is_uvd_enabled == 0)
	//    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	//    && (data->display_timing.num_existing_displays <= 2)
	//    && (data->display_timing.num_existing_displays != 0))
	//	memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
			(memory_clock <= data->mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_GDDR5) {
		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((data->mclk_edc_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((data->mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		/* DLL state comes from the MC_SEQ fuse registers when strobing */
		if (memory_level->StrobeEnable) {
			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dllStateOn = data->dll_defaule_on;
		}
	} else {
		memory_level->StrobeRatio =
			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	/* build the MPLL register image for this clock */
	result = iceland_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);

	if (0 == result) {
		/* byte-swap everything the SMC reads */
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
2001
2002/**
2003 * Populates the SMC MVDD structure using the provided memory clock.
2004 *
2005 * @param hwmgr the address of the hardware manager
2006 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2007 * @param voltage the SMC VOLTAGE structure to be populated
2008 */
2009int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage)
2010{
2011 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2012 uint32_t i = 0;
2013
2014 if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2015 /* find mvdd value which clock is more than request */
2016 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
2017 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
2018 /* Always round to higher voltage. */
2019 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
2020 break;
2021 }
2022 }
2023
2024 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
2025 "MVDD Voltage is outside the supported range.", return -1);
2026
2027 } else {
2028 return -1;
2029 }
2030
2031 return 0;
2032}
2033
2034
2035static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
2036 SMU71_Discrete_DpmTable *table)
2037{
2038 int result = 0;
2039 const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2040 pp_atomctrl_clock_dividers_vi dividers;
2041 SMU71_Discrete_VoltageLevel voltage_level;
2042 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
2043 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
2044 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
2045 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
2046
2047 /* The ACPI state should not do DPM on DC (or ever).*/
2048 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2049
2050 if (data->acpi_vddc)
2051 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
2052 else
2053 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE);
2054
2055 table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 0 : 1;
2056
2057 /* assign zero for now*/
2058 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
2059
2060 /* get the engine clock dividers for this clock value*/
2061 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
2062 table->ACPILevel.SclkFrequency, &dividers);
2063
2064 PP_ASSERT_WITH_CODE(result == 0,
2065 "Error retrieving Engine Clock dividers from VBIOS.", return result);
2066
2067 /* divider ID for required SCLK*/
2068 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
2069 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2070 table->ACPILevel.DeepSleepDivId = 0;
2071
2072 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2073 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
2074 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2075 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
2076 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
2077 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
2078
2079 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2080 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2081 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
2082 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
2083 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
2084 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
2085 table->ACPILevel.CcPwrDynRm = 0;
2086 table->ACPILevel.CcPwrDynRm1 = 0;
2087
2088
2089 /* For various features to be enabled/disabled while this level is active.*/
2090 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
2091 /* SCLK frequency in units of 10KHz*/
2092 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
2093 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
2094 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
2095 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
2096 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
2097 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
2098 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
2099 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
2100 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
2101
2102 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2103 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2104
2105 /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
2106
2107 if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
2108 table->MemoryACPILevel.MinMvdd =
2109 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
2110 else
2111 table->MemoryACPILevel.MinMvdd = 0;
2112
2113 /* Force reset on DLL*/
2114 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2115 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
2116 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2117 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
2118
2119 /* Disable DLL in ACPIState*/
2120 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2121 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
2122 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2123 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
2124
2125 /* Enable DLL bypass signal*/
2126 dll_cntl = PHM_SET_FIELD(dll_cntl,
2127 DLL_CNTL, MRDCK0_BYPASS, 0);
2128 dll_cntl = PHM_SET_FIELD(dll_cntl,
2129 DLL_CNTL, MRDCK1_BYPASS, 0);
2130
2131 table->MemoryACPILevel.DllCntl =
2132 PP_HOST_TO_SMC_UL(dll_cntl);
2133 table->MemoryACPILevel.MclkPwrmgtCntl =
2134 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
2135 table->MemoryACPILevel.MpllAdFuncCntl =
2136 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
2137 table->MemoryACPILevel.MpllDqFuncCntl =
2138 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
2139 table->MemoryACPILevel.MpllFuncCntl =
2140 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
2141 table->MemoryACPILevel.MpllFuncCntl_1 =
2142 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
2143 table->MemoryACPILevel.MpllFuncCntl_2 =
2144 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
2145 table->MemoryACPILevel.MpllSs1 =
2146 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
2147 table->MemoryACPILevel.MpllSs2 =
2148 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
2149
2150 table->MemoryACPILevel.EnabledForThrottle = 0;
2151 table->MemoryACPILevel.EnabledForActivity = 0;
2152 table->MemoryACPILevel.UpHyst = 0;
2153 table->MemoryACPILevel.DownHyst = 100;
2154 table->MemoryACPILevel.VoltageDownHyst = 0;
2155 /* Indicates maximum activity level for this performance level.*/
2156 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
2157
2158 table->MemoryACPILevel.StutterEnable = 0;
2159 table->MemoryACPILevel.StrobeEnable = 0;
2160 table->MemoryACPILevel.EdcReadEnable = 0;
2161 table->MemoryACPILevel.EdcWriteEnable = 0;
2162 table->MemoryACPILevel.RttEnable = 0;
2163
2164 return result;
2165}
2166
2167static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
2168{
2169 int result = 0;
2170 uint32_t i;
2171
2172 for (i = 0; i < table->count; i++) {
2173 if (value == table->dpm_levels[i].value) {
2174 *boot_level = i;
2175 result = 0;
2176 }
2177 }
2178 return result;
2179}
2180
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param engine_clock the engine clock (in 10KHz units) to use to populate the structure
 * @param sclk the SMC SCLK structure to be populated
 */
int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
{
	const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* Start from the boot-time PLL register values cached in the backend. */
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider (26-bit mask). */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
	/* NOTE(review): spll_func_cntl is computed but never written into *sclk
	 * below — presumably SMU71_Discrete_GraphicsLevel carries no
	 * CgSpllFuncCntl field; confirm against smu71_discrete.h. */

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		/* Spread spectrum is specified relative to the VCO frequency. */
		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* Hand the computed values back in the SMC graphics-level structure;
	 * byte-order conversion for the SMC happens in the caller. */
	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}
2265
2266static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
2267 uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
2268{
2269 uint32_t i, temp;
2270 uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ?
2271 min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK;
2272
2273 PP_ASSERT_WITH_CODE((engine_clock >= min),
2274 "Engine clock can't satisfy stutter requirement!", return 0);
2275
2276 for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
2277 temp = engine_clock / (1 << i);
2278
2279 if(temp >= min || i == 0)
2280 break;
2281 }
2282 return (uint8_t)i;
2283}
2284
2285/**
2286 * Populates single SMC SCLK structure using the provided engine clock
2287 *
2288 * @param hwmgr the address of the hardware manager
2289 * @param engine_clock the engine clock to use to populate the structure
2290 * @param sclk the SMC SCLK structure to be populated
2291 */
2292static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
2293 uint32_t engine_clock, uint16_t sclk_activity_level_threshold,
2294 SMU71_Discrete_GraphicsLevel *graphic_level)
2295{
2296 int result;
2297 uint32_t threshold;
2298 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2299
2300 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
2301
2302
2303 /* populate graphics levels*/
2304 result = iceland_get_dependecy_volt_by_clk(hwmgr,
2305 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc);
2306 PP_ASSERT_WITH_CODE((0 == result),
2307 "can not find VDDC voltage value for VDDC engine clock dependency table", return result);
2308
2309 /* SCLK frequency in units of 10KHz*/
2310 graphic_level->SclkFrequency = engine_clock;
2311
2312 /*
2313 * Minimum VDDC phases required to support this level, it
2314 * should get from dependence table.
2315 */
2316 graphic_level->MinVddcPhases = 1;
2317
2318 if (data->vddc_phase_shed_control) {
2319 iceland_populate_phase_value_based_on_sclk(hwmgr,
2320 hwmgr->dyn_state.vddc_phase_shed_limits_table,
2321 engine_clock,
2322 &graphic_level->MinVddcPhases);
2323 }
2324
2325 /* Indicates maximum activity level for this performance level. 50% for now*/
2326 graphic_level->ActivityLevel = sclk_activity_level_threshold;
2327
2328 graphic_level->CcPwrDynRm = 0;
2329 graphic_level->CcPwrDynRm1 = 0;
2330 /* this level can be used if activity is high enough.*/
2331 graphic_level->EnabledForActivity = 1;
2332 /* this level can be used for throttling.*/
2333 graphic_level->EnabledForThrottle = 1;
2334 graphic_level->UpHyst = 0;
2335 graphic_level->DownHyst = 100;
2336 graphic_level->VoltageDownHyst = 0;
2337 graphic_level->PowerThrottle = 0;
2338
2339 threshold = engine_clock * data->fast_watermark_threshold / 100;
2340
2341 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2342 PHM_PlatformCaps_SclkDeepSleep)) {
2343 graphic_level->DeepSleepDivId =
2344 iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock,
2345 data->display_timing.min_clock_insr);
2346 }
2347
2348 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
2349 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2350
2351 if (0 == result) {
2352 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
2353 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
2354 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
2355 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
2356 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
2357 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
2358 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
2359 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
2360 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
2361 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
2362 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
2363 }
2364
2365 return result;
2366}
2367
/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * @param hwmgr the address of the hardware manager
 */
static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	struct iceland_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	/* SMC-side destination of the graphics level array inside the DPM table. */
	uint32_t level_array_adress = data->dpm_table_start +
		offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS;
	SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
	uint32_t i;
	uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
	memset(levels, 0x00, level_array_size);

	/* Fill one SMC graphics level per trimmed SCLK DPM level. */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = iceland_populate_single_graphic_level(hwmgr,
					dpm_table->sclk_table.dpm_levels[i].value,
					(uint16_t)data->activity_target[i],
					&(data->smc_state_table.GraphicsLevel[i]));
		if (0 != result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	/*
	 * Derive highest/lowest/mid enabled PCIe levels from the enable mask.
	 * NOTE(review): these scans assume pcie_dpm_enable_mask has at least one
	 * bit set; an all-zero mask would make the second loop shift past the
	 * type width (undefined behavior) — verify against the mask's producer.
	 */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
			(1 << (highest_pcie_level_enabled + 1))) != 0) {
		highest_pcie_level_enabled++;
	}

	/* Lowest enabled level: first set bit scanning up from bit 0. */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
			(1 << lowest_pcie_level_enabled)) == 0) {
		lowest_pcie_level_enabled++;
	}

	/* Count disabled levels between lowest+1 and highest. */
	while ((count < highest_pcie_level_enabled) &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
		count++;
	}

	/* Mid level: first enabled level above the lowest, clamped to highest. */
	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
		(lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled;

	/* set pcieDpmLevel to highest_pcie_level_enabled*/
	for (i = 2; i < dpm_table->sclk_table.count; i++) {
		data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
	}

	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
	data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

	/* set pcieDpmLevel to mid_pcie_level_enabled*/
	data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;

	/* level count will send to smc once at init smc table and never change*/
	result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);

	if (0 != result)
		return result;

	return 0;
}
2448
2449/**
2450 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2451 *
2452 * @param hwmgr the address of the hardware manager
2453 */
2454
2455static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2456{
2457 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2458 struct iceland_dpm_table *dpm_table = &data->dpm_table;
2459 int result;
2460 /* populate MCLK dpm table to SMU7 */
2461 uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
2462 uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
2463 SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
2464 uint32_t i;
2465
2466 memset(levels, 0x00, level_array_size);
2467
2468 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2469 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2470 "can not populate memory level as memory clock is zero", return -1);
2471 result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
2472 &(data->smc_state_table.MemoryLevel[i]));
2473 if (0 != result) {
2474 return result;
2475 }
2476 }
2477
2478 /* Only enable level 0 for now.*/
2479 data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2480
2481 /*
2482 * in order to prevent MC activity from stutter mode to push DPM up.
2483 * the UVD change complements this by putting the MCLK in a higher state
2484 * by default such that we are not effected by up threshold or and MCLK DPM latency.
2485 */
2486 data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
2487 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
2488
2489 data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
2490 data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2491 /* set highest level watermark to high*/
2492 data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
2493
2494 /* level count will send to smc once at init smc table and never change*/
2495 result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
2496 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2497
2498 if (0 != result) {
2499 return result;
2500 }
2501
2502 return 0;
2503}
2504
/* Maps a memory data-rate range [Min, Max] to the DLL_SPEED value to program. */
struct ICELAND_DLL_SPEED_SETTING
{
	uint16_t Min;     /* Minimum Data Rate*/
	uint16_t Max;     /* Maximum Data Rate*/
	uint32_t dll_speed;     /* The desired DLL_SPEED setting*/
};
2511
2512static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate)
2513{
2514 int result = 0;
2515 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2516 uint32_t voltage_response_time, ulv_voltage;
2517
2518 pstate->CcPwrDynRm = 0;
2519 pstate->CcPwrDynRm1 = 0;
2520
2521 //backbiasResponseTime is use for ULV state voltage value.
2522 result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
2523 PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
2524
2525 if(!ulv_voltage) {
2526 data->ulv.ulv_supported = false;
2527 return 0;
2528 }
2529
2530 if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) {
2531 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
2532 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
2533 pstate->VddcOffset = 0;
2534 }
2535 else {
2536 /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
2537 pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
2538 }
2539 } else {
2540 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
2541 if(ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
2542 pstate->VddcOffsetVid = 0;
2543 } else {
2544 /* used in SVI2 Mode */
2545 pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2546 }
2547 }
2548
2549 /* used in SVI2 Mode to shed phase */
2550 pstate->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
2551
2552 if (0 == result) {
2553 CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm);
2554 CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1);
2555 CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset);
2556 }
2557
2558 return result;
2559}
2560
/* Populate the SMC ULV state; currently just a single ULV level. */
static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv)
{
	return iceland_populate_ulv_level(hwmgr, ulv);
}
2565
2566static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
2567{
2568 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
2569 uint8_t count, level;
2570
2571 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
2572
2573 for (level = 0; level < count; level++) {
2574 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
2575 >= data->vbios_boot_state.sclk_bootup_value) {
2576 data->smc_state_table.GraphicsBootLevel = level;
2577 break;
2578 }
2579 }
2580
2581 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
2582
2583 for (level = 0; level < count; level++) {
2584 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
2585 >= data->vbios_boot_state.mclk_bootup_value) {
2586 data->smc_state_table.MemoryBootLevel = level;
2587 break;
2588 }
2589 }
2590
2591 return 0;
2592}
2593
/**
 * Initializes the SMC DPM table and uploads it
 *
 * Builds every level type (link, graphics, memory, ACPI, VCE, ACP, SAMU,
 * UVD), resolves boot levels and voltages, then uploads the DPM table and
 * the ULV settings to SMC SRAM.  Steps run in dependency order.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the error of the first failing step.
 *         (The original header documented a nonexistent pInput parameter
 *         and claimed "always 0"; both were inaccurate.)
 */
static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	SMU71_Discrete_DpmTable *table = &(data->smc_state_table);
	const struct phw_iceland_ulv_parm *ulv = &(data->ulv);

	result = iceland_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to setup default DPM tables!", return result;);
	memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));

	if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) {
		iceland_populate_smc_voltage_tables(hwmgr, table);
	}

	/* System flags: AC/DC GPIO transitions, stepped VDDC, GDDR5 memory. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition)) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc)) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
	}

	if (data->is_memory_GDDR5) {
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
	}

	if (ulv->ulv_supported) {
		result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting);
		PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ULV state!", return result;);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
	}

	/* Populate each level type; any failure aborts the whole init. */
	result = iceland_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Link Level!", return result;);

	result = iceland_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Graphics Level!", return result;);

	result = iceland_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Memory Level!", return result;);

	result = iceland_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACPI Level!", return result;);

	result = iceland_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize VCE Level!", return result;);

	result = iceland_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACP Level!", return result;);

	result = iceland_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize SAMU Level!", return result;);

	/*
	 * Since only the initial state is completely set up at this
	 * point (the other states are just copies of the boot state)
	 * we only need to populate the ARB settings for the initial
	 * state.
	 */
	result = iceland_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to Write ARB settings for the initial state.", return result;);

	result = iceland_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize UVD Level!", return result;);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table; a miss is non-fatal (level stays 0). */
	result = iceland_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(data->smc_state_table.GraphicsBootLevel));

	if (result)
		pr_warning("VBIOS did not find boot engine clock value in dependency table.\n");

	result = iceland_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(data->smc_state_table.MemoryBootLevel));

	if (result)
		pr_warning("VBIOS did not find boot memory clock value in dependency table.\n");

	/* Boot voltages come from the VBIOS; VDDCI falls back to VDDC when
	 * there is no dedicated VDDCI control. */
	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
	if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) {
		table->BootVddci = table->BootVddc;
	}
	else {
		table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
	}
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;

	result = iceland_populate_smc_initial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);

	result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);

	/* Fixed sampling/enable parameters for the SMC DPM state machine. */
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* Thermal limits converted to the SMC's Q8.8 fixed-point format. */
	table->TemperatureLimitHigh =
		(data->thermal_temp_setting.temperature_high *
		 ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->TemperatureLimitLow =
		(data->thermal_temp_setting.temperature_low *
		 ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;

	result = iceland_populate_smc_svi2_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to populate SVI2 setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* Convert multi-byte fields to the SMC's byte order before upload. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc)
	 * The PID controllers at the end of the table are not uploaded here. */
	result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
			offsetof(SMU71_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController),
			data->sram_end);

	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to upload dpm data to SMC memory!", return result);

	/* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
	result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
			data->ulv_settings_start,
			(uint8_t *)&(data->ulv_setting),
			sizeof(SMU71_Discrete_Ulv),
			data->sram_end);

#if 0
	/* Notify SMC to follow new GPIO scheme */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition)) {
		if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme))
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}
#endif

	return result;
}
2785
2786int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table)
2787{
2788 const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2789
2790 uint32_t i, j;
2791
2792 for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) {
2793 if (data->iceland_mc_reg_table.validflag & 1<<j) {
2794 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
2795 "Index of mc_reg_table->address[] array out of boundary", return -1);
2796 mc_reg_table->address[i].s0 =
2797 PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0);
2798 mc_reg_table->address[i].s1 =
2799 PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1);
2800 i++;
2801 }
2802 }
2803
2804 mc_reg_table->last = (uint8_t)i;
2805
2806 return 0;
2807}
2808
2809/* convert register values from driver to SMC format */
2810void iceland_convert_mc_registers(
2811 const phw_iceland_mc_reg_entry * pEntry,
2812 SMU71_Discrete_MCRegisterSet *pData,
2813 uint32_t numEntries, uint32_t validflag)
2814{
2815 uint32_t i, j;
2816
2817 for (i = 0, j = 0; j < numEntries; j++) {
2818 if (validflag & 1<<j) {
2819 pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
2820 i++;
2821 }
2822 }
2823}
2824
2825/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */
2826int iceland_convert_mc_reg_table_entry_to_smc(
2827 struct pp_hwmgr *hwmgr,
2828 const uint32_t memory_clock,
2829 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
2830 )
2831{
2832 const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2833 uint32_t i = 0;
2834
2835 for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) {
2836 if (memory_clock <=
2837 data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2838 break;
2839 }
2840 }
2841
2842 if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0))
2843 --i;
2844
2845 iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i],
2846 mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag);
2847
2848 return 0;
2849}
2850
2851int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2852 SMU71_Discrete_MCRegisters *mc_reg_table)
2853{
2854 int result = 0;
2855 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2856 int res;
2857 uint32_t i;
2858
2859 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2860 res = iceland_convert_mc_reg_table_entry_to_smc(
2861 hwmgr,
2862 data->dpm_table.mclk_table.dpm_levels[i].value,
2863 &mc_reg_table->data[i]
2864 );
2865
2866 if (0 != res)
2867 result = res;
2868 }
2869
2870 return result;
2871}
2872
2873int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
2874{
2875 int result;
2876 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
2877
2878 memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters));
2879 result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
2880 PP_ASSERT_WITH_CODE(0 == result,
2881 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
2882
2883 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
2884 PP_ASSERT_WITH_CODE(0 == result,
2885 "Failed to initialize MCRegTable for driver state!", return result;);
2886
2887 return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
2888 (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end);
2889}
2890
2891int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
2892{
2893 PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
2894
2895 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
2896}
2897
/* Enable engine clock power management by clearing the SCLK_PWRMGT_OFF bit. */
int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	/* NOTE(review): the field is an "off" switch, so writing 0 turns SCLK
	 * power management on — inferred from the field name only; confirm
	 * against the SMU register spec. */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);

	return 0;
}
2904
/*
 * Enable SCLK and MCLK DPM in the SMC, unless the corresponding
 * *_dpm_key_disabled flag is set in the backend.
 *
 * Enabling MCLK DPM additionally arms memory-channel CAC sampling: the
 * LCAC_MC0/MC1/CPL control registers are first programmed for a read
 * cycle, then (after a 10us settle) for a write cycle.  The magic
 * values written match the per-channel comments below; their exact bit
 * semantics are not visible here — presumably SMC LCAC configuration,
 * confirm against register documentation.
 *
 * Returns 0 on success, -1 if either SMC enable message fails.
 */
int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (0 == data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_DPM_Enable)),
			"Failed to enable SCLK DPM during DPM Start Function!",
			return -1);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_Enable)),
			"Failed to enable MCLK DPM during DPM Start Function!",
			return -1);

		/* Turn on CAC accumulation in the MC sequencer. */
		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x100005);/*Read */

		/* Short settle between the read and write programming phases. */
		udelay(10);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x500005);/* write */

	}

	return 0;
}
2948
/*
 * Start dynamic power management:
 *  1. turn on global power management and SCLK deep sleep,
 *  2. prepare PCIE DPM (voltage-change timeout, release LC reset),
 *  3. enable voltage DPM in the SMC,
 *  4. enable SCLK/MCLK DPM (iceland_enable_sclk_mclk_dpm),
 *  5. optionally enable PCIE DPM and the AC/DC GPIO interrupt.
 *
 * Returns 0 on success, -1 if any SMC message fails.
 */
int iceland_start_dpm(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000);

	/* Release the PCIE link controller from soft reset. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_Voltage_Cntl_Enable)),
		"Failed to enable voltage DPM during DPM Start Function!",
		return -1);

	if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) {
		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_Enable)),
			"Failed to enable pcie DPM during DPM Start Function!",
			return -1
		);
	}

	/* Let the SMC raise an interrupt on AC<->DC transitions if supported. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_Falcon_QuickTransition)) {
		smum_send_msg_to_smc(hwmgr->smumgr,
			PPSMC_MSG_EnableACDCGPIOInterrupt);
	}

	return 0;
}
2991
2992static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
2993 uint32_t sources)
2994{
2995 bool protection;
2996 enum DPM_EVENT_SRC src;
2997
2998 switch (sources) {
2999 default:
3000 printk(KERN_ERR "Unknown throttling event sources.");
3001 /* fall through */
3002 case 0:
3003 protection = false;
3004 /* src is unused */
3005 break;
3006 case (1 << PHM_AutoThrottleSource_Thermal):
3007 protection = true;
3008 src = DPM_EVENT_SRC_DIGITAL;
3009 break;
3010 case (1 << PHM_AutoThrottleSource_External):
3011 protection = true;
3012 src = DPM_EVENT_SRC_EXTERNAL;
3013 break;
3014 case (1 << PHM_AutoThrottleSource_External) |
3015 (1 << PHM_AutoThrottleSource_Thermal):
3016 protection = true;
3017 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
3018 break;
3019 }
3020 /* Order matters - don't enable thermal protection for the wrong source. */
3021 if (protection) {
3022 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
3023 DPM_EVENT_SRC, src);
3024 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3025 THERMAL_PROTECTION_DIS,
3026 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3027 PHM_PlatformCaps_ThermalController));
3028 } else
3029 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3030 THERMAL_PROTECTION_DIS, 1);
3031}
3032
3033static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3034 PHM_AutoThrottleSource source)
3035{
3036 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3037
3038 if (!(data->active_auto_throttle_sources & (1 << source))) {
3039 data->active_auto_throttle_sources |= 1 << source;
3040 iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3041 }
3042 return 0;
3043}
3044
3045static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
3046{
3047 return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
3048}
3049
3050static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr)
3051{
3052 int ret = 0;
3053
3054 if (!iceland_is_smc_ram_running(hwmgr->smumgr))
3055 ret = iceland_smu_start_smc(hwmgr->smumgr);
3056
3057 return ret;
3058}
3059
3060/**
3061* Programs the Deep Sleep registers
3062*
3063* @param pHwMgr the address of the powerplay hardware manager.
3064* @param pInput the pointer to input data (PhwEvergreen_DisplayConfiguration)
3065* @param pOutput the pointer to output data (unused)
3066* @param pStorage the pointer to temporary storage (unused)
3067* @param Result the last failure code (unused)
3068* @return always 0
3069*/
3070static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3071{
3072 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3073 PHM_PlatformCaps_SclkDeepSleep)) {
3074 if (smum_send_msg_to_smc(hwmgr->smumgr,
3075 PPSMC_MSG_MASTER_DeepSleep_ON) != 0)
3076 PP_ASSERT_WITH_CODE(false,
3077 "Attempt to enable Master Deep Sleep switch failed!",
3078 return -EINVAL);
3079 } else {
3080 if (smum_send_msg_to_smc(hwmgr->smumgr,
3081 PPSMC_MSG_MASTER_DeepSleep_OFF) != 0)
3082 PP_ASSERT_WITH_CODE(false,
3083 "Attempt to disable Master Deep Sleep switch failed!",
3084 return -EINVAL);
3085 }
3086
3087 return 0;
3088}
3089
/*
 * Full DPM bring-up sequence: voltage control, MC reg table, SMC table
 * upload, SMC start, SCLK control, deep sleep, DPM start, CAC, power
 * containment, power-control level and thermal auto-throttle.
 *
 * Every step up to and including power containment is fatal (returns
 * that step's error immediately); the last two steps only record the
 * error in @result and let the sequence finish — this asymmetry is
 * deliberate in the original code.
 *
 * Returns 0 on success or the first/last recorded error code.
 */
static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Voltage control is optional; only set it up if the ASIC has it. */
	if (cf_iceland_voltage_control(hwmgr)) {
		tmp_result = iceland_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", return tmp_result);

		tmp_result = iceland_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to contruct voltage tables!", return tmp_result);
	}

	tmp_result = iceland_initialize_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize MC reg table!", return tmp_result);

	tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to program static screen threshold parameters!", return tmp_result);

	tmp_result = iceland_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable display gap!", return tmp_result);

	tmp_result = iceland_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to program voting clients!", return tmp_result);

	tmp_result = iceland_upload_firmware(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to upload firmware header!", return tmp_result);

	tmp_result = iceland_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to process firmware header!", return tmp_result);

	tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize switch from ArbF0 to F1!", return tmp_result);

	tmp_result = iceland_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to initialize SMC table!", return tmp_result);

	tmp_result = iceland_populate_initial_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to populate initialize MC Reg table!", return tmp_result);

	tmp_result = iceland_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to populate PM fuses!", return tmp_result);

	/* start SMC */
	tmp_result = iceland_tf_start_smc(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to start SMC!", return tmp_result);

	/* enable SCLK control */
	tmp_result = iceland_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable SCLK control!", return tmp_result);

	tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to enable deep sleep!", return tmp_result);

	/* enable DPM */
	tmp_result = iceland_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to start DPM!", return tmp_result);

	tmp_result = iceland_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable SMC CAC!", return tmp_result);

	tmp_result = iceland_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable power containment!", return tmp_result);

	/* The remaining steps are non-fatal: record the error and continue. */
	tmp_result = iceland_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to power control set level!", result = tmp_result);

	tmp_result = iceland_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
		"Failed to enable thermal auto throttle!", result = tmp_result);

	return result;
}
3181
/* Tear down the backend via the common powerplay fini path. */
static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
3186
/*
 * Seed the backend with Iceland DPM defaults: ULV parameter, voting
 * rights clients, static-screen thresholds, MCLK mode thresholds,
 * PCIE gen/lane bounds, and the baseline platform capability bits.
 */
static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct phw_iceland_ulv_parm *ulv;

	ulv = &data->ulv;
	ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT;
	data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7;

	data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT;

	/* Iceland has no ABM; advertise the non-ABM path instead. */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_ABM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicACTiming);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DisableMemoryTransition);

	iceland_initialize_power_tune_defaults(hwmgr);

	/* MCLK thresholds in 10 kHz units (e.g. 40000 == 400 MHz). */
	data->mclk_strobe_mode_threshold = 40000;
	data->mclk_stutter_mode_threshold = 30000;
	data->mclk_edc_enable_threshold = 40000;
	data->mclk_edc_wr_enable_threshold = 40000;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DisableMCLS);

	/*
	 * NOTE(review): max is initialized below min (Gen1 < Gen3, 0 < 16).
	 * Presumably these are opposite-extreme seeds that later min/max
	 * accumulation tightens — confirm against the code that consumes
	 * pcie_gen_*/pcie_lane_* before "fixing" the apparent inversion.
	 */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;

	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SclkThrottleLowNotification);
}
3240
/*
 * Populate the VDDC leakage table from EVV data in the VBIOS.
 *
 * Queries atomctrl for each virtual leakage voltage ID
 * (ATOM_VIRTUAL_VOLTAGE_ID0 + i, i.e. 0xff01 + i) and records any
 * real, distinct voltage returned.  Entries that fail to read are
 * skipped (the PP_ASSERT continues the loop); voltages >= 2000 mV are
 * flagged as suspicious but still recorded.
 *
 * Returns 0 on success, -EINVAL if the EVV platform cap is missing.
 */
static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr)
{
	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
	uint16_t	virtual_voltage_id;
	uint16_t	vddc = 0;
	uint16_t	i;

	/* the count indicates actual number of entries */
	data->vddc_leakage.count = 0;
	data->vddci_leakage.count = 0;

	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
		pr_err("Iceland should always support EVV\n");
		return -EINVAL;
	}

	/* retrieve voltage for leakage ID (0xff01 + i) */
	for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) {
		virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)),
				"Error retrieving EVV voltage value!\n", continue);

		/* Voltages at or above 2V would be dangerous for the ASIC. */
		if (vddc >= 2000)
			pr_warning("Invalid VDDC value!\n");

		/* Only record real voltages that differ from the leakage ID. */
		if (vddc != 0 && vddc != virtual_voltage_id) {
			data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
			data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
			data->vddc_leakage.count++;
		}
	}

	return 0;
}
3276
3277static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr,
3278 uint32_t *vddc)
3279{
3280 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3281 uint32_t leakage_index;
3282 struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage;
3283
3284 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3285 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
3286 /*
3287 * If this voltage matches a leakage voltage ID, patch
3288 * with actual leakage voltage.
3289 */
3290 if (leakage_table->leakage_id[leakage_index] == *vddc) {
3291 /*
3292 * Need to make sure vddc is less than 2v or
3293 * else, it could burn the ASIC.
3294 */
3295 if (leakage_table->actual_voltage[leakage_index] >= 2000)
3296 pr_warning("Invalid VDDC value!\n");
3297 *vddc = leakage_table->actual_voltage[leakage_index];
3298 /* we found leakage voltage */
3299 break;
3300 }
3301 }
3302
3303 if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0)
3304 pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
3305}
3306
3307static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr,
3308 uint32_t *vddci)
3309{
3310 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3311 uint32_t leakage_index;
3312 struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage;
3313
3314 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3315 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
3316 /*
3317 * If this voltage matches a leakage voltage ID, patch
3318 * with actual leakage voltage.
3319 */
3320 if (leakage_table->leakage_id[leakage_index] == *vddci) {
3321 *vddci = leakage_table->actual_voltage[leakage_index];
3322 /* we found leakage voltage */
3323 break;
3324 }
3325 }
3326
3327 if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0)
3328 pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
3329}
3330
3331static int iceland_patch_vddc(struct pp_hwmgr *hwmgr,
3332 struct phm_clock_voltage_dependency_table *tab)
3333{
3334 uint16_t i;
3335
3336 if (tab)
3337 for (i = 0; i < tab->count; i++)
3338 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3339
3340 return 0;
3341}
3342
3343static int iceland_patch_vddci(struct pp_hwmgr *hwmgr,
3344 struct phm_clock_voltage_dependency_table *tab)
3345{
3346 uint16_t i;
3347
3348 if (tab)
3349 for (i = 0; i < tab->count; i++)
3350 iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v);
3351
3352 return 0;
3353}
3354
3355static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr,
3356 struct phm_vce_clock_voltage_dependency_table *tab)
3357{
3358 uint16_t i;
3359
3360 if (tab)
3361 for (i = 0; i < tab->count; i++)
3362 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3363
3364 return 0;
3365}
3366
3367
3368static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
3369 struct phm_uvd_clock_voltage_dependency_table *tab)
3370{
3371 uint16_t i;
3372
3373 if (tab)
3374 for (i = 0; i < tab->count; i++)
3375 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3376
3377 return 0;
3378}
3379
3380static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
3381 struct phm_phase_shedding_limits_table *tab)
3382{
3383 uint16_t i;
3384
3385 if (tab)
3386 for (i = 0; i < tab->count; i++)
3387 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage);
3388
3389 return 0;
3390}
3391
3392static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr,
3393 struct phm_samu_clock_voltage_dependency_table *tab)
3394{
3395 uint16_t i;
3396
3397 if (tab)
3398 for (i = 0; i < tab->count; i++)
3399 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3400
3401 return 0;
3402}
3403
3404static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr,
3405 struct phm_acp_clock_voltage_dependency_table *tab)
3406{
3407 uint16_t i;
3408
3409 if (tab)
3410 for (i = 0; i < tab->count; i++)
3411 iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
3412
3413 return 0;
3414}
3415
3416static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr,
3417 struct phm_clock_and_voltage_limits *tab)
3418{
3419 if (tab) {
3420 iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc);
3421 iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci);
3422 }
3423
3424 return 0;
3425}
3426
3427static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
3428{
3429 uint32_t i;
3430 uint32_t vddc;
3431
3432 if (tab) {
3433 for (i = 0; i < tab->count; i++) {
3434 vddc = (uint32_t)(tab->entries[i].Vddc);
3435 iceland_patch_with_vddc_leakage(hwmgr, &vddc);
3436 tab->entries[i].Vddc = (uint16_t)vddc;
3437 }
3438 }
3439
3440 return 0;
3441}
3442
3443static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
3444{
3445 int tmp;
3446
3447 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
3448 if(tmp)
3449 return -EINVAL;
3450
3451 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
3452 if(tmp)
3453 return -EINVAL;
3454
3455 tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
3456 if(tmp)
3457 return -EINVAL;
3458
3459 tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
3460 if(tmp)
3461 return -EINVAL;
3462
3463 tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
3464 if(tmp)
3465 return -EINVAL;
3466
3467 tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
3468 if(tmp)
3469 return -EINVAL;
3470
3471 tmp = iceland_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
3472 if(tmp)
3473 return -EINVAL;
3474
3475 tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
3476 if(tmp)
3477 return -EINVAL;
3478
3479 tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
3480 if(tmp)
3481 return -EINVAL;
3482
3483 tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
3484 if(tmp)
3485 return -EINVAL;
3486
3487 tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
3488 if(tmp)
3489 return -EINVAL;
3490
3491 tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
3492 if(tmp)
3493 return -EINVAL;
3494
3495 return 0;
3496}
3497
3498static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
3499{
3500 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
3501
3502 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
3503 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
3504 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
3505
3506 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
3507 "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
3508 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
3509 "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
3510
3511 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
3512 "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
3513 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
3514 "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
3515
3516 data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
3517 data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
3518
3519 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
3520 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
3521 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
3522 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
3523 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
3524 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
3525
3526 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
3527 data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
3528 data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
3529 }
3530
3531 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
3532 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
3533
3534 return 0;
3535}
3536
3537static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
3538{
3539 uint32_t table_size;
3540 struct phm_clock_voltage_dependency_table *table_clk_vlt;
3541
3542 hwmgr->dyn_state.mclk_sclk_ratio = 4;
3543 hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
3544 hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
3545
3546 /* initialize vddc_dep_on_dal_pwrl table */
3547 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3548 table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
3549
3550 if (NULL == table_clk_vlt) {
3551 pr_err("[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
3552 return -ENOMEM;
3553 } else {
3554 table_clk_vlt->count = 4;
3555 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
3556 table_clk_vlt->entries[0].v = 0;
3557 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
3558 table_clk_vlt->entries[1].v = 720;
3559 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
3560 table_clk_vlt->entries[2].v = 810;
3561 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
3562 table_clk_vlt->entries[3].v = 900;
3563 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
3564 }
3565
3566 return 0;
3567}
3568
3569/**
3570 * Initializes the Volcanic Islands Hardware Manager
3571 *
3572 * @param hwmgr the address of the powerplay hardware manager.
3573 * @return 1 if success; otherwise appropriate error code.
3574 */
3575static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3576{
3577 int result = 0;
3578 SMU71_Discrete_DpmTable *table = NULL;
3579 iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
3580 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
3581 bool stay_in_boot;
3582 struct phw_iceland_ulv_parm *ulv;
3583 struct cgs_system_info sys_info = {0};
3584
3585 PP_ASSERT_WITH_CODE((NULL != hwmgr),
3586 "Invalid Parameter!", return -EINVAL;);
3587
3588 data->dll_defaule_on = 0;
3589 data->sram_end = SMC_RAM_END;
3590
3591 data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT;
3592 data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT;
3593 data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT;
3594 data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT;
3595 data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT;
3596 data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT;
3597 data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT;
3598 data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT;
3599
3600 data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT;
3601
3602 data->sclk_dpm_key_disabled = 0;
3603 data->mclk_dpm_key_disabled = 0;
3604 data->pcie_dpm_key_disabled = 0;
3605 data->pcc_monitor_enabled = 0;
3606
3607 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3608 PHM_PlatformCaps_UnTabledHardwareInterface);
3609
3610 data->gpio_debug = 0;
3611 data->engine_clock_data = 0;
3612 data->memory_clock_data = 0;
3613
3614 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3615 PHM_PlatformCaps_SclkDeepSleepAboveLow);
3616
3617 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3618 PHM_PlatformCaps_DynamicPatchPowerState);
3619
3620 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3621 PHM_PlatformCaps_TablelessHardwareInterface);
3622
3623 /* Initializes DPM default values. */
3624 iceland_initialize_dpm_defaults(hwmgr);
3625
3626 /* Enable Platform EVV support. */
3627 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3628 PHM_PlatformCaps_EVV);
3629
3630 /* Get leakage voltage based on leakage ID. */
3631 result = iceland_get_evv_voltage(hwmgr);
3632 if (result)
3633 goto failed;
3634
3635 /**
3636 * Patch our voltage dependency table with actual leakage
3637 * voltage. We need to perform leakage translation before it's
3638 * used by other functions such as
3639 * iceland_set_hwmgr_variables_based_on_pptable.
3640 */
3641 result = iceland_patch_dependency_tables_with_leakage(hwmgr);
3642 if (result)
3643 goto failed;
3644
3645 /* Parse pptable data read from VBIOS. */
3646 result = iceland_set_private_var_based_on_pptale(hwmgr);
3647 if (result)
3648 goto failed;
3649
3650 /* ULV support */
3651 ulv = &(data->ulv);
3652 ulv->ulv_supported = 1;
3653
3654 /* Initalize Dynamic State Adjustment Rule Settings*/
3655 result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
3656 if (result) {
3657 pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n");
3658 goto failed;
3659 }
3660
3661 data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE;
3662 data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE;
3663 data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE;
3664
3665 /*
3666 * Hardcode thermal temperature settings for now, these will
3667 * be overwritten if a custom policy exists.
3668 */
3669 data->thermal_temp_setting.temperature_low = 99500;
3670 data->thermal_temp_setting.temperature_high = 100000;
3671 data->thermal_temp_setting.temperature_shutdown = 104000;
3672 data->uvd_enabled = false;
3673
3674 table = &data->smc_state_table;
3675
3676 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
3677 &gpio_pin_assignment)) {
3678 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3679 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3680 PHM_PlatformCaps_RegulatorHot);
3681 } else {
3682 table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN;
3683 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3684 PHM_PlatformCaps_RegulatorHot);
3685 }
3686
3687 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3688 &gpio_pin_assignment)) {
3689 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3690 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3691 PHM_PlatformCaps_AutomaticDCTransition);
3692 } else {
3693 table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN;
3694 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3695 PHM_PlatformCaps_AutomaticDCTransition);
3696 }
3697
3698 /*
3699 * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak.
3700 * Current Control feature is enabled and we should program
3701 * PCC HW register
3702 */
3703 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID,
3704 &gpio_pin_assignment)) {
3705 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
3706 CGS_IND_REG__SMC,
3707 ixCNB_PWRMGT_CNTL);
3708
3709 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
3710 case 0:
3711 temp_reg = PHM_SET_FIELD(temp_reg,
3712 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
3713 break;
3714 case 1:
3715 temp_reg = PHM_SET_FIELD(temp_reg,
3716 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
3717 break;
3718 case 2:
3719 temp_reg = PHM_SET_FIELD(temp_reg,
3720 CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
3721 break;
3722 case 3:
3723 temp_reg = PHM_SET_FIELD(temp_reg,
3724 CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
3725 break;
3726 case 4:
3727 temp_reg = PHM_SET_FIELD(temp_reg,
3728 CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
3729 break;
3730 default:
3731 pr_warning("[ powerplay ] Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n");
3732 break;
3733 }
3734 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3735 ixCNB_PWRMGT_CNTL, temp_reg);
3736 }
3737
3738 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3739 PHM_PlatformCaps_EnableSMU7ThermalManagement);
3740 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3741 PHM_PlatformCaps_SMU7);
3742
3743 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3744 VOLTAGE_TYPE_VDDC,
3745 VOLTAGE_OBJ_GPIO_LUT))
3746 data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
3747 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3748 VOLTAGE_TYPE_VDDC,
3749 VOLTAGE_OBJ_SVID2))
3750 data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
3751
3752 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3753 PHM_PlatformCaps_ControlVDDCI)) {
3754 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3755 VOLTAGE_TYPE_VDDCI,
3756 VOLTAGE_OBJ_GPIO_LUT))
3757 data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
3758 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3759 VOLTAGE_TYPE_VDDCI,
3760 VOLTAGE_OBJ_SVID2))
3761 data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
3762 }
3763
3764 if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE)
3765 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3766 PHM_PlatformCaps_ControlVDDCI);
3767
3768 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3769 PHM_PlatformCaps_EnableMVDDControl)) {
3770 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3771 VOLTAGE_TYPE_MVDDC,
3772 VOLTAGE_OBJ_GPIO_LUT))
3773 data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
3774 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3775 VOLTAGE_TYPE_MVDDC,
3776 VOLTAGE_OBJ_SVID2))
3777 data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
3778 }
3779
3780 if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE)
3781 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3782 PHM_PlatformCaps_EnableMVDDControl);
3783
3784 data->vddc_phase_shed_control = false;
3785
3786 stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3787 PHM_PlatformCaps_StayInBootState);
3788
3789 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3790 PHM_PlatformCaps_DynamicPowerManagement);
3791
3792 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3793 PHM_PlatformCaps_ActivityReporting);
3794
3795 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3796 PHM_PlatformCaps_GFXClockGatingSupport);
3797
3798 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3799 PHM_PlatformCaps_MemorySpreadSpectrumSupport);
3800 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3801 PHM_PlatformCaps_EngineSpreadSpectrumSupport);
3802
3803 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3804 PHM_PlatformCaps_DynamicPCIEGen2Support);
3805 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3806 PHM_PlatformCaps_SMC);
3807
3808 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3809 PHM_PlatformCaps_DisablePowerGating);
3810 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3811 PHM_PlatformCaps_BACO);
3812
3813 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3814 PHM_PlatformCaps_ThermalAutoThrottling);
3815 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3816 PHM_PlatformCaps_DisableLSClockGating);
3817 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3818 PHM_PlatformCaps_SamuDPM);
3819 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3820 PHM_PlatformCaps_AcpDPM);
3821 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3822 PHM_PlatformCaps_OD6inACSupport);
3823 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3824 PHM_PlatformCaps_EnablePlatformPowerManagement);
3825
3826 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3827 PHM_PlatformCaps_PauseMMSessions);
3828
3829 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3830 PHM_PlatformCaps_OD6PlusinACSupport);
3831 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3832 PHM_PlatformCaps_PauseMMSessions);
3833 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3834 PHM_PlatformCaps_GFXClockGatingManagedInCAIL);
3835 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3836 PHM_PlatformCaps_IcelandULPSSWWorkAround);
3837
3838
3839 /* iceland doesn't support UVD and VCE */
3840 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3841 PHM_PlatformCaps_UVDPowerGating);
3842 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3843 PHM_PlatformCaps_VCEPowerGating);
3844
3845 sys_info.size = sizeof(struct cgs_system_info);
3846 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
3847 result = cgs_query_system_info(hwmgr->device, &sys_info);
3848 if (!result) {
3849 if (sys_info.value & AMD_PG_SUPPORT_UVD)
3850 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3851 PHM_PlatformCaps_UVDPowerGating);
3852 if (sys_info.value & AMD_PG_SUPPORT_VCE)
3853 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3854 PHM_PlatformCaps_VCEPowerGating);
3855
3856 data->is_tlu_enabled = false;
3857 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
3858 ICELAND_MAX_HARDWARE_POWERLEVELS;
3859 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
3860 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
3861
3862 sys_info.size = sizeof(struct cgs_system_info);
3863 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
3864 result = cgs_query_system_info(hwmgr->device, &sys_info);
3865 if (result)
3866 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3867 else
3868 data->pcie_gen_cap = (uint32_t)sys_info.value;
3869 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
3870 data->pcie_spc_cap = 20;
3871 sys_info.size = sizeof(struct cgs_system_info);
3872 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
3873 result = cgs_query_system_info(hwmgr->device, &sys_info);
3874 if (result)
3875 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3876 else
3877 data->pcie_lane_cap = (uint32_t)sys_info.value;
3878 } else {
3879 /* Ignore return value in here, we are cleaning up a mess. */
3880 iceland_hwmgr_backend_fini(hwmgr);
3881 }
3882
3883 return 0;
3884failed:
3885 return result;
3886}
3887
/* Return the number of power-state table entries, or 0 if the query fails. */
static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr)
{
	unsigned long num_entries = 0;

	if (pp_tables_get_num_of_entries(hwmgr, &num_entries))
		return 0;

	return num_entries;
}
3897
/* Magic tag stamped into pp_hw_power_state to identify VI-family states. */
static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic);
3899
3900struct iceland_power_state *cast_phw_iceland_power_state(
3901 struct pp_hw_power_state *hw_ps)
3902{
3903 if (hw_ps == NULL)
3904 return NULL;
3905
3906 PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
3907 "Invalid Powerstate Type!",
3908 return NULL);
3909
3910 return (struct iceland_power_state *)hw_ps;
3911}
3912
/*
 * Adjust the requested power state before it is programmed: clamp both
 * performance levels to the AC/DC limits, honour the gfx arbiter
 * minimums and overdrive requests, apply stable-pstate constraints and
 * decide whether MCLK switching must be disabled.
 */
static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
			struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct iceland_power_state *iceland_ps =
			cast_phw_iceland_power_state(&prequest_ps->hardware);

	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2,
		"VI should always have 2 performance levels",
		);

	/* Pick the clock/voltage ceiling matching the current power source. */
	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* On DC (battery), cap every performance level to the DC limits. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < iceland_ps->performance_level_count; i++) {
			if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk)
				iceland_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk)
				iceland_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
	iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {

		/*
		 * Stable pstate: target 75% of the max AC sclk, snapped down
		 * to the nearest vddc dependency entry, and the max AC mclk.
		 */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) {
			if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				break;
			}
		}

		/* No entry at or below the target: fall back to the lowest one. */
		if (count < 0)
			stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	/* The gfx arbiter may raise the clock floor further. */
	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	/* Apply sclk overdrive to the high level, bounded by the OD limit. */
	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
			"Overdrive sclk exceeds limit",
			hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
	}

	/* Same for mclk overdrive. */
	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
			"Overdrive mclk exceeds limit",
			hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);

	/* MCLK switching is disallowed with multiple displays or frame lock. */
	disable_mclk_switching = (1 < info.display_count) ||
				    disable_mclk_switching_for_frame_lock;

	sclk = iceland_ps->performance_levels[0].engine_clock;
	mclk = iceland_ps->performance_levels[0].memory_clock;

	/* Pin mclk to the highest level when switching is disabled. */
	if (disable_mclk_switching)
		mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock;

	/* Raise the low level to the floor, but never above the source limit. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;

	iceland_ps->performance_levels[0].engine_clock = sclk;
	iceland_ps->performance_levels[0].memory_clock = mclk;

	/* Keep the high level at or above the (possibly raised) low level. */
	iceland_ps->performance_levels[1].engine_clock =
		(iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ?
			      iceland_ps->performance_levels[1].engine_clock :
			      iceland_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		if (mclk < iceland_ps->performance_levels[1].memory_clock)
			mclk = iceland_ps->performance_levels[1].memory_clock;

		/* Both levels share one mclk so no switch ever happens. */
		iceland_ps->performance_levels[0].memory_clock = mclk;
		iceland_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock)
			iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock;
	}

	/* Stable pstate overrides both levels with the fixed clocks. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
		for (i=0; i < iceland_ps->performance_level_count; i++) {
			iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/*
			 * NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max; pcie_lane_performance.max
			 * looks intended - confirm against struct iceland_hwmgr.
			 */
			iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}

	return 0;
}
4053
4054static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
4055{
4056 /*
4057 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
4058 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
4059 * whereas voltage control is a fundemental change that will not be disabled
4060 */
4061 return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4062 FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
4063}
4064
4065/**
4066 * force DPM power State
4067 *
4068 * @param hwmgr: the address of the powerplay hardware manager.
4069 * @param n : DPM level
4070 * @return The response that came from the SMC.
4071 */
4072int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
4073{
4074 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4075
4076 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
4077 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4078 "Trying to force SCLK when DPM is disabled", return -1;);
4079 if (0 == data->sclk_dpm_key_disabled)
4080 return (0 == smum_send_msg_to_smc_with_parameter(
4081 hwmgr->smumgr,
4082 PPSMC_MSG_DPM_ForceState,
4083 n) ? 0 : 1);
4084
4085 return 0;
4086}
4087
4088/**
4089 * force DPM power State
4090 *
4091 * @param hwmgr: the address of the powerplay hardware manager.
4092 * @param n : DPM level
4093 * @return The response that came from the SMC.
4094 */
4095int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
4096{
4097 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4098
4099 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
4100 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4101 "Trying to Force MCLK when DPM is disabled", return -1;);
4102 if (0 == data->mclk_dpm_key_disabled)
4103 return (0 == smum_send_msg_to_smc_with_parameter(
4104 hwmgr->smumgr,
4105 PPSMC_MSG_MCLKDPM_ForceState,
4106 n) ? 0 : 1);
4107
4108 return 0;
4109}
4110
4111/**
4112 * force DPM power State
4113 *
4114 * @param hwmgr: the address of the powerplay hardware manager.
4115 * @param n : DPM level
4116 * @return The response that came from the SMC.
4117 */
4118int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
4119{
4120 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4121
4122 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4123 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4124 "Trying to Force PCIE level when DPM is disabled", return -1;);
4125 if (0 == data->pcie_dpm_key_disabled)
4126 return (0 == smum_send_msg_to_smc_with_parameter(
4127 hwmgr->smumgr,
4128 PPSMC_MSG_PCIeDPM_ForceLevel,
4129 n) ? 0 : 1);
4130
4131 return 0;
4132}
4133
4134static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr)
4135{
4136 uint32_t level, tmp;
4137 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4138
4139 if (0 == data->sclk_dpm_key_disabled) {
4140 /* SCLK */
4141 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
4142 level = 0;
4143 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
4144 while (tmp >>= 1)
4145 level++ ;
4146
4147 if (0 != level) {
4148 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
4149 "force highest sclk dpm state failed!", return -1);
4150 PHM_WAIT_INDIRECT_FIELD(hwmgr->device,
4151 SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level);
4152 }
4153 }
4154 }
4155
4156 if (0 == data->mclk_dpm_key_disabled) {
4157 /* MCLK */
4158 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
4159 level = 0;
4160 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4161 while (tmp >>= 1)
4162 level++ ;
4163
4164 if (0 != level) {
4165 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)),
4166 "force highest mclk dpm state failed!", return -1);
4167 PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
4168 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level);
4169 }
4170 }
4171 }
4172
4173 if (0 == data->pcie_dpm_key_disabled) {
4174 /* PCIE */
4175 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
4176 level = 0;
4177 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4178 while (tmp >>= 1)
4179 level++ ;
4180
4181 if (0 != level) {
4182 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)),
4183 "force highest pcie dpm state failed!", return -1);
4184 }
4185 }
4186 }
4187
4188 return 0;
4189}
4190
/* Return the index of the least-significant set bit in level_mask. */
static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr,
			uint32_t level_mask)
{
	uint32_t bit = 0;

	while (!(level_mask & (1 << bit)))
		bit++;

	return bit;
}
4201
4202static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4203{
4204 uint32_t level;
4205 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4206
4207 /* for now force only sclk */
4208 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4209 level = iceland_get_lowest_enable_level(hwmgr,
4210 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4211
4212 PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
4213 "force sclk dpm state failed!", return -1);
4214
4215 PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
4216 TARGET_AND_CURRENT_PROFILE_INDEX,
4217 CURR_SCLK_INDEX,
4218 level);
4219 }
4220
4221 return 0;
4222}
4223
4224int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4225{
4226 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4227
4228 PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr),
4229 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
4230 return -1);
4231
4232 if (0 == data->sclk_dpm_key_disabled) {
4233 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4234 hwmgr->smumgr,
4235 PPSMC_MSG_NoForcedLevel)),
4236 "unforce sclk dpm state failed!",
4237 return -1);
4238 }
4239
4240 if (0 == data->mclk_dpm_key_disabled) {
4241 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4242 hwmgr->smumgr,
4243 PPSMC_MSG_MCLKDPM_NoForcedLevel)),
4244 "unforce mclk dpm state failed!",
4245 return -1);
4246 }
4247
4248 if (0 == data->pcie_dpm_key_disabled) {
4249 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
4250 hwmgr->smumgr,
4251 PPSMC_MSG_PCIeDPM_UnForceLevel)),
4252 "unforce pcie level failed!",
4253 return -1);
4254 }
4255
4256 return 0;
4257}
4258
4259static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr,
4260 enum amd_dpm_forced_level level)
4261{
4262 int ret = 0;
4263
4264 switch (level) {
4265 case AMD_DPM_FORCED_LEVEL_HIGH:
4266 ret = iceland_force_dpm_highest(hwmgr);
4267 if (ret)
4268 return ret;
4269 break;
4270 case AMD_DPM_FORCED_LEVEL_LOW:
4271 ret = iceland_force_dpm_lowest(hwmgr);
4272 if (ret)
4273 return ret;
4274 break;
4275 case AMD_DPM_FORCED_LEVEL_AUTO:
4276 ret = iceland_unforce_dpm_levels(hwmgr);
4277 if (ret)
4278 return ret;
4279 break;
4280 default:
4281 break;
4282 }
4283
4284 hwmgr->dpm_level = level;
4285 return ret;
4286}
4287
4288const struct iceland_power_state *cast_const_phw_iceland_power_state(
4289 const struct pp_hw_power_state *hw_ps)
4290{
4291 if (hw_ps == NULL)
4292 return NULL;
4293
4294 PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
4295 "Invalid Powerstate Type!",
4296 return NULL);
4297
4298 return (const struct iceland_power_state *)hw_ps;
4299}
4300
/*
 * Compare the requested state's top-level SCLK/MCLK against the current
 * DPM tables and record in need_update_smu7_dpm_table which tables must
 * be regenerated: OD_UPDATE when the clock is not a table level at all,
 * plain UPDATE when only reprogramming is needed.
 */
static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
	struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_smu7_dpm_table = 0;

	/* Is the requested sclk an existing DPM level? */
	for (i = 0; i < psclk_table->count; i++) {
		if (sclk == psclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= psclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
		/*
		 * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep
		 * divider update is required.
		 * NOTE(review): min_clocks stays zero-initialized here, so
		 * this effectively compares min_clock_insr against 0.
		 */
		if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Is the requested mclk an existing DPM level? */
	for (i = 0; i < pmclk_table->count; i++) {
		if (mclk == pmclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= pmclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/* A display count change affects mclk-switching rules. */
	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
4347
4348static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps)
4349{
4350 uint32_t i;
4351 uint32_t pcie_speed, max_speed = 0;
4352
4353 for (i = 0; i < hw_ps->performance_level_count; i++) {
4354 pcie_speed = hw_ps->performance_levels[i].pcie_gen;
4355 if (max_speed < pcie_speed)
4356 max_speed = pcie_speed;
4357 }
4358
4359 return max_speed;
4360}
4361
4362static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
4363{
4364 uint32_t speed_cntl = 0;
4365
4366 speed_cntl = cgs_read_ind_register(hwmgr->device,
4367 CGS_IND_REG__PCIE,
4368 ixPCIE_LC_SPEED_CNTL);
4369 return((uint16_t)PHM_GET_FIELD(speed_cntl,
4370 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
4371}
4372
4373
/*
 * If the new state needs a faster PCIe link than the current one, ask
 * ACPI to raise the link speed before the state change; otherwise flag
 * that a PSPP notification is required after the change.
 */
static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state);
	const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state);

	uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps);
	uint16_t current_link_speed;

	/* A previously forced gen overrides the current state's speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 refused: remember to force Gen2 and try it. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request failed, keep what we have */
		default:
			data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4413
/*
 * Ask the SMC to freeze the SCLK/MCLK DPM levels that are about to be
 * repopulated, so the firmware does not switch levels mid-update.
 */
static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	/* Nothing to freeze when no table needs regeneration. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
	    (data->need_update_smu7_dpm_table &
	     (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(
			0 == iceland_is_dpm_running(hwmgr),
			"Trying to freeze SCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
						  PPSMC_MSG_SCLKDPM_FreezeLevel),
			"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
	    (data->need_update_smu7_dpm_table &
	     DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
			"Trying to freeze MCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
						  PPSMC_MSG_MCLKDPM_FreezeLevel),
			"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	return 0;
}
4450
/*
 * Rebuild the SCLK/MCLK DPM tables for an overdriven top level.  With
 * OD6Plus enabled, the intermediate levels are rescaled proportionally
 * from the golden (default) table; finally the affected graphic/memory
 * levels are re-uploaded to the SMC.
 */
static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;

	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
	uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
	struct iceland_dpm_table *pdpm_table = &data->dpm_table;

	struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* The requested sclk becomes the new top level. */
		pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
			/*
			 * Need to do calculation based on the golden DPM table
			 * as the Heatmap GPU Clock axis is also based on the default values
			 */
			PP_ASSERT_WITH_CODE(
				(pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
			/* Rescale levels count-2..2 by the percentage the top level moved. */
			for (i = dpm_count; i > 1; i--) {
				if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
					clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
						pgolden_dpm_table->sclk_table.dpm_levels[i].value +
						(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): indexes the golden table with pdpm_table's count - confirm intended. */
				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
					clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
						pgolden_dpm_table->sclk_table.dpm_levels[i].value -
						(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->sclk_table.dpm_levels[i].value =
						pgolden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* Same scheme for the memory clock table. */
		pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
				(pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
			for (i = dpm_count; i > 1; i--) {
				if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
					clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

					pdpm_table->mclk_table.dpm_levels[i].value =
						pgolden_dpm_table->mclk_table.dpm_levels[i].value +
						(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): indexes the golden table with pdpm_table's count - confirm intended. */
				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
							pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

					pdpm_table->mclk_table.dpm_levels[i].value =
						pgolden_dpm_table->mclk_table.dpm_levels[i].value -
						(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}


	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = iceland_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
			return result);
	}

	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = iceland_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
			return result);
	}

	return result;
}
4557
4558static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4559 struct iceland_single_dpm_table *pdpm_table,
4560 uint32_t low_limit, uint32_t high_limit)
4561{
4562 uint32_t i;
4563
4564 for (i = 0; i < pdpm_table->count; i++) {
4565 if ((pdpm_table->dpm_levels[i].value < low_limit) ||
4566 (pdpm_table->dpm_levels[i].value > high_limit))
4567 pdpm_table->dpm_levels[i].enabled = false;
4568 else
4569 pdpm_table->dpm_levels[i].enabled = true;
4570 }
4571 return 0;
4572}
4573
4574static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state)
4575{
4576 int result = 0;
4577 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4578 uint32_t high_limit_count;
4579
4580 PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
4581 "power state did not have any performance level",
4582 return -1);
4583
4584 high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
4585
4586 iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table),
4587 hw_state->performance_levels[0].engine_clock,
4588 hw_state->performance_levels[high_limit_count].engine_clock);
4589
4590 iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table),
4591 hw_state->performance_levels[0].memory_clock,
4592 hw_state->performance_levels[high_limit_count].memory_clock);
4593
4594 return result;
4595}
4596
4597static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
4598{
4599 int result;
4600 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4601 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4602 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4603
4604 result = iceland_trim_dpm_states(hwmgr, iceland_ps);
4605 if (0 != result)
4606 return result;
4607
4608 data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4609 data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4610 data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
4611 if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1))
4612 data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4613
4614 data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4615
4616 return 0;
4617}
4618
/* Iceland does not support VCE, so there is no VCE DPM to update. */
static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
{
	return 0;
}
4623
4624static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
4625{
4626 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4627
4628 int result = 0;
4629 uint32_t low_sclk_interrupt_threshold = 0;
4630
4631 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4632 PHM_PlatformCaps_SclkThrottleLowNotification)
4633 && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
4634 data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
4635 low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
4636
4637 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
4638
4639 result = iceland_copy_bytes_to_smc(
4640 hwmgr->smumgr,
4641 data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable,
4642 LowSclkInterruptThreshold),
4643 (uint8_t *)&low_sclk_interrupt_threshold,
4644 sizeof(uint32_t),
4645 data->sram_end
4646 );
4647 }
4648
4649 return result;
4650}
4651
4652static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
4653{
4654 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4655
4656 uint32_t address;
4657 int32_t result;
4658
4659 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4660 return 0;
4661
4662
4663 memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters));
4664
4665 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
4666
4667 if(result != 0)
4668 return result;
4669
4670
4671 address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
4672
4673 return iceland_copy_bytes_to_smc(hwmgr->smumgr, address,
4674 (uint8_t *)&data->mc_reg_table.data[0],
4675 sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
4676 data->sram_end);
4677}
4678
4679static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
4680{
4681 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4682
4683 if (data->need_update_smu7_dpm_table &
4684 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4685 return iceland_program_memory_timing_parameters(hwmgr);
4686
4687 return 0;
4688}
4689
4690static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4691{
4692 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4693
4694 if (0 == data->need_update_smu7_dpm_table)
4695 return 0;
4696
4697 if ((0 == data->sclk_dpm_key_disabled) &&
4698 (data->need_update_smu7_dpm_table &
4699 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4700
4701 PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
4702 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4703 );
4704 PP_ASSERT_WITH_CODE(
4705 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4706 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4707 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4708 return -1);
4709 }
4710
4711 if ((0 == data->mclk_dpm_key_disabled) &&
4712 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4713
4714 PP_ASSERT_WITH_CODE(
4715 0 == iceland_is_dpm_running(hwmgr),
4716 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4717 );
4718 PP_ASSERT_WITH_CODE(
4719 0 == smum_send_msg_to_smc(hwmgr->smumgr,
4720 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
4721 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4722 return -1);
4723 }
4724
4725 data->need_update_smu7_dpm_table = 0;
4726
4727 return 0;
4728}
4729
4730static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
4731{
4732 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
4733 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4734 const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
4735 uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps);
4736 uint8_t request;
4737
4738 if (data->pspp_notify_required ||
4739 data->pcie_performance_request) {
4740 if (target_link_speed == PP_PCIEGen3)
4741 request = PCIE_PERF_REQ_GEN3;
4742 else if (target_link_speed == PP_PCIEGen2)
4743 request = PCIE_PERF_REQ_GEN2;
4744 else
4745 request = PCIE_PERF_REQ_GEN1;
4746
4747 if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) {
4748 data->pcie_performance_request = false;
4749 return 0;
4750 }
4751
4752 if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
4753 if (PP_PCIEGen2 == target_link_speed)
4754 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
4755 else
4756 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
4757 }
4758 }
4759
4760 data->pcie_performance_request = false;
4761 return 0;
4762}
4763
4764int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
4765{
4766 PPSMC_Result result;
4767 iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
4768
4769 if (0 == data->sclk_dpm_key_disabled) {
4770 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4771 if (0 != iceland_is_dpm_running(hwmgr))
4772 printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n");
4773
4774 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4775 result = smum_send_msg_to_smc_with_parameter(
4776 hwmgr->smumgr,
4777 (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
4778 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4779 PP_ASSERT_WITH_CODE((0 == result),
4780 "Set Sclk Dpm enable Mask failed", return -1);
4781 }
4782 }
4783
4784 if (0 == data->mclk_dpm_key_disabled) {
4785 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
4786 if (0 != iceland_is_dpm_running(hwmgr))
4787 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n");
4788
4789 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4790 result = smum_send_msg_to_smc_with_parameter(
4791 hwmgr->smumgr,
4792 (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
4793 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
4794 PP_ASSERT_WITH_CODE((0 == result),
4795 "Set Mclk Dpm enable Mask failed", return -1);
4796 }
4797 }
4798
4799 return 0;
4800}
4801
/*
 * Apply a new power state.  The steps run in a fixed order: reconcile the
 * DPM tables with the requested clocks, optionally raise the PCIe link
 * speed, freeze DPM, re-upload the level tables / thresholds / MC
 * registers, unfreeze, re-enable the level masks, and finally notify the
 * PCIe speed change.  Every step runs even after a failure; the code of
 * the most recent failure is what gets returned.
 */
static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;

	tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
	}

	tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);

	tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);

	/* no-op on Iceland (VCE DPM unsupported), kept for sequence parity */
	tmp_result = iceland_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);

	tmp_result = iceland_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);

	tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);

	tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);

	tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
	}

	return result;
}
4848
/* Size of the hwmgr-private storage allocated for each power state. */
static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct iceland_power_state);
}
4853
4854static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4855{
4856 struct pp_power_state *ps;
4857 struct iceland_power_state *iceland_ps;
4858
4859 if (hwmgr == NULL)
4860 return -EINVAL;
4861
4862 ps = hwmgr->request_ps;
4863
4864 if (ps == NULL)
4865 return -EINVAL;
4866
4867 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
4868
4869 if (low)
4870 return iceland_ps->performance_levels[0].memory_clock;
4871 else
4872 return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
4873}
4874
4875static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4876{
4877 struct pp_power_state *ps;
4878 struct iceland_power_state *iceland_ps;
4879
4880 if (hwmgr == NULL)
4881 return -EINVAL;
4882
4883 ps = hwmgr->request_ps;
4884
4885 if (ps == NULL)
4886 return -EINVAL;
4887
4888 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
4889
4890 if (low)
4891 return iceland_ps->performance_levels[0].engine_clock;
4892 else
4893 return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
4894}
4895
4896static int iceland_get_current_pcie_lane_number(
4897 struct pp_hwmgr *hwmgr)
4898{
4899 uint32_t link_width;
4900
4901 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4902 CGS_IND_REG__PCIE,
4903 PCIE_LC_LINK_WIDTH_CNTL,
4904 LC_LINK_WIDTH_RD);
4905
4906 PP_ASSERT_WITH_CODE((7 >= link_width),
4907 "Invalid PCIe lane width!", return 0);
4908
4909 return decode_pcie_lane_width(link_width);
4910}
4911
/*
 * Patch the boot power state (level 0 of @hw_ps) with the default clocks
 * and voltages read from the VBIOS FirmwareInfo table, and cache those
 * values in data->vbios_boot_state for later comparisons.
 *
 * Returns 0 always; a missing FirmwareInfo table (e.g. under test) is
 * tolerated and leaves the state unpatched.
 */
static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state.  ATOM tables are little-endian on disk. */
	data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot values come from live hardware, not the table */
	data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)iceland_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
4950
4951static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
4952 struct pp_hw_power_state *power_state,
4953 unsigned int index, const void *clock_info)
4954{
4955 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
4956 struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state);
4957 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
4958 struct iceland_performance_level *performance_level;
4959 uint32_t engine_clock, memory_clock;
4960 uint16_t pcie_gen_from_bios;
4961
4962 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
4963 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
4964
4965 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
4966 data->highest_mclk = memory_clock;
4967
4968 performance_level = &(iceland_power_state->performance_levels
4969 [iceland_power_state->performance_level_count++]);
4970
4971 PP_ASSERT_WITH_CODE(
4972 (iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS),
4973 "Performance levels exceeds SMC limit!",
4974 return -1);
4975
4976 PP_ASSERT_WITH_CODE(
4977 (iceland_power_state->performance_level_count <=
4978 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
4979 "Performance levels exceeds Driver limit!",
4980 return -1);
4981
4982 /* Performance levels are arranged from low to high. */
4983 performance_level->memory_clock = memory_clock;
4984 performance_level->engine_clock = engine_clock;
4985
4986 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
4987
4988 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
4989 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
4990
4991 return 0;
4992}
4993
/*
 * Load power-play table entry @entry_index into @state: parse the ATOM
 * entry via iceland_get_pp_table_entry_callback_func(), sanity-check the
 * single-level MCLK/VDDCI case against the VBIOS boot values, propagate
 * DC-compatibility and UVD clocks, and record PCIe gen/lane extremes for
 * the performance and battery UI classes.
 */
static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct iceland_power_state *ps;
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct iceland_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			iceland_get_pp_table_entry_callback_func);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state as
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
	 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
		    data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
		    data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
	else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) {
		if (data->bacos.best_match == 0xffff) {
			/* For C.I. use boot state as base BACO state */
			data->bacos.best_match = PP_StateClassificationFlag_Boot;
			data->bacos.performance_level = ps->performance_levels[0];
		}
	}


	ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK;
	ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK;

	/* widen the tracked PCIe gen/lane envelope for this UI class */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
				    ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
						ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
				    ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
						ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
				    ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
						ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
				    ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
						ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
				    ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
						ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
				    ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
						ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
				    ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
						ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
				    ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
						ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
5108
/*
 * debugfs helper: query the SMC for the current SCLK/MCLK, read the
 * average graphics activity soft register, and print them together with
 * the UVD/VCE power-gate status into @m.
 * (Function name typo "perforce" is kept; it is referenced elsewhere.)
 */
static void
iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset;
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));

	/* the SMC leaves its reply in the message-argument register */
	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	/* /100 for MHz implies the SMC reports clocks in 10 kHz units */
	seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);

	offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	/* value appears scaled by 256: add half before shifting to round */
	activity_percent += 0x80;
	activity_percent >>= 8;

	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);

	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");

	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
}
5136
5137int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
5138{
5139 uint32_t num_active_displays = 0;
5140 struct cgs_display_info info = {0};
5141 info.mode_info = NULL;
5142
5143 cgs_get_active_displays_info(hwmgr->device, &info);
5144
5145 num_active_displays = info.display_count;
5146
5147 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
5148 iceland_notify_smc_display_change(hwmgr, false);
5149 else
5150 iceland_notify_smc_display_change(hwmgr, true);
5151
5152 return 0;
5153}
5154
5155/**
5156* Programs the display gap
5157*
5158* @param hwmgr the address of the powerplay hardware manager.
5159* @return always OK
5160*/
5161int iceland_program_display_gap(struct pp_hwmgr *hwmgr)
5162{
5163 uint32_t num_active_displays = 0;
5164 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
5165 uint32_t display_gap2;
5166 uint32_t pre_vbi_time_in_us;
5167 uint32_t frame_time_in_us;
5168 uint32_t ref_clock;
5169 uint32_t refresh_rate = 0;
5170 struct cgs_display_info info = {0};
5171 struct cgs_mode_info mode_info;
5172
5173 info.mode_info = &mode_info;
5174
5175 cgs_get_active_displays_info(hwmgr->device, &info);
5176 num_active_displays = info.display_count;
5177
5178 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
5179 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
5180
5181 ref_clock = mode_info.ref_clock;
5182 refresh_rate = mode_info.refresh_rate;
5183
5184 if(0 == refresh_rate)
5185 refresh_rate = 60;
5186
5187 frame_time_in_us = 1000000 / refresh_rate;
5188
5189 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
5190 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
5191
5192 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
5193
5194 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64);
5195
5196 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us));
5197
5198 if (num_active_displays == 1)
5199 iceland_notify_smc_display_change(hwmgr, true);
5200
5201 return 0;
5202}
5203
/* Display configuration changed: reprogram the display gap.  The helper
 * always returns 0, so its result is not checked. */
int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	iceland_program_display_gap(hwmgr);

	return 0;
}
5210
5211/**
5212* Set maximum target operating fan output PWM
5213*
5214* @param pHwMgr: the address of the powerplay hardware manager.
5215* @param usMaxFanPwm: max operating fan PWM in percents
5216* @return The response that came from the SMC.
5217*/
5218static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5219{
5220 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5221
5222 if (phm_is_hw_access_blocked(hwmgr))
5223 return 0;
5224
5225 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
5226}
5227
5228/**
5229* Set maximum target operating fan output RPM
5230*
5231* @param pHwMgr: the address of the powerplay hardware manager.
5232* @param usMaxFanRpm: max operating fan RPM value.
5233* @return The response that came from the SMC.
5234*/
5235static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5236{
5237 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
5238
5239 if (phm_is_hw_access_blocked(hwmgr))
5240 return 0;
5241
5242 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
5243}
5244
/*
 * CGS IRQ-source state callback for the internal thermal interrupt.
 * Reads ixCG_THERMAL_INT, toggles the high/low threshold mask bit for
 * the requested direction, and writes the register back.
 *
 * NOTE(review): @enabled SETS the *_MASK bit (and clears it when
 * disabled), and the mask macros come from CG_THERMAL_INT_CTRL while the
 * register accessed is ixCG_THERMAL_INT.  This mirrors the other VI
 * hwmgrs but reads as inverted polarity — confirm against the register
 * spec before changing.
 */
static int iceland_dpm_set_interrupt_state(void *private_data,
					 unsigned src_id, unsigned type,
					 int enabled)
{
	uint32_t cg_thermal_int;
	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (type) {
	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;

	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
		if (enabled) {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		} else {
			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
		}
		break;
	default:
		break;
	}
	return 0;
}
5284
5285static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
5286 const void *thermal_interrupt_info)
5287{
5288 int result;
5289 const struct pp_interrupt_registration_info *info =
5290 (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
5291
5292 if (info == NULL)
5293 return -EINVAL;
5294
5295 result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
5296 iceland_dpm_set_interrupt_state,
5297 info->call_back, info->context);
5298
5299 if (result)
5300 return -EINVAL;
5301
5302 result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
5303 iceland_dpm_set_interrupt_state,
5304 info->call_back, info->context);
5305
5306 if (result)
5307 return -EINVAL;
5308
5309 return 0;
5310}
5311
5312
5313static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5314{
5315 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5316 bool is_update_required = false;
5317 struct cgs_display_info info = {0,0,NULL};
5318
5319 cgs_get_active_displays_info(hwmgr->device, &info);
5320
5321 if (data->display_timing.num_existing_displays != info.display_count)
5322 is_update_required = true;
5323/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
5324 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5325 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
5326 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
5327 is_update_required = true;
5328*/
5329 return is_update_required;
5330}
5331
5332
5333static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1,
5334 const struct iceland_performance_level *pl2)
5335{
5336 return ((pl1->memory_clock == pl2->memory_clock) &&
5337 (pl1->engine_clock == pl2->engine_clock) &&
5338 (pl1->pcie_gen == pl2->pcie_gen) &&
5339 (pl1->pcie_lane == pl2->pcie_lane));
5340}
5341
5342int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1,
5343 const struct pp_hw_power_state *pstate2, bool *equal)
5344{
5345 const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1);
5346 const struct iceland_power_state *psb = cast_const_phw_iceland_power_state(pstate2);
5347 int i;
5348
5349 if (equal == NULL || psa == NULL || psb == NULL)
5350 return -EINVAL;
5351
5352 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5353 if (psa->performance_level_count != psb->performance_level_count) {
5354 *equal = false;
5355 return 0;
5356 }
5357
5358 for (i = 0; i < psa->performance_level_count; i++) {
5359 if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5360 /* If we have found even one performance level pair that is different the states are different. */
5361 *equal = false;
5362 return 0;
5363 }
5364 }
5365
5366 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5367 *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
5368 *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
5369 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5370 *equal &= (psa->acp_clk == psb->acp_clk);
5371
5372 return 0;
5373}
5374
5375static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5376{
5377 if (mode) {
5378 /* stop auto-manage */
5379 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5380 PHM_PlatformCaps_MicrocodeFanControl))
5381 iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
5382 iceland_fan_ctrl_set_static_mode(hwmgr, mode);
5383 } else
5384 /* restart auto-manage */
5385 iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5386
5387 return 0;
5388}
5389
5390static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5391{
5392 if (hwmgr->fan_ctrl_is_in_default_mode)
5393 return hwmgr->fan_ctrl_default_mode;
5394 else
5395 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5396 CG_FDO_CTRL2, FDO_PWM_MODE);
5397}
5398
5399static int iceland_force_clock_level(struct pp_hwmgr *hwmgr,
5400 enum pp_clock_type type, uint32_t mask)
5401{
5402 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5403
5404 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5405 return -EINVAL;
5406
5407 switch (type) {
5408 case PP_SCLK:
5409 if (!data->sclk_dpm_key_disabled)
5410 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5411 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5412 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5413 break;
5414 case PP_MCLK:
5415 if (!data->mclk_dpm_key_disabled)
5416 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5417 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5418 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5419 break;
5420 case PP_PCIE:
5421 {
5422 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5423 uint32_t level = 0;
5424
5425 while (tmp >>= 1)
5426 level++;
5427
5428 if (!data->pcie_dpm_key_disabled)
5429 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5430 PPSMC_MSG_PCIeDPM_ForceLevel,
5431 level);
5432 break;
5433 }
5434 default:
5435 break;
5436 }
5437
5438 return 0;
5439}
5440
/*
 * sysfs pp_dpm_* helper: print every SCLK/MCLK/PCIe DPM level into @buf,
 * marking the level matching (or, for clocks, the first level at or
 * above) the current hardware value with '*'.  Returns the number of
 * characters written.
 */
static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* SMC returns the current clock in the message-arg register */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* find the first level at or above the current clock */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = iceland_get_current_pcie_speed(hwmgr);
		/* PCIe levels must match exactly, unlike the clock tables */
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
5505
5506static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr)
5507{
5508 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5509 struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5510 struct iceland_single_dpm_table *golden_sclk_table =
5511 &(data->golden_dpm_table.sclk_table);
5512 int value;
5513
5514 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5515 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5516 100 /
5517 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5518
5519 return value;
5520}
5521
5522static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5523{
5524 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5525 struct iceland_single_dpm_table *golden_sclk_table =
5526 &(data->golden_dpm_table.sclk_table);
5527 struct pp_power_state *ps;
5528 struct iceland_power_state *iceland_ps;
5529
5530 if (value > 20)
5531 value = 20;
5532
5533 ps = hwmgr->request_ps;
5534
5535 if (ps == NULL)
5536 return -EINVAL;
5537
5538 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
5539
5540 iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock =
5541 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5542 value / 100 +
5543 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5544
5545 return 0;
5546}
5547
5548static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr)
5549{
5550 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5551 struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5552 struct iceland_single_dpm_table *golden_mclk_table =
5553 &(data->golden_dpm_table.mclk_table);
5554 int value;
5555
5556 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5557 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5558 100 /
5559 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5560
5561 return value;
5562}
5563
/*
 * iceland_get_xclk - return the reference clock (XCLK) frequency.
 *
 * If the TCLK-to-XCLK mux is engaged in CG_CLKPIN_CNTL_2, the fixed TCLK
 * value is returned.  Otherwise the reference clock is read from the
 * VBIOS FirmwareInfo data table; if the XTALIN divider is active, the
 * value is divided by 4.  Returns 0 when the FirmwareInfo table cannot
 * be fetched.
 */
uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock;
	uint32_t tc;
	uint32_t divide;

	ATOM_FIRMWARE_INFO *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* Mux engaged: XCLK is driven by the fixed TCLK. */
	tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tc)
		return TCLK;

	fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
						&size, &frev, &crev);

	if (!fw_info)
		return 0;

	reference_clock = le16_to_cpu(fw_info->usReferenceClock);

	/* XTALIN divider active: crystal input is divided by 4. */
	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	if (0 != divide)
		return reference_clock / 4;

	return reference_clock;
}
5595
5596static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5597{
5598 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
5599 struct iceland_single_dpm_table *golden_mclk_table =
5600 &(data->golden_dpm_table.mclk_table);
5601 struct pp_power_state *ps;
5602 struct iceland_power_state *iceland_ps;
5603
5604 if (value > 20)
5605 value = 20;
5606
5607 ps = hwmgr->request_ps;
5608
5609 if (ps == NULL)
5610 return -EINVAL;
5611
5612 iceland_ps = cast_phw_iceland_power_state(&ps->hardware);
5613
5614 iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock =
5615 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5616 value / 100 +
5617 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5618
5619 return 0;
5620}
5621
5622static const struct pp_hwmgr_func iceland_hwmgr_funcs = {
5623 .backend_init = &iceland_hwmgr_backend_init,
5624 .backend_fini = &iceland_hwmgr_backend_fini,
5625 .asic_setup = &iceland_setup_asic_task,
5626 .dynamic_state_management_enable = &iceland_enable_dpm_tasks,
5627 .apply_state_adjust_rules = iceland_apply_state_adjust_rules,
5628 .force_dpm_level = &iceland_force_dpm_level,
5629 .power_state_set = iceland_set_power_state_tasks,
5630 .get_power_state_size = iceland_get_power_state_size,
5631 .get_mclk = iceland_dpm_get_mclk,
5632 .get_sclk = iceland_dpm_get_sclk,
5633 .patch_boot_state = iceland_dpm_patch_boot_state,
5634 .get_pp_table_entry = iceland_get_pp_table_entry,
5635 .get_num_of_pp_table_entries = iceland_get_num_of_entries,
5636 .print_current_perforce_level = iceland_print_current_perforce_level,
5637 .powerdown_uvd = iceland_phm_powerdown_uvd,
5638 .powergate_uvd = iceland_phm_powergate_uvd,
5639 .powergate_vce = iceland_phm_powergate_vce,
5640 .disable_clock_power_gating = iceland_phm_disable_clock_power_gating,
5641 .update_clock_gatings = iceland_phm_update_clock_gatings,
5642 .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment,
5643 .display_config_changed = iceland_display_configuration_changed_task,
5644 .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output,
5645 .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output,
5646 .get_temperature = iceland_thermal_get_temperature,
5647 .stop_thermal_controller = iceland_thermal_stop_thermal_controller,
5648 .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info,
5649 .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent,
5650 .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent,
5651 .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default,
5652 .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm,
5653 .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm,
5654 .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller,
5655 .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt,
5656 .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration,
5657 .check_states_equal = iceland_check_states_equal,
5658 .set_fan_control_mode = iceland_set_fan_control_mode,
5659 .get_fan_control_mode = iceland_get_fan_control_mode,
5660 .force_clock_level = iceland_force_clock_level,
5661 .print_clock_levels = iceland_print_clock_levels,
5662 .get_sclk_od = iceland_get_sclk_od,
5663 .set_sclk_od = iceland_set_sclk_od,
5664 .get_mclk_od = iceland_get_mclk_od,
5665 .set_mclk_od = iceland_set_mclk_od,
5666};
5667
5668int iceland_hwmgr_init(struct pp_hwmgr *hwmgr)
5669{
5670 iceland_hwmgr *data;
5671
5672 data = kzalloc (sizeof(iceland_hwmgr), GFP_KERNEL);
5673 if (data == NULL)
5674 return -ENOMEM;
5675 memset(data, 0x00, sizeof(iceland_hwmgr));
5676
5677 hwmgr->backend = data;
5678 hwmgr->hwmgr_func = &iceland_hwmgr_funcs;
5679 hwmgr->pptable_func = &pptable_funcs;
5680
5681 /* thermal */
5682 pp_iceland_thermal_initialize(hwmgr);
5683 return 0;
5684}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
deleted file mode 100644
index f253988de2d2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
+++ /dev/null
@@ -1,424 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#ifndef ICELAND_HWMGR_H
26#define ICELAND_HWMGR_H
27
28#include "hwmgr.h"
29#include "ppatomctrl.h"
30#include "ppinterrupt.h"
31#include "ppsmc.h"
32#include "iceland_powertune.h"
33#include "pp_endian.h"
34#include "smu71_discrete.h"
35
36#define ICELAND_MAX_HARDWARE_POWERLEVELS 2
37#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
38
/* One hardware performance level: clocks plus the PCIe configuration
 * used while that level is active. */
struct iceland_performance_level {
	uint32_t memory_clock;
	uint32_t engine_clock;
	uint16_t pcie_gen;
	uint16_t pcie_lane;
};

/* BACO (bus-active, chip-off) state description. */
struct _phw_iceland_bacos {
	uint32_t best_match;
	uint32_t baco_flags;
	struct iceland_performance_level performance_level;
};
typedef struct _phw_iceland_bacos phw_iceland_bacos;

/* UVD (video decode block) clock pair. */
struct _phw_iceland_uvd_clocks {
	uint32_t VCLK;
	uint32_t DCLK;
};

typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks;

/* VCE (video encode block) clock pair. */
struct _phw_iceland_vce_clocks {
	uint32_t EVCLK;
	uint32_t ECCLK;
};

typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks;

/* Hardware representation of a power state: per-engine clocks plus up
 * to ICELAND_MAX_HARDWARE_POWERLEVELS performance levels. */
struct iceland_power_state {
	uint32_t magic;
	phw_iceland_uvd_clocks uvd_clocks;
	phw_iceland_vce_clocks vce_clocks;
	uint32_t sam_clk;
	uint32_t acp_clk;
	uint16_t performance_level_count;
	bool dc_compatible;
	uint32_t sclk_threshold;
	struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS];
};

/* One entry of a DPM level table. */
struct _phw_iceland_dpm_level {
	bool enabled;
	uint32_t value;
	uint32_t param1;
};
typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level;

#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define ICELAND_MINIMUM_ENGINE_CLOCK 5000

/* A single DPM table: the first "count" entries of dpm_levels are valid. */
struct iceland_single_dpm_table {
	uint32_t count;
	phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};

/* All per-domain DPM tables (clocks and voltages). */
struct iceland_dpm_table {
	struct iceland_single_dpm_table sclk_table;
	struct iceland_single_dpm_table mclk_table;
	struct iceland_single_dpm_table pcie_speed_table;
	struct iceland_single_dpm_table vddc_table;
	struct iceland_single_dpm_table vdd_gfx_table;
	struct iceland_single_dpm_table vdd_ci_table;
	struct iceland_single_dpm_table mvdd_table;
};
/* NOTE(review): the tag _phw_iceland_dpm_table is never defined in this
 * header (the struct above is "iceland_dpm_table"); this typedef only
 * forward-declares an incomplete type and appears to be dead. */
typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table;
105
106
/* Shadow copies of clock-related registers.  (The tag spelling
 * "regisiters" is historic; the typedef below uses the corrected name.) */
struct _phw_iceland_clock_regisiters {
	uint32_t vCG_SPLL_FUNC_CNTL;
	uint32_t vCG_SPLL_FUNC_CNTL_2;
	uint32_t vCG_SPLL_FUNC_CNTL_3;
	uint32_t vCG_SPLL_FUNC_CNTL_4;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t vDLL_CNTL;
	uint32_t vMCLK_PWRMGT_CNTL;
	uint32_t vMPLL_AD_FUNC_CNTL;
	uint32_t vMPLL_DQ_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL_1;
	uint32_t vMPLL_FUNC_CNTL_2;
	uint32_t vMPLL_SS1;
	uint32_t vMPLL_SS2;
};
typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers;

/* Voltage SMIO register shadow. */
struct _phw_iceland_voltage_smio_registers {
	uint32_t vs0_vid_lower_smio_cntl;
};
typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers;


/* One memory-controller register set, used for memory clocks up to mclk_max. */
struct _phw_iceland_mc_reg_entry {
	uint32_t mclk_max;
	uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry;

struct _phw_iceland_mc_reg_table {
	uint8_t last; /* number of registers*/
	uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
	uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
	phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
	SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table;

#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2


/* Ultra Low Voltage parameter structure */
struct phw_iceland_ulv_parm{
	bool ulv_supported;
	uint32_t ch_ulv_parameter;
	uint32_t ulv_volt_change_delay;
	struct iceland_performance_level ulv_power_level;
};

#define ICELAND_MAX_LEAKAGE_COUNT 8

/* Leakage-voltage mapping: leakage_id[i] maps to actual_voltage[i],
 * for the first "count" entries. */
struct phw_iceland_leakage_voltage {
	uint16_t count;
	uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT];
	uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT];
};

/* Display timing constraints relevant to DPM decisions. */
struct _phw_iceland_display_timing {
	uint32_t min_clock_insr;
	uint32_t num_existing_displays;
};
typedef struct _phw_iceland_display_timing phw_iceland_display_timing;


/* Thermal trip points. */
struct phw_iceland_thermal_temperature_setting
{
	long temperature_low;
	long temperature_high;
	long temperature_shutdown;
};

/* Per-block DPM level enable bitmasks. */
struct _phw_iceland_dpmlevel_enable_mask {
	uint32_t uvd_dpm_enable_mask;
	uint32_t vce_dpm_enable_mask;
	uint32_t acp_dpm_enable_mask;
	uint32_t samu_dpm_enable_mask;
	uint32_t sclk_dpm_enable_mask;
	uint32_t mclk_dpm_enable_mask;
	uint32_t pcie_dpm_enable_mask;
};
typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask;

/* PCIe performance range (min/max levels). */
struct _phw_iceland_pcie_perf_range {
	uint16_t max;
	uint16_t min;
};
typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range;

/* Clock/voltage state established by the VBIOS at boot. */
struct _phw_iceland_vbios_boot_state {
	uint16_t mvdd_bootup_value;
	uint16_t vddc_bootup_value;
	uint16_t vddci_bootup_value;
	uint16_t vddgfx_bootup_value;
	uint32_t sclk_bootup_value;
	uint32_t mclk_bootup_value;
	uint16_t pcie_gen_bootup_value;
	uint16_t pcie_lane_bootup_value;
};
typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state;

/* Flag bits -- presumably for need_update_smu7_dpm_table; verify against users. */
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
214
/* We need to review which fields are needed. */
/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
/* Private backend state for the Iceland (Topaz) hwmgr; hung off
 * hwmgr->backend by iceland_hwmgr_init(). */
struct iceland_hwmgr {
	struct iceland_dpm_table dpm_table;
	/* "golden" = factory-default copy, used as the overdrive baseline */
	struct iceland_dpm_table golden_dpm_table;

	uint32_t voting_rights_clients0;
	uint32_t voting_rights_clients1;
	uint32_t voting_rights_clients2;
	uint32_t voting_rights_clients3;
	uint32_t voting_rights_clients4;
	uint32_t voting_rights_clients5;
	uint32_t voting_rights_clients6;
	uint32_t voting_rights_clients7;
	uint32_t static_screen_threshold_unit;
	uint32_t static_screen_threshold;
	uint32_t voltage_control;
	uint32_t vdd_gfx_control;

	uint32_t vddc_vddci_delta;
	uint32_t vddc_vddgfx_delta;

	struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
	struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
	struct pp_interrupt_registration_info smc_to_host_interrupt_info;
	uint32_t active_auto_throttle_sources;

	struct pp_interrupt_registration_info external_throttle_interrupt;
	irq_handler_func_t external_throttle_callback;
	void *external_throttle_context;

	struct pp_interrupt_registration_info ctf_interrupt_info;
	irq_handler_func_t ctf_callback;
	void *ctf_context;

	phw_iceland_clock_registers clock_registers;
	phw_iceland_voltage_smio_registers voltage_smio_registers;

	bool is_memory_GDDR5;
	uint16_t acpi_vddc;
	bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
	uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
	uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
	uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
	uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
	uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
	struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
	struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
	struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */

	uint32_t mvdd_control;
	uint32_t vddc_mask_low;
	uint32_t mvdd_mask_low;
	uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
	uint16_t min_vddc_in_pp_table;
	uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
	uint16_t min_vddci_in_pp_table;
	uint32_t mclk_strobe_mode_threshold;
	uint32_t mclk_stutter_mode_threshold;
	uint32_t mclk_edc_enable_threshold;
	uint32_t mclk_edc_wr_enable_threshold;
	bool is_uvd_enabled;
	bool is_xdma_enabled;
	phw_iceland_vbios_boot_state vbios_boot_state;

	bool battery_state;
	bool is_tlu_enabled;
	bool pcie_performance_request;

	/* -------------- SMC SRAM Address of firmware header tables ----------------*/
	uint32_t sram_end; /* The first address after the SMC SRAM. */
	uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
	uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
	uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
	uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
	uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
	uint32_t ulv_settings_start;
	SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
	SMU71_Discrete_MCRegisters mc_reg_table;
	SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */

	/* -------------- Stuff originally coming from Evergreen --------------------*/
	phw_iceland_mc_reg_table iceland_mc_reg_table;
	uint32_t vdd_ci_control;
	pp_atomctrl_voltage_table vddc_voltage_table;
	pp_atomctrl_voltage_table vddci_voltage_table;
	pp_atomctrl_voltage_table vddgfx_voltage_table;
	pp_atomctrl_voltage_table mvdd_voltage_table;

	uint32_t mgcg_cgtt_local2;
	uint32_t mgcg_cgtt_local3;
	uint32_t gpio_debug;
	uint32_t mc_micro_code_feature;
	uint32_t highest_mclk;
	uint16_t acpi_vdd_ci;
	uint8_t mvdd_high_index;
	uint8_t mvdd_low_index;
	bool dll_defaule_on; /* sic: "default" */
	bool performance_request_registered;

	/* ----------------- Low Power Features ---------------------*/
	phw_iceland_bacos bacos;
	struct phw_iceland_ulv_parm ulv;

	/* ----------------- CAC Stuff ---------------------*/
	uint32_t cac_table_start;
	bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
	bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
	bool cac_enabled;

	/* ----------------- DPM2 Parameters ---------------------*/
	uint32_t power_containment_features;
	bool enable_bapm_feature;
	bool enable_dte_feature;
	bool enable_tdc_limit_feature;
	bool enable_pkg_pwr_tracking_feature;
	bool disable_uvd_power_tune_feature;
	struct iceland_pt_defaults *power_tune_defaults;
	SMU71_Discrete_PmFuses power_tune_table;
	uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
	uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */

	/* ----------------- Phase Shedding ---------------------*/
	bool vddc_phase_shed_control;

	/* --------------------- DI/DT --------------------------*/
	phw_iceland_display_timing display_timing;

	/* --------- ReadRegistry data for memory and engine clock margins ---- */
	uint32_t engine_clock_data;
	uint32_t memory_clock_data;

	/* -------- Thermal Temperature Setting --------------*/
	struct phw_iceland_thermal_temperature_setting thermal_temp_setting;
	phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask;

	uint32_t need_update_smu7_dpm_table;
	uint32_t sclk_dpm_key_disabled;
	uint32_t mclk_dpm_key_disabled;
	uint32_t pcie_dpm_key_disabled;
	/* used to store the previous dal min sclock */
	uint32_t min_engine_clocks;
	phw_iceland_pcie_perf_range pcie_gen_performance;
	phw_iceland_pcie_perf_range pcie_lane_performance;
	phw_iceland_pcie_perf_range pcie_gen_power_saving;
	phw_iceland_pcie_perf_range pcie_lane_power_saving;
	bool use_pcie_performance_levels;
	bool use_pcie_power_saving_levels;
	/* percentage value from 0-100, default 50 */
	uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
	uint32_t mclk_activity_target;
	uint32_t low_sclk_interrupt_threshold;
	uint32_t last_mclk_dpm_enable_mask;
	bool uvd_enabled;
	uint32_t pcc_monitor_enabled;

	/* --------- Power Gating States ------------*/
	bool uvd_power_gated; /* 1: gated, 0:not gated */
	bool vce_power_gated; /* 1: gated, 0:not gated */
	bool samu_power_gated; /* 1: gated, 0:not gated */
	bool acp_power_gated; /* 1: gated, 0:not gated */
	bool pg_acp_init;

	/* soft pptable for re-uploading into smu */
	void *soft_pp_table;
};

typedef struct iceland_hwmgr iceland_hwmgr;
383
/* Entry points shared with the smumgr/powertune code. */
int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr);
int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr);

/* DPM2 TDP step sizes (percent). */
#define ICELAND_DPM2_NEAR_TDP_DEC 10
#define ICELAND_DPM2_ABOVE_SAFE_INC 5
#define ICELAND_DPM2_BELOW_SAFE_INC 20

/*
 * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size
 * is 128, then this value should be Log2(128) = 7.
 */
#define ICELAND_DPM2_LTA_WINDOW_SIZE 7

#define ICELAND_DPM2_LTS_TRUNCATE 0

#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100

#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF
#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF

#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50

/* SQ power-ramp parameters. */
#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12
#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF

/* Values for iceland_hwmgr.voltage_control and friends. */
#define ICELAND_VOLTAGE_CONTROL_NONE 0x0
#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1
#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2

/* convert to Q8.8 format for firmware */
#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256

#define ICELAND_UNUSED_GPIO_PIN 0x7F

#endif /* ICELAND_HWMGR_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
deleted file mode 100644
index 041e9648e592..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
+++ /dev/null
@@ -1,490 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
26#include "amdgpu.h"
27#include "hwmgr.h"
28#include "smumgr.h"
29#include "iceland_hwmgr.h"
30#include "iceland_powertune.h"
31#include "iceland_smumgr.h"
32#include "smu71_discrete.h"
33#include "smu71.h"
34#include "pp_debug.h"
35#include "cgs_common.h"
36#include "pp_endian.h"
37
38#include "bif/bif_5_0_d.h"
39#include "bif/bif_5_0_sh_mask.h"
40
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1

/* Iceland (Topaz) PCI device IDs, used to choose the per-SKU
 * powertune defaults table. */
#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
48
49
/* Generic powertune defaults -- used as the fallback when the device ID
 * is not recognized (see iceland_initialize_power_tune_defaults). */
struct iceland_pt_defaults defaults_iceland =
{
	/*
	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

/* 35W - XT, XTL */
struct iceland_pt_defaults defaults_icelandxt =
{
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};

/* 25W - PRO, LE */
struct iceland_pt_defaults defaults_icelandpro =
{
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
88
/*
 * iceland_initialize_power_tune_defaults - pick the per-SKU powertune
 * defaults table by PCI device ID and set the initial power-tune
 * platform capabilities.
 */
void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	uint32_t tmp = 0;
	struct cgs_system_info sys_info = {0};
	uint32_t pdev_id;

	/* Query the PCI device ID through the CGS layer. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	pdev_id = (uint32_t)sys_info.value;

	switch (pdev_id) {
	case DEVICE_ID_VI_ICELAND_M_6900:
	case DEVICE_ID_VI_ICELAND_M_6903:
		/* 35W parts (XT, XTL) */
		data->power_tune_defaults = &defaults_icelandxt;
		break;

	case DEVICE_ID_VI_ICELAND_M_6901:
	case DEVICE_ID_VI_ICELAND_M_6902:
		/* 25W parts (PRO, LE) */
		data->power_tune_defaults = &defaults_icelandpro;
		break;
	default:
		/* TODO: need to assign valid defaults */
		data->power_tune_defaults = &defaults_iceland;
		pr_warning("Unknown V.I. Device ID.\n");
		break;
	}

	/* Assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_CAC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TCPRamping);

	/* NOTE(review): tmp is never changed from 0 above, so the DTE Tj
	 * offset is always 0 and the branch below is always taken. */
	data->ul_dte_tj_offset = tmp;

	if (!tmp) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_CAC);

		data->fast_watermark_threshold = 100;

		/* NOTE(review): PowerContainment was just unset above, so
		 * this block is effectively dead unless a caller re-enables
		 * the cap first -- TODO confirm intended behavior. */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_PowerContainment)) {
			tmp = 1;
			data->enable_dte_feature = tmp ? false : true;
			data->enable_tdc_limit_feature = tmp ? true : false;
			data->enable_pkg_pwr_tracking_feature = tmp ? true : false;
		}
	}
}
149
/*
 * iceland_populate_bapm_parameters_in_dpm_table - fill the BAPM-related
 * fields of the SMC DPM table from the powerplay CAC/TDP tables, the
 * optional PPM table, and the per-SKU powertune defaults.
 *
 * Always returns 0.
 */
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	struct iceland_pt_defaults *defaults = data->power_tune_defaults;
	SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	uint16_t *def1, *def2;
	int i, j, k;

	/*
	 * TDP number of fraction bits are changed from 8 to 7 for Iceland
	 * as requested by SMC team
	 */
	/* NOTE(review): the comment above says 7 fraction bits, but the
	 * scale factor used is 256 (2^8) -- TODO confirm which is right. */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset;

	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	/* The following are for new Iceland Multi-input fan/thermal control */
	if(NULL != ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
	/* Copy the BAPMTI_R / BAPMTI_RC default matrices, converting each
	 * entry to SMC (big-endian) byte order. */
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU71_DTE_SOURCES; j++) {
			for (k = 0; k < SMU71_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
203
204static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
205{
206 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
207 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
208
209 data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
210 data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
211 data->power_tune_table.SviLoadLineTrimVddC = 3;
212 data->power_tune_table.SviLoadLineOffsetVddC = 0;
213
214 return 0;
215}
216
217static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
218{
219 uint16_t tdc_limit;
220 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
221 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
222
223 /* TDC number of fraction bits are changed from 8 to 7
224 * for Iceland as requested by SMC team
225 */
226 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
227 data->power_tune_table.TDC_VDDC_PkgLimit =
228 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
229 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
230 defaults->tdc_vddc_throttle_release_limit_perc;
231 data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
232
233 return 0;
234}
235
236static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
237{
238 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
239 const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
240 uint32_t temp;
241
242 if (iceland_read_smc_sram_dword(hwmgr->smumgr,
243 fuse_table_offset +
244 offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
245 (uint32_t *)&temp, data->sram_end))
246 PP_ASSERT_WITH_CODE(false,
247 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
248 return -EINVAL);
249 else
250 data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
251
252 return 0;
253}
254
/* Temperature-scaler fuses are not programmed on Iceland; nothing to do. */
static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	return 0;
}
259
260static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
261{
262 int i;
263 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
264
265 /* Currently not used. Set all to zero. */
266 for (i = 0; i < 8; i++)
267 data->power_tune_table.GnbLPML[i] = 0;
268
269 return 0;
270}
271
/* DW17 (GnbLPML min/max VID from BAPM VDDC): not programmed on Iceland;
 * stub kept for sequence symmetry with other SMU71 platforms. */
static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
276
277static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
278{
279 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
280 uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
281 uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
282 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
283
284 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
285 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
286
287 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
288 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
289 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
290 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
291
292 return 0;
293}
294
/*
 * Populate the SMU71 PM fuse table (DW0-DW18) and upload it to SMC RAM.
 *
 * Only runs when the PowerContainment platform cap is set; returns 0
 * (success, nothing to do) otherwise.  Each helper fills a slice of
 * data->power_tune_table, which is then copied to the SMC at the offset
 * read from the firmware header.  Returns -EINVAL on any step failure.
 */
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PM fuse table inside the SMC firmware image. */
		if (iceland_read_smc_sram_dword(hwmgr->smumgr,
				SMU71_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU71_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW0 - DW3 */
		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate bapm vddc vid Failed!",
					return -EINVAL);

		/* DW4 - DW5 */
		if (iceland_populate_vddc_vid(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate vddc vid Failed!",
					return -EINVAL);

		/* DW6 */
		if (iceland_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (iceland_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != iceland_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW16 */
		if (iceland_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW17 */
		if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		/* DW18 */
		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
					return -EINVAL);

		/* Push the assembled table into SMC RAM. */
		if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				sizeof(struct SMU71_Discrete_PmFuses), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
371
372int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr)
373{
374 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
375 int result = 0;
376
377 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
378 PHM_PlatformCaps_CAC)) {
379 int smc_result;
380 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
381 (uint16_t)(PPSMC_MSG_EnableCac));
382 PP_ASSERT_WITH_CODE((0 == smc_result),
383 "Failed to enable CAC in SMC.", result = -1);
384
385 data->cac_enabled = (0 == smc_result) ? true : false;
386 }
387 return result;
388}
389
390static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
391{
392 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
393
394 if(data->power_containment_features &
395 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
396 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
397 PPSMC_MSG_PkgPwrSetLimit, n);
398 return 0;
399}
400
401static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
402{
403 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
404 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
405}
406
407int iceland_enable_power_containment(struct pp_hwmgr *hwmgr)
408{
409 struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
410 SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table;
411 int smc_result;
412 int result = 0;
413 uint32_t is_asic_kicker;
414
415 data->power_containment_features = 0;
416 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
417 PHM_PlatformCaps_PowerContainment)) {
418 is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2);
419 is_asic_kicker = (is_asic_kicker >> 12) & 0x01;
420
421 if (data->enable_bapm_feature &&
422 (!is_asic_kicker ||
423 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
424 PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) {
425 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
426 (uint16_t)(PPSMC_MSG_EnableDTE));
427 PP_ASSERT_WITH_CODE((0 == smc_result),
428 "Failed to enable BAPM in SMC.", result = -1;);
429 if (0 == smc_result)
430 data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
431 }
432
433 if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
434 PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))
435 dpm_table->DTEMode = 2;
436
437 if (data->enable_tdc_limit_feature) {
438 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
439 (uint16_t)(PPSMC_MSG_TDCLimitEnable));
440 PP_ASSERT_WITH_CODE((0 == smc_result),
441 "Failed to enable TDCLimit in SMC.", result = -1;);
442 if (0 == smc_result)
443 data->power_containment_features |=
444 POWERCONTAINMENT_FEATURE_TDCLimit;
445 }
446
447 if (data->enable_pkg_pwr_tracking_feature) {
448 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
449 (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
450 PP_ASSERT_WITH_CODE((0 == smc_result),
451 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
452 if (0 == smc_result) {
453 struct phm_cac_tdp_table *cac_table =
454 hwmgr->dyn_state.cac_dtp_table;
455 uint32_t default_limit =
456 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
457
458 data->power_containment_features |=
459 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
460
461 if (iceland_set_power_limit(hwmgr, default_limit))
462 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
463 }
464 }
465 }
466 return result;
467}
468
469int iceland_power_control_set_level(struct pp_hwmgr *hwmgr)
470{
471 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
472 int adjust_percent, target_tdp;
473 int result = 0;
474
475 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
476 PHM_PlatformCaps_PowerContainment)) {
477 /* adjustment percentage has already been validated */
478 adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
479 hwmgr->platform_descriptor.TDPAdjustment :
480 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
481 /*
482 * SMC requested that target_tdp to be 7 bit fraction in DPM table
483 * but message to be 8 bit fraction for messages
484 */
485 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
486 result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
487 }
488
489 return result;
490}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
deleted file mode 100644
index 6c25ee139ca3..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
#ifndef ICELAND_POWERTUNE_H
#define ICELAND_POWERTUNE_H

#include "smu71.h"

/* How/where a powertune config register is accessed. */
enum iceland_pt_config_reg_type {
	ICELAND_CONFIGREG_MMR = 0,
	ICELAND_CONFIGREG_SMC_IND,
	ICELAND_CONFIGREG_DIDT_IND,
	ICELAND_CONFIGREG_CACHE,
	ICELAND_CONFIGREG_MAX
};

/* PowerContainment Features (bit flags in power_containment_features) */
#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
/*
 * NOTE(review): BAPM shares bit 0 with DTE above, so the two features are
 * indistinguishable in power_containment_features.  Confirm whether BAPM
 * was meant to have its own bit (e.g. 0x00000008).
 */
#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001

/* One powertune register programming entry. */
struct iceland_pt_config_reg {
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
	enum iceland_pt_config_reg_type type;
};

/* Per-ASIC powertune defaults used to fill the SMU71 PM fuse table. */
struct iceland_pt_defaults
{
	uint8_t svi_load_line_en;
	uint8_t svi_load_line_vddc;
	uint8_t tdc_vddc_throttle_release_limit_perc;
	uint8_t tdc_mawt;
	uint8_t tdc_waterfall_ctl;
	uint8_t dte_ambient_temp_base;
	uint32_t display_cac;
	uint32_t bamp_temp_gradient;
	uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
	uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};

void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr);
int iceland_enable_power_containment(struct pp_hwmgr *hwmgr);
int iceland_power_control_set_level(struct pp_hwmgr *hwmgr);

#endif /* ICELAND_POWERTUNE_H */
74
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
deleted file mode 100644
index 527f37022424..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
+++ /dev/null
@@ -1,595 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25#include <asm/div64.h>
26#include "iceland_thermal.h"
27#include "iceland_hwmgr.h"
28#include "iceland_smumgr.h"
29#include "atombios.h"
30#include "ppsmc.h"
31
32#include "gmc/gmc_8_1_d.h"
33#include "gmc/gmc_8_1_sh_mask.h"
34
35#include "bif/bif_5_0_d.h"
36#include "bif/bif_5_0_sh_mask.h"
37
38#include "smu/smu_7_1_1_d.h"
39#include "smu/smu_7_1_1_sh_mask.h"
40
41
42/**
43* Get Fan Speed Control Parameters.
44* @param hwmgr the address of the powerplay hardware manager.
45* @param pSpeed is the address of the structure where the result is to be placed.
46* @exception Always succeeds except if we cannot zero out the output structure.
47*/
48int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
49 struct phm_fan_speed_info *fan_speed_info)
50{
51
52 if (hwmgr->thermal_controller.fanInfo.bNoFan)
53 return 0;
54
55 fan_speed_info->supports_percent_read = true;
56 fan_speed_info->supports_percent_write = true;
57 fan_speed_info->min_percent = 0;
58 fan_speed_info->max_percent = 100;
59
60 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
61 fan_speed_info->supports_rpm_read = true;
62 fan_speed_info->supports_rpm_write = true;
63 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
64 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
65 } else {
66 fan_speed_info->min_rpm = 0;
67 fan_speed_info->max_rpm = 0;
68 }
69
70 return 0;
71}
72
73/**
74* Get Fan Speed in percent.
75* @param hwmgr the address of the powerplay hardware manager.
76* @param pSpeed is the address of the structure where the result is to be placed.
77* @exception Fails is the 100% setting appears to be 0.
78*/
79int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
80{
81 uint32_t duty100;
82 uint32_t duty;
83 uint64_t tmp64;
84
85 if (hwmgr->thermal_controller.fanInfo.bNoFan)
86 return 0;
87
88 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
89 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
90
91 if (0 == duty100)
92 return -EINVAL;
93
94
95 tmp64 = (uint64_t)duty * 100;
96 do_div(tmp64, duty100);
97 *speed = (uint32_t)tmp64;
98
99 if (*speed > 100)
100 *speed = 100;
101
102 return 0;
103}
104
/**
 * Get Fan Speed in RPM.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param speed where the result would be placed.
 * @return 0 always — RPM readback is not implemented for Iceland;
 *         *speed is left untouched.
 */
int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
	return 0;
}
115
/**
 * Set fan control to static mode so the caller can pick a fixed speed.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM.
 * @return 0 always.
 *
 * The hardware default mode and TMIN are captured on first use so
 * iceland_fan_ctrl_set_default_mode() can restore them later.
 */
int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{

	if (hwmgr->fan_ctrl_is_in_default_mode) {
		/* Save hardware defaults before overriding them. */
		hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
		hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
		hwmgr->fan_ctrl_is_in_default_mode = false;
	}

	/* NOTE(review): TMIN is forced to 0 here — presumably to disable the
	 * minimum-temperature gating while under manual control; confirm. */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);

	return 0;
}
136
/**
 * Restore the fan controller to the PWM mode and TMIN captured by
 * iceland_fan_ctrl_set_static_mode(); no-op when already in default mode.
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 always.
 */
static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr->fan_ctrl_is_in_default_mode) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
		hwmgr->fan_ctrl_is_in_default_mode = true;
	}

	return 0;
}
152
153int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
154{
155 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
156}
157
158
159int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
160{
161 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
162}
163
164/**
165* Set Fan Speed in percent.
166* @param hwmgr the address of the powerplay hardware manager.
167* @param speed is the percentage value (0% - 100%) to be set.
168* @exception Fails is the 100% setting appears to be 0.
169*/
170int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
171{
172 uint32_t duty100;
173 uint32_t duty;
174 uint64_t tmp64;
175
176 if (hwmgr->thermal_controller.fanInfo.bNoFan)
177 return -EINVAL;
178
179 if (speed > 100) {
180 pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n");
181 speed = 100;
182 }
183
184 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
185 iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
186
187 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
188
189 if (0 == duty100)
190 return -EINVAL;
191
192 tmp64 = (uint64_t)speed * duty100;
193 do_div(tmp64, 100);
194 duty = (uint32_t)tmp64;
195
196 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
197
198 return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
199}
200
201/**
202* Reset Fan Speed to default.
203* @param hwmgr the address of the powerplay hardware manager.
204* @exception Always succeeds.
205*/
206int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
207{
208 int result;
209
210 if (hwmgr->thermal_controller.fanInfo.bNoFan)
211 return 0;
212
213 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
214 result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
215 if (0 == result)
216 result = iceland_fan_ctrl_start_smc_fan_control(hwmgr);
217 } else
218 result = iceland_fan_ctrl_set_default_mode(hwmgr);
219
220 return result;
221}
222
/**
 * Set Fan Speed in RPM.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param speed the requested RPM value.
 * @return 0 always — RPM control is not implemented for Iceland; the
 *         request is silently ignored.
 */
int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
	return 0;
}
233
234/**
235* Reads the remote temperature from the SIslands thermal controller.
236*
237* @param hwmgr The address of the hardware manager.
238*/
239int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr)
240{
241 int temp;
242
243 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
244
245 /*
246 * Bit 9 means the reading is lower than the lowest usable
247 * value.
248 */
249 if (0 != (0x200 & temp))
250 temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING;
251 else
252 temp = (temp & 0x1ff);
253
254 temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
255
256 return temp;
257}
258
/**
 * Set the requested temperature range for high and low alert signals.
 * @param hwmgr The address of the hardware manager.
 * @param low_temp requested low threshold, in PP temperature units.
 * @param high_temp requested high threshold, in PP temperature units.
 * @return -EINVAL when the clamped range is inverted, 0 otherwise.
 */
static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
{
	uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	/* Clamp the request to the hardware-supported window. */
	if (low < low_temp)
		low = low_temp;
	if (high > high_temp)
		high = high_temp;

	if (low > high)
		return -EINVAL;

	/* The registers take whole degrees, so divide the units back out. */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));

	return 0;
}
285
/**
 * Program the thermal controller's one-time setup registers: tachometer
 * edges per revolution (when a tach is present) and the PWM response rate.
 * @param hwmgr The address of the hardware manager.
 * @return 0 always.
 */
static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
{
	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				CG_TACH_CTRL, EDGE_PER_REV,
				hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);

	/* NOTE(review): 0x28 is a vendor-provided response-rate constant. */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);

	return 0;
}
302
303/**
304* Enable thermal alerts on the RV770 thermal controller.
305*
306* @param hwmgr The address of the hardware manager.
307*/
308static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr)
309{
310 uint32_t alert;
311
312 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
313 alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
314 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
315
316 /* send message to SMU to enable internal thermal interrupts */
317 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
318}
319
320/**
321* Disable thermal alerts on the RV770 thermal controller.
322* @param hwmgr The address of the hardware manager.
323*/
324static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr)
325{
326 uint32_t alert;
327
328 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
329 alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
330 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
331
332 /* send message to SMU to disable internal thermal interrupts */
333 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
334}
335
336/**
337* Uninitialize the thermal controller.
338* Currently just disables alerts.
339* @param hwmgr The address of the hardware manager.
340*/
341int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
342{
343 int result = iceland_thermal_disable_alert(hwmgr);
344
345 if (result)
346 pr_warning("Failed to disable thermal alerts!\n");
347
348 if (hwmgr->thermal_controller.fanInfo.bNoFan)
349 iceland_fan_ctrl_set_default_mode(hwmgr);
350
351 return result;
352}
353
/**
 * Set up the fan table to control the fan using the SMC.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param input the pointer to input data (unused; table-function signature)
 * @param output the pointer to output data (unused)
 * @param storage the pointer to temporary storage (unused)
 * @param result the last failure code (unused)
 * @return 0 always; on any precondition failure the MicrocodeFanControl
 *         cap is cleared instead of returning an error.
 */
int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
	SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
		return 0;

	/* No fan-table address in SMC RAM: fall back to hardware control. */
	if (0 == data->fan_table_start) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);

	if (0 == duty100) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* usPWMMin is in hundredths of a percent; scale into duty units. */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Piecewise-linear duty/temperature slopes; the +50 rounds after the
	 * /100 rescale.  NOTE(review): fixed-point format assumed from the
	 * *16 scaling — confirm against the SMU71 fan-table spec. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Temperatures are in hundredths of a degree; +50 rounds to nearest. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = iceland_get_xclk(hwmgr);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);

	//fan_table.FanControl_GL_Flag = 1;

	/* NOTE(review): res is captured but never propagated — a copy failure
	 * currently goes unreported (see the disabled block below). */
	res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
				hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);

	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
				hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);

	if (0 != res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
*/
	return 0;
}
444
/**
 * Table-function: start SMC fan control, if still permitted.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param input/output/storage/result unused; phm table-function signature.
 * @return 0 always.
 *
 * If the fan-table setup failed it may have cleared
 * PHM_PlatformCaps_MicrocodeFanControl even after this function was
 * included in the table, so re-check the cap before starting.
 */
int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
		iceland_fan_ctrl_start_smc_fan_control(hwmgr);
		iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
	}

	return 0;
}
466
467/**
468* Set temperature range for high and low alerts
469* @param hwmgr the address of the powerplay hardware manager.
470* @param pInput the pointer to input data
471* @param pOutput the pointer to output data
472* @param pStorage the pointer to temporary storage
473* @param Result the last failure code
474* @return result from set temperature range routine
475*/
476static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
477 void *input, void *output, void *storage, int result)
478{
479 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
480
481 if (range == NULL)
482 return -EINVAL;
483
484 return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max);
485}
486
/**
 * Table-function: program the one-time thermal setup registers.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param input/output/storage/result unused; phm table-function signature.
 * @return result from the initialize-thermal-controller routine.
 */
static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input,
		void *output, void *storage, int result)
{
	return iceland_thermal_initialize(hwmgr);
}
501
/**
 * Table-function: enable the high and low thermal alerts.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param input/output/storage/result unused; phm table-function signature.
 * @return result from the enable-alert routine.
 */
static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr,
		void *input, void *output, void *storage, int result)
{
	return iceland_thermal_enable_alert(hwmgr);
}
516
/**
 * Table-function: disable the high and low thermal alerts.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param input/output/storage/result unused; phm table-function signature.
 * @return result from the disable-alert routine.
 */
static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return iceland_thermal_disable_alert(hwmgr);
}
530
/* Ordered steps to bring up the thermal controller. */
static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = {
	{ NULL, tf_iceland_thermal_initialize },
	{ NULL, tf_iceland_thermal_set_temperature_range },
	{ NULL, tf_iceland_thermal_enable_alert },
	/*
	 * We should restrict performance levels to low before we halt
	 * the SMC. On the other hand we are still in boot state when
	 * we do this so it would be pointless. If this assumption
	 * changes we have to revisit this table.
	 */
	{ NULL, tf_iceland_thermal_setup_fan_table},
	{ NULL, tf_iceland_thermal_start_smc_fan_control},
	{ NULL, NULL }
};

static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = {
	0,
	PHM_MasterTableFlag_None,
	iceland_thermal_start_thermal_controller_master_list
};

/* Reprogramming the range: disable alerts, set range, re-enable alerts. */
static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = {
	{ NULL, tf_iceland_thermal_disable_alert},
	{ NULL, tf_iceland_thermal_set_temperature_range},
	{ NULL, tf_iceland_thermal_enable_alert},
	{ NULL, NULL }
};

static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = {
	0,
	PHM_MasterTableFlag_None,
	iceland_thermal_set_temperature_range_master_list
};
564
565int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
566{
567 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
568 iceland_fan_ctrl_set_default_mode(hwmgr);
569 return 0;
570}
571
/**
 * Initializes the thermal-controller function tables in the hardware
 * manager: builds the set-temperature-range and start-thermal-controller
 * master tables, tearing down the former if building the latter fails.
 * @param hwmgr The address of the hardware manager.
 * @return any error code from table construction; 0 on success.
 */
int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
{
	int result;

	result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));

	if (0 == result) {
		result = phm_construct_table(hwmgr,
				&iceland_thermal_start_thermal_controller_master,
				&(hwmgr->start_thermal_controller));
		/* Don't leak the first table when the second fails. */
		if (0 != result)
			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
	}

	if (0 == result)
		hwmgr->fan_ctrl_is_in_default_mode = true;
	return result;
}
595
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
deleted file mode 100644
index 267945f4df71..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui <ray.huang@amd.com>
23 *
24 */
25
#ifndef ICELAND_THERMAL_H
#define ICELAND_THERMAL_H

#include "hwmgr.h"

/* Alert bits in the thermal interrupt status/enable registers. */
#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1
#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2

/* Sensor reading limits (presumably degrees C — TODO confirm against TMP register spec). */
#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256
#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255

/* Clamp range for the programmable alert thresholds. */
#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0
#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255

/* Fan FDO control-mode values: fixed duty cycle vs. fixed RPM. */
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5


/* Thermal/fan control entry points implemented in iceland_thermal.c. */
extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);

#endif
58
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644
index f78ffd935cee..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef POLARIS10_DYN_DEFAULTS_H
#define POLARIS10_DYN_DEFAULTS_H


/* Direction hint for DPM trend detection. */
enum Polaris10dpm_TrendDetection {
	Polaris10Adpm_TrendDetection_AUTO,
	Polaris10Adpm_TrendDetection_UP,
	Polaris10Adpm_TrendDetection_DOWN
};
typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;

/* We need to fill in the default values */


/* Default values for CG_FREQ_TRAN_VOTING_0..7 (activity-state voting clients). */
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000


/* Thermal-protection / static-screen / clock-gating defaults. */
#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4

/* ULV (ultra-low-voltage) sequencing defaults. */
#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687

#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
/* Target busy percentages used when computing DPM activity thresholds. */
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10

#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644
index 191ed504effb..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ /dev/null
@@ -1,5289 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <asm/div64.h>
26#include "linux/delay.h"
27#include "pp_acpi.h"
28#include "hwmgr.h"
29#include "polaris10_hwmgr.h"
30#include "polaris10_powertune.h"
31#include "polaris10_dyn_defaults.h"
32#include "polaris10_smumgr.h"
33#include "pp_debug.h"
34#include "ppatomctrl.h"
35#include "atombios.h"
36#include "pptable_v1_0.h"
37#include "pppcielanes.h"
38#include "amd_pcie_helpers.h"
39#include "hardwaremanager.h"
40#include "process_pptables_v1_0.h"
41#include "cgs_common.h"
42#include "smu74.h"
43#include "smu_ucode_xfer_vi.h"
44#include "smu74_discrete.h"
45#include "smu/smu_7_1_3_d.h"
46#include "smu/smu_7_1_3_sh_mask.h"
47#include "gmc/gmc_8_1_d.h"
48#include "gmc/gmc_8_1_sh_mask.h"
49#include "oss/oss_3_0_d.h"
50#include "gca/gfx_8_0_d.h"
51#include "bif/bif_5_0_d.h"
52#include "bif/bif_5_0_sh_mask.h"
53#include "gmc/gmc_8_1_d.h"
54#include "gmc/gmc_8_1_sh_mask.h"
55#include "bif/bif_5_0_d.h"
56#include "bif/bif_5_0_sh_mask.h"
57#include "dce/dce_10_0_d.h"
58#include "dce/dce_10_0_sh_mask.h"
59
60#include "polaris10_thermal.h"
61#include "polaris10_clockpowergating.h"
62
/* Memory-controller clock-gating ARB frequency set identifiers (F0..F3). */
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

/* MC clock-gating sequencer command values. */
#define MC_CG_SEQ_DRAMCONF_S0 0x05
#define MC_CG_SEQ_DRAMCONF_S1 0x06
#define MC_CG_SEQ_YCLK_SUSPEND 0x04
#define MC_CG_SEQ_YCLK_RESUME 0x0a


/* Upper bound of addressable SMC SRAM. */
#define SMC_RAM_END 0x40000

/* SMC indirect clock-gating register window. */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000

/* Voltage value scaling between pptable units and SMC units. */
#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

/* Maximum allowed VDDC-to-VDDCI delta (same units as voltage tables). */
#define VDDC_VDDCI_DELTA 200

/* Memory clock breakpoints used to pick a latency estimate. */
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000

#define MEM_LATENCY_HIGH 45
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF

/* Field of MC_SEQ_MISC0 identifying GDDR5 memory. */
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5


/* PCIe reference bus clock in 10 kHz units, and the derived timer clock. */
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};
108
109static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
110
111static struct polaris10_power_state *cast_phw_polaris10_power_state(
112 struct pp_hw_power_state *hw_ps)
113{
114 PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
115 "Invalid Powerstate Type!",
116 return NULL);
117
118 return (struct polaris10_power_state *)hw_ps;
119}
120
121static const struct polaris10_power_state *
122cast_const_phw_polaris10_power_state(
123 const struct pp_hw_power_state *hw_ps)
124{
125 PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
126 "Invalid Powerstate Type!",
127 return NULL);
128
129 return (const struct polaris10_power_state *)hw_ps;
130}
131
132static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
133{
134 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
135 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
136 ? true : false;
137}
138
/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	/* Select MC debug index 0x9F, then read the version word back
	 * through the paired DEBUG_DATA register. */
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}
153
154static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
155{
156 uint32_t speedCntl = 0;
157
158 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
159 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
160 ixPCIE_LC_SPEED_CNTL);
161 return((uint16_t)PHM_GET_FIELD(speedCntl,
162 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
163}
164
165static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
166{
167 uint32_t link_width;
168
169 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
170 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
171 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
172
173 PP_ASSERT_WITH_CODE((7 >= link_width),
174 "Invalid PCIe lane width!", return 0);
175
176 return decode_pcie_lane_width(link_width);
177}
178
/**
* Enable voltage control
*
* @param pHwMgr the address of the powerplay hardware manager.
* @return 0 on success; note the failure path returns 1, not a
*         negative errno — callers only test for non-zero.
*/
static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	/* Ask the SMC firmware to start its voltage controller. */
	PP_ASSERT_WITH_CODE(
		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
		"Failed to enable voltage DPM during DPM Start Function!",
		return 1;
	);

	return 0;
}
195
196/**
197* Checks if we want to support voltage control
198*
199* @param hwmgr the address of the powerplay hardware manager.
200*/
201static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
202{
203 const struct polaris10_hwmgr *data =
204 (const struct polaris10_hwmgr *)(hwmgr->backend);
205
206 return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
207}
208
/**
* Enable voltage control
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
223
/**
* Create Voltage Tables.
*
* Builds the MVDD, VDDCI and VDDC voltage tables from either ATOM
* (GPIO-controlled rails) or the SVI2 dependency/lookup tables,
* depending on the configured control method per rail, then trims each
* table to the SMU74 state-table capacity.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result;

	/* MVDD: GPIO LUT from ATOM, or SVI2 table derived from the MCLK
	 * dependency table.  Other control methods leave the table empty. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
				table_info->vdd_dep_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependancy table.",
				return result;);
	}

	/* VDDCI: same two sources as MVDD. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
				table_info->vdd_dep_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
				return result);
	}

	/* VDDC: SVI2 only, built from the VDDC lookup table. */
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
				table_info->vddc_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from lookup table.",
				return result);
	}

	/* Trim (with a warning) any table larger than the SMU can hold. */
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
			"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
								&(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
					&(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
			"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
					&(data->mvdd_voltage_table)));

	return 0;
}
295
/**
* Programs static screen detection parameters
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
318
319/**
320* Setup display gap for glitch free memory clock switching.
321*
322* @param hwmgr the address of the powerplay hardware manager.
323* @return always 0
324*/
325static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
326{
327 uint32_t display_gap =
328 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
329 ixCG_DISPLAY_GAP_CNTL);
330
331 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
332 DISP_GAP, DISPLAY_GAP_IGNORE);
333
334 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
335 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
336
337 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
338 ixCG_DISPLAY_GAP_CNTL, display_gap);
339
340 return 0;
341}
342
/**
* Programs activity state transition voting clients
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	/* Program the eight voting-client masks cached in the backend
	 * (defaults come from polaris10_dyn_defaults.h). */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
378
/* Inverse of polaris10_program_voting_clients(): hold the voting-client
 * counters in reset and zero all eight masks before DPM is disabled.
 * Always returns 0. */
static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, 0);

	return 0;
}
406
/**
* Get the location of various tables inside the FW image.
*
* Reads the SMU74 firmware header out of SMC SRAM and caches the
* offsets of the DPM table, soft registers, MC register table, fan
* table and ARB DRAM-timing table, plus the SMC firmware version.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 when every tracked read succeeded, -1 otherwise.
*/
static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	uint32_t tmp;
	int result;
	bool error = false;

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, DpmTable),
			&tmp, data->sram_end);

	if (0 == result)
		data->dpm_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, SoftRegisters),
			&tmp, data->sram_end);

	if (!result) {
		data->soft_regs_start = tmp;
		/* the SMU manager keeps its own copy of this offset */
		smu_data->soft_regs_start = tmp;
	}

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcRegisterTable),
			&tmp, data->sram_end);

	/* NOTE(review): this result is NOT folded into 'error' like the
	 * others — presumably the MC register table is optional; confirm. */
	if (!result)
		data->mc_reg_table_start = tmp;

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, FanTable),
			&tmp, data->sram_end);

	if (!result)
		data->fan_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
			&tmp, data->sram_end);

	if (!result)
		data->arb_table_start = tmp;

	error |= (0 != result);

	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, Version),
			&tmp, data->sram_end);

	if (!result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? -1 : 0;
}
483
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 * Only F0 and F1 are supported; returns -EINVAL for anything else.
 */
static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Read the source set's DRAM timing registers and burst time. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Write them into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Enable clock gating for all engines, then make the destination
	 * set the active one. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
532
/* Ask the SMC firmware to restore its power-management defaults. */
static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}
537
/**
* Initial switch from ARB F0->F1
*
* Copies the F0 ARB set into F1 and activates F1.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, -EINVAL from the copy helper otherwise.
* This function is to be called from the SetPowerState table.
*/
static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return polaris10_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
550
551static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
552{
553 uint32_t tmp;
554
555 tmp = (cgs_read_ind_register(hwmgr->device,
556 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
557 0x0000ff00) >> 8;
558
559 if (tmp == MC_CG_ARB_FREQ_F0)
560 return 0;
561
562 return polaris10_copy_and_switch_arb_sets(hwmgr,
563 tmp, MC_CG_ARB_FREQ_F0);
564}
565
/* Build the default PCIe speed DPM table, either from the pptable's
 * PCIe table (reserving the last slot for the boot level) or from a
 * hardcoded 6-level fallback.  Returns 0, or -EINVAL when neither
 * performance nor power-saving PCIe levels are available. */
static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint32_t i, max_entry;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	/* If only one of the two level sets is present, mirror it into
	 * the other so both always hold valid values. */
	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}

	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
					SMU74_MAX_LEVELS_LINK,
					MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If PCIE table from PPTable have ULV entry + 8 entries,
		 * then ignore the last entry.*/
		max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
				SMU74_MAX_LEVELS_LINK : pcie_table->count;
		/* entry 0 is the ULV entry; DPM levels start at entry 1 */
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;

		/* Setup BIF_SCLK levels */
		/* NOTE(review): assumes bif_sclk_table holds at least
		 * SMU74_MAX_LEVELS_LINK entries — confirm its declaration. */
		for (i = 0; i < max_entry; i++)
			data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));

	return 0;
}
656
657/*
658 * This function is to initalize all DPM state tables
659 * for SMU7 based on the dependency table.
660 * Dynamic state patching function will then trim these
661 * state tables to the allowed range based
662 * on the power policy or external client requests,
663 * such as UVD request, etc.
664 */
665static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
666{
667 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
668 struct phm_ppt_v1_information *table_info =
669 (struct phm_ppt_v1_information *)(hwmgr->pptable);
670 uint32_t i;
671
672 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
673 table_info->vdd_dep_on_sclk;
674 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
675 table_info->vdd_dep_on_mclk;
676
677 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
678 "SCLK dependency table is missing. This table is mandatory",
679 return -EINVAL);
680 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
681 "SCLK dependency table has to have is missing."
682 "This table is mandatory",
683 return -EINVAL);
684
685 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
686 "MCLK dependency table is missing. This table is mandatory",
687 return -EINVAL);
688 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
689 "MCLK dependency table has to have is missing."
690 "This table is mandatory",
691 return -EINVAL);
692
693 /* clear the state table to reset everything to default */
694 phm_reset_single_dpm_table(
695 &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
696 phm_reset_single_dpm_table(
697 &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
698
699
700 /* Initialize Sclk DPM table based on allow Sclk values */
701 data->dpm_table.sclk_table.count = 0;
702 for (i = 0; i < dep_sclk_table->count; i++) {
703 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
704 dep_sclk_table->entries[i].clk) {
705
706 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
707 dep_sclk_table->entries[i].clk;
708
709 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
710 (i == 0) ? true : false;
711 data->dpm_table.sclk_table.count++;
712 }
713 }
714
715 /* Initialize Mclk DPM table based on allow Mclk values */
716 data->dpm_table.mclk_table.count = 0;
717 for (i = 0; i < dep_mclk_table->count; i++) {
718 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
719 [data->dpm_table.mclk_table.count - 1].value !=
720 dep_mclk_table->entries[i].clk) {
721 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
722 dep_mclk_table->entries[i].clk;
723 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
724 (i == 0) ? true : false;
725 data->dpm_table.mclk_table.count++;
726 }
727 }
728
729 /* setup PCIE gen speed levels */
730 polaris10_setup_default_pcie_table(hwmgr);
731
732 /* save a copy of the default DPM table */
733 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
734 sizeof(struct polaris10_dpm_table));
735
736 return 0;
737}
738
739/**
740 * Mvdd table preparation for SMC.
741 *
742 * @param *hwmgr The address of the hardware manager.
743 * @param *table The SMC DPM table structure to be populated.
744 * @return 0
745 */
746static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
747 SMU74_Discrete_DpmTable *table)
748{
749 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
750 uint32_t count, level;
751
752 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
753 count = data->mvdd_voltage_table.count;
754 if (count > SMU_MAX_SMIO_LEVELS)
755 count = SMU_MAX_SMIO_LEVELS;
756 for (level = 0; level < count; level++) {
757 table->SmioTable2.Pattern[level].Voltage =
758 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
759 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
760 table->SmioTable2.Pattern[level].Smio =
761 (uint8_t) level;
762 table->Smio[level] |=
763 data->mvdd_voltage_table.entries[level].smio_low;
764 }
765 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
766
767 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
768 }
769
770 return 0;
771}
772
773static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
774 struct SMU74_Discrete_DpmTable *table)
775{
776 uint32_t count, level;
777 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
778
779 count = data->vddci_voltage_table.count;
780
781 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
782 if (count > SMU_MAX_SMIO_LEVELS)
783 count = SMU_MAX_SMIO_LEVELS;
784 for (level = 0; level < count; ++level) {
785 table->SmioTable1.Pattern[level].Voltage =
786 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
787 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
788
789 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
790 }
791 }
792
793 table->SmioMask1 = data->vddci_voltage_table.mask_low;
794
795 return 0;
796}
797
798/**
799* Preparation of vddc and vddgfx CAC tables for SMC.
800*
801* @param hwmgr the address of the hardware manager
802* @param table the SMC DPM table structure to be populated
803* @return always 0
804*/
805static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
806 struct SMU74_Discrete_DpmTable *table)
807{
808 uint32_t count;
809 uint8_t index;
810 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
811 struct phm_ppt_v1_information *table_info =
812 (struct phm_ppt_v1_information *)(hwmgr->pptable);
813 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
814 table_info->vddc_lookup_table;
815 /* tables is already swapped, so in order to use the value from it,
816 * we need to swap it back.
817 * We are populating vddc CAC data to BapmVddc table
818 * in split and merged mode
819 */
820 for (count = 0; count < lookup_table->count; count++) {
821 index = phm_get_voltage_index(lookup_table,
822 data->vddc_voltage_table.entries[count].value);
823 table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
824 table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
825 table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
826 }
827
828 return 0;
829}
830
/**
* Preparation of voltage tables for SMC.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return always 0
*/

static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	/* Return values are ignored: each helper returns 0 unconditionally. */
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}
848
849static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
850 struct SMU74_Discrete_Ulv *state)
851{
852 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
853 struct phm_ppt_v1_information *table_info =
854 (struct phm_ppt_v1_information *)(hwmgr->pptable);
855
856 state->CcPwrDynRm = 0;
857 state->CcPwrDynRm1 = 0;
858
859 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
860 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
861 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
862
863 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
864
865 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
866 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
867 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
868
869 return 0;
870}
871
/* Populate the ULV state of the SMC DPM table; thin wrapper around
 * polaris10_populate_ulv_level().
 */
static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}
877
/* Populate the SMC PCIE link level table from the PCIE speed DPM table and
 * record the link level count and enable mask in the hwmgr backend state.
 */
static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
			dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		/* Fixed down/up thresholds, byte-swapped for the SMC. */
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	/* The boot-level entry is excluded from the count and the mask. */
	data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
905
906static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
907{
908 uint32_t reference_clock, tmp;
909 struct cgs_display_info info = {0};
910 struct cgs_mode_info mode_info;
911
912 info.mode_info = &mode_info;
913
914 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
915
916 if (tmp)
917 return TCLK;
918
919 cgs_get_active_displays_info(hwmgr->device, &info);
920 reference_clock = mode_info.ref_clock;
921
922 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
923
924 if (0 != tmp)
925 return reference_clock / 4;
926
927 return reference_clock;
928}
929
/**
* Calculates the SCLK dividers using the provided engine clock
*
* @param hwmgr the address of the hardware manager
* @param clock the engine clock to use to populate the structure
* @param sclk the SMC SCLK structure to be populated
*/
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, SMU_SclkSetting *sclk_setting)
{
	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	struct pp_atomctrl_clock_dividers_ai dividers;

	uint32_t ref_clock;
	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
	uint8_t i;
	int result;
	uint64_t temp;

	sclk_setting->SclkFrequency = clock;
	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
	if (result == 0) {
		/* VBIOS computed the dividers for us: copy them verbatim. */
		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
		sclk_setting->PllRange = dividers.ucSclkPllRange;
		sclk_setting->Sclk_slew_rate = 0x400;
		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
		sclk_setting->Pcc_down_slew_rate = 0xffff;
		sclk_setting->SSc_En = dividers.ucSscEnable;
		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
		return result;
	}

	/* VBIOS lookup failed: compute the feedback dividers manually from
	 * the reference clock and the SCLK FCW range table.
	 */
	ref_clock = polaris10_get_xclk(hwmgr);

	/* Select the PLL range whose transition window contains the clock. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		if (clock > data->range_table[i].trans_lower_frequency
		&& clock <= data->range_table[i].trans_upper_frequency) {
			sclk_setting->PllRange = i;
			break;
		}
	}

	/* Integer part, then 16-bit fractional part (via 64-bit math to
	 * avoid overflow) of the feedback divider word.
	 */
	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
	temp <<= 0x10;
	do_div(temp, ref_clock);
	sclk_setting->Fcw_frac = temp & 0xffff;

	pcc_target_percent = 10; /* Hardcode 10% for now. */
	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);

	ss_target_percent = 2; /* Hardcode 2% for now. */
	sclk_setting->SSc_En = 0;
	if (ss_target_percent) {
		/* Spread-spectrum target: same divider math at the reduced clock. */
		sclk_setting->SSc_En = 1;
		ss_target_freq = clock - (clock * ss_target_percent / 100);
		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
		temp <<= 0x10;
		do_div(temp, ref_clock);
		sclk_setting->Fcw1_frac = temp & 0xffff;
	}

	return 0;
}
1002
1003static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
1004 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1005 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1006{
1007 uint32_t i;
1008 uint16_t vddci;
1009 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1010
1011 *voltage = *mvdd = 0;
1012
1013 /* clock - voltage dependency table is empty table */
1014 if (dep_table->count == 0)
1015 return -EINVAL;
1016
1017 for (i = 0; i < dep_table->count; i++) {
1018 /* find first sclk bigger than request */
1019 if (dep_table->entries[i].clk >= clock) {
1020 *voltage |= (dep_table->entries[i].vddc *
1021 VOLTAGE_SCALE) << VDDC_SHIFT;
1022 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
1023 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1024 VOLTAGE_SCALE) << VDDCI_SHIFT;
1025 else if (dep_table->entries[i].vddci)
1026 *voltage |= (dep_table->entries[i].vddci *
1027 VOLTAGE_SCALE) << VDDCI_SHIFT;
1028 else {
1029 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1030 (dep_table->entries[i].vddc -
1031 (uint16_t)data->vddc_vddci_delta));
1032 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1033 }
1034
1035 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1036 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
1037 VOLTAGE_SCALE;
1038 else if (dep_table->entries[i].mvdd)
1039 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
1040 VOLTAGE_SCALE;
1041
1042 *voltage |= 1 << PHASES_SHIFT;
1043 return 0;
1044 }
1045 }
1046
1047 /* sclk is bigger than max sclk in the dependence table */
1048 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1049
1050 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
1051 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
1052 VOLTAGE_SCALE) << VDDCI_SHIFT;
1053 else if (dep_table->entries[i-1].vddci) {
1054 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1055 (dep_table->entries[i].vddc -
1056 (uint16_t)data->vddc_vddci_delta));
1057 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1058 }
1059
1060 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
1061 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
1062 else if (dep_table->entries[i].mvdd)
1063 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
1064
1065 return 0;
1066}
1067
/* Fallback SCLK FCW range table used when the VBIOS does not supply one
 * (see polaris10_get_sclk_range_table()).  Each entry carries a VCO
 * setting, a post divider, and FCW pcc/transition values — presumably in
 * sclkFcwRange_t declaration order; confirm against the struct definition.
 */
static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
{ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
  {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
  {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
  {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
  {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
  {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
1077
/* Fill the SMC SclkFcwRangeTable, preferring the VBIOS-provided range table
 * and falling back to the built-in Range_Table otherwise.  In the fallback
 * case, host-side transition frequencies are also derived for the PLL range
 * lookup done in polaris10_calculate_sclk_params().
 */
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
{
	uint32_t i, ref_clk;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

	ref_clk = polaris10_get_xclk(hwmgr);

	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
		/* VBIOS table available: copy it over, byte-swapping the
		 * 16-bit fields for the SMC.
		 */
		for (i = 0; i < NUM_SCLK_RANGE; i++) {
			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
		}
		return;
	}

	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		/* Convert FCW transition points into frequencies for the
		 * host-side range lookup.
		 */
		data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
		data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
	}
}
1120
/**
* Populates single SMC SCLK structure using the provided engine clock
*
* @param hwmgr the address of the hardware manager
* @param clock the engine clock to use to populate the structure
* @param sclk the SMC SCLK structure to be populated
*/

static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, uint16_t sclk_al_threshold,
		struct SMU74_Discrete_GraphicsLevel *level)
{
	int result, i, temp;
	/* PP_Clocks minClocks; */
	uint32_t mvdd;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMU_SclkSetting curr_sclk_setting = { 0 };

	/* NOTE(review): this result is overwritten by the next call without
	 * being checked — confirm whether a sclk-params failure should
	 * propagate to the caller.
	 */
	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);

	/* populate graphics levels */
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk, clock,
			&level->MinVoltage, &mvdd);

	PP_ASSERT_WITH_CODE((0 == result),
			"can not find VDDC voltage value for "
			"VDDC engine clock dependency table",
			return result);
	level->ActivityLevel = sclk_al_threshold;

	/* Fixed per-level defaults; hysteresis may be overridden below. */
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	level->EnabledForThrottle = 1;
	level->UpHyst = 10;
	level->DownHyst = 0;
	level->VoltageDownHyst = 0;
	level->PowerThrottle = 0;

	/*
	 * TODO: get minimum clocks from dal configaration
	 * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
	 */
	/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */

	/* get level->DeepSleepDivId
	if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
	*/
	PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
	/* Pick the largest power-of-two divider that keeps clock >> i at or
	 * above the minimum engine clock (i == 0 as a last resort).
	 */
	for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
			break;
	}

	level->DeepSleepDivId = i;

	/* Default to slow, highest DPM level will be
	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
	 */
	if (data->update_up_hyst)
		level->UpHyst = (uint8_t)data->up_hyst;
	if (data->update_down_hyst)
		level->DownHyst = (uint8_t)data->down_hyst;

	level->SclkSetting = curr_sclk_setting;

	/* Byte-swap every field the SMC firmware reads. */
	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
	return 0;
}
1209
/**
* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
*
* @param hwmgr the address of the hardware manager
*/
static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* SMC-side destination address and size of the graphics level array. */
	uint32_t array = data->dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
			SMU74_MAX_LEVELS_GRAPHICS;
	struct SMU74_Discrete_GraphicsLevel *levels =
			data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
		lowest_pcie_level_enabled = 0,
		mid_pcie_level_enabled = 0,
		count = 0;

	polaris10_get_sclk_range_table(hwmgr);

	/* Build one graphics level per trimmed SCLK DPM state. */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {

		result = polaris10_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)data->activity_target[i],
				&(data->smc_state_table.GraphicsLevel[i]));
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}
	/* SPLL shutdown requires spread spectrum off on the lowest level. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport))
		data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;

	data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
	data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);


	if (pcie_table != NULL) {
		/* Map each SCLK level onto a PCIE level, clamped to the last
		 * entry of the PPTable PCIE table.
		 */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		/* No PCIE table: derive highest/lowest/mid enabled PCIE DPM
		 * levels from the enable mask built earlier.
		 */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		while ((count < hightest_pcie_level_enabled) &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
				(uint32_t)array_size, data->sram_end);

	return result;
}
1307
/* Populate a single SMC memory (MCLK) level for the given memory clock. */
static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	struct cgs_display_info info = {0, 0, NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (table_info->vdd_dep_on_mclk) {
		result = polaris10_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	/* Per-level defaults; StutterEnable may be turned on below. */
	mem_level->MclkFrequency = clock;
	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;
	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = info.display_count;

	/* Enable memory stutter only below the configured MCLK threshold and
	 * when the display pipe reports stutter enabled.
	 */
	if ((data->mclk_stutter_mode_threshold) &&
			(clock <= data->mclk_stutter_mode_threshold) &&
			(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
					STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	if (!result) {
		/* Byte-swap the fields the SMC firmware reads. */
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
1354
/**
* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
*
* @param hwmgr the address of the hardware manager
*/
static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
	int result;
	/* populate MCLK dpm table to SMU7 */
	uint32_t array = data->dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
			SMU74_MAX_LEVELS_MEMORY;
	struct SMU74_Discrete_MemoryLevel *levels =
			data->smc_state_table.MemoryLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero",
				return -EINVAL);
		result = polaris10_populate_single_memory_level(hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&levels[i]);
		/* The highest memory level gets the high display watermark
		 * and is the only one enabled for activity.
		 */
		if (i == dpm_table->mclk_table.count - 1) {
			levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
			levels[i].EnabledForActivity = 1;
		}
		if (result)
			return result;
	}

	/* In order to prevent MC activity from stutter mode to push DPM up,
	 * the UVD change complements this by putting the MCLK in
	 * a higher state by default such that we are not affected by
	 * up threshold or and MCLK DPM latency.
	 */
	levels[0].ActivityLevel = 0x1f;
	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);

	data->smc_state_table.MemoryDpmLevelCount =
		(uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	/* level count will send to smc once at init smc table and never change */
	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, data->sram_end);

	return result;
}
1408
1409/**
1410* Populates the SMC MVDD structure using the provided memory clock.
1411*
1412* @param hwmgr the address of the hardware manager
1413* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
1414* @param voltage the SMC VOLTAGE structure to be populated
1415*/
1416static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1417 uint32_t mclk, SMIO_Pattern *smio_pat)
1418{
1419 const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1420 struct phm_ppt_v1_information *table_info =
1421 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1422 uint32_t i = 0;
1423
1424 if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1425 /* find mvdd value which clock is more than request */
1426 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1427 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1428 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1429 break;
1430 }
1431 }
1432 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1433 "MVDD Voltage is outside the supported range.",
1434 return -EINVAL);
1435 } else
1436 return -EINVAL;
1437
1438 return 0;
1439}
1440
/* Populate the SMC ACPI (lowest-power) SCLK and MCLK levels from the VBIOS
 * boot-up clocks and the clock/voltage dependency tables.
 */
static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = 0;
	uint32_t sclk_frequency;
	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;


	/* Get MinVoltage and Frequency from DPM0,
	 * already converted to SMC_UL */
	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk,
			sclk_frequency,
			&table->ACPILevel.MinVoltage, &mvdd);
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDC voltage value "
			"in Clock Dependency Table",
			);


	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);

	table->ACPILevel.DeepSleepDivId = 0;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Byte-swap everything the SMC firmware reads. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);


	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_mclk,
			table->MemoryACPILevel.MclkFrequency,
			&table->MemoryACPILevel.MinVoltage, &mvdd);
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDCI voltage value "
			"in Clock Dependency Table",
			);

	/* NOTE(review): us_mvdd is computed here but never consumed; the
	 * MinMvdd below comes from a fresh populate_mvdd_value(hwmgr, 0, ...)
	 * call — confirm whether us_mvdd was meant to feed MinMvdd.
	 */
	us_mvdd = 0;
	if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!polaris10_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	table->MemoryACPILevel.StutterEnable = false;

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
1535
/* Populate the SMC VCE levels from the multimedia clock/voltage table. */
static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->VceLevelCount = (uint8_t)(mm_table->count);
	table->VceBootLevel = 0;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
		table->VceLevel[count].MinVoltage = 0;
		table->VceLevel[count].MinVoltage |=
				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			/* NOTE(review): this branch already applies VOLTAGE_SCALE
			 * and VDDCI_SHIFT, yet vddci is scaled and shifted again
			 * below — confirm whether the bootup value should be raw
			 * here like the other two branches.
			 */
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;


		table->VceLevel[count].MinVoltage |=
				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/*retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
	}
	return result;
}
1585
/*
 * Populate the SMC SAMU DPM levels from the powerplay multimedia
 * clock/voltage dependency table.
 *
 * @return 0 on success; the atomctrl error if a PLL divider lookup
 *         fails; -EINVAL if the mm table is empty.
 */
static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->SamuBootLevel = 0;
	table->SamuLevelCount = (uint8_t)(mm_table->count);

	for (count = 0; count < table->SamuLevelCount; count++) {
		/* not sure whether we need evclk or not */
		table->SamuLevel[count].MinVoltage = 0;
		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
				VOLTAGE_SCALE) << VDDC_SHIFT;

		/* Derive VDDCI according to the active VDDCI control method */
		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			/* NOTE(review): pre-scaled/shifted here and again in the
			 * OR below — looks like a double application of
			 * VOLTAGE_SCALE/VDDCI_SHIFT; confirm intended. */
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->SamuLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for samu clock", return result);

		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		/* convert multi-byte fields to the SMC's expected byte order */
		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
	}
	return result;
}
1633
1634static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1635 int32_t eng_clock, int32_t mem_clock,
1636 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1637{
1638 uint32_t dram_timing;
1639 uint32_t dram_timing2;
1640 uint32_t burst_time;
1641 int result;
1642
1643 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1644 eng_clock, mem_clock);
1645 PP_ASSERT_WITH_CODE(result == 0,
1646 "Error calling VBIOS to set DRAM_TIMING.", return result);
1647
1648 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1649 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1650 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1651
1652
1653 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1654 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1655 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1656
1657 return 0;
1658}
1659
/*
 * Build the full MC ARB DRAM timing table — one entry per
 * (sclk level, mclk level) pair — program AC timings via the VBIOS,
 * and upload the whole table to SMC SRAM at data->arb_table_start.
 *
 * NOTE(review): 'arb_regs' is an uninitialized stack struct and only
 * entries up to the DPM level counts are filled, yet the entire struct
 * is uploaded — presumably the SMC ignores the unused entries; confirm.
 */
static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
	uint32_t i, j;
	int result = 0;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
			result = polaris10_populate_memory_timing_parameters(hwmgr,
					data->dpm_table.sclk_table.dpm_levels[i].value,
					data->dpm_table.mclk_table.dpm_levels[j].value,
					&arb_regs.entries[i][j]);
			/* AC timing is programmed per mclk level */
			if (result == 0)
				result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
			if (result != 0)
				return result;
		}
	}

	result = polaris10_copy_bytes_to_smc(
			hwmgr->smumgr,
			data->arb_table_start,
			(uint8_t *)&arb_regs,
			sizeof(SMU74_Discrete_MCArbDramTimingTable),
			data->sram_end);
	return result;
}
1688
/*
 * Populate the SMC UVD (video decode engine) DPM levels from the
 * powerplay multimedia clock/voltage dependency table.  Each level
 * carries both a VCLK and a DCLK, each with its own PLL divider.
 *
 * @return 0 on success; the atomctrl error if a divider lookup fails;
 *         -EINVAL if the mm table is empty.
 */
static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t vddci;

	table->UvdLevelCount = (uint8_t)(mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].MinVoltage = 0;
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
				VOLTAGE_SCALE) << VDDC_SHIFT;

		/* Derive VDDCI according to the active VDDCI control method */
		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
		else
			/* NOTE(review): pre-scaled/shifted here and again in the
			 * OR below — looks like a double application of
			 * VOLTAGE_SCALE/VDDCI_SHIFT; confirm intended. */
			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;

		/* convert multi-byte fields to the SMC's expected byte order */
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
	}

	return result;
}
1745
/*
 * Fill in the SMC boot DPM levels and boot voltages from the VBIOS
 * boot-up state.
 *
 * NOTE(review): the return values of both phm_find_boot_level() calls
 * are assigned to 'result' but never checked (the second assignment
 * overwrites the first), and the function unconditionally returns 0 —
 * confirm that a failed boot-level lookup (levels left at 0) is
 * acceptable here.
 */
static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	int result = 0;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	/* Boot voltages, scaled by VOLTAGE_SCALE for the SMC */
	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	/* convert to the SMC's expected byte order */
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
1777
1778
1779static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1780{
1781 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1782 struct phm_ppt_v1_information *table_info =
1783 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1784 uint8_t count, level;
1785
1786 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1787
1788 for (level = 0; level < count; level++) {
1789 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1790 data->vbios_boot_state.sclk_bootup_value) {
1791 data->smc_state_table.GraphicsBootLevel = level;
1792 break;
1793 }
1794 }
1795
1796 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1797 for (level = 0; level < count; level++) {
1798 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1799 data->vbios_boot_state.mclk_bootup_value) {
1800 data->smc_state_table.MemoryBootLevel = level;
1801 break;
1802 }
1803 }
1804
1805 return 0;
1806}
1807
/*
 * Populate the clock-stretcher (CKS) data in the cached SMC state
 * table: per-level CKS enable mask and voltage offsets derived from a
 * fused RO measurement, plus the LDO reference select.  Disables the
 * ClockStretcher platform cap if the pptable stretch amount is not in
 * the supported 1..5 range.
 */
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint8_t i, stretch_amount, volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 * Fuse value is the top byte of the dword at efuse word 67.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (67 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;

	/* RO range differs per ASIC */
	if (hwmgr->chip_id == CHIP_POLARIS10) {
		min = 1000;
		max = 2300;
	} else {
		min = 1100;
		max = 2100;
	}

	/* Map the 8-bit fuse value linearly onto [min, max] */
	ro = efuse * (max -min)/255 + min;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Characterized voltage curves (fixed-point constants differ
		 * between Polaris10 and other chips) */
		if (hwmgr->chip_id == CHIP_POLARIS10) {
			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
		} else {
			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
		}

		/* rounded-up division by 6.25 (×100, +624, ÷625) */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);

		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* LDO reference select from pptable; 6 is the fallback default */
	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
	/* Populate CKS Lookup Table */
	if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
			stretch_amount != 4 && stretch_amount != 5) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	/* Clear bit 0 of PWR_CKS_CNTL */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
1878
1879/**
1880* Populates the SMC VRConfig field in DPM table.
1881*
1882* @param hwmgr the address of the hardware manager
1883* @param table the SMC DPM table structure to be populated
1884* @return always 0
1885*/
static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint16_t config;

	/* VDDGFX is merged with VDDC on this configuration */
	config = VR_MERGED_WITH_VDDC;
	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);

	/* Set Vddc Voltage Controller */
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		config = VR_SVI2_PLANE_1;
		table->VRConfig |= config;
	} else {
		/* assert-only: logs the problem, does not fail the call
		 * (the PP_ASSERT code argument is intentionally empty) */
		PP_ASSERT_WITH_CODE(false,
				"VDDC should be on SVI2 control in merged mode!",
				);
	}
	/* Set Vddci Voltage Controller */
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		config = VR_SVI2_PLANE_2; /* only in merged mode */
		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
	} else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		config = VR_SMIO_PATTERN_1;
		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
	} else {
		config = VR_STATIC_VOLTAGE;
		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
	}
	/* Set Mvdd Voltage Controller */
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		config = VR_SVI2_PLANE_2;
		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
		/* tell the SMC it may switch MVDD dynamically */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
			offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
	} else {
		config = VR_STATIC_VOLTAGE;
		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
	}

	return 0;
}
1928
1929
/*
 * Populate the AVFS (adaptive voltage/frequency scaling) tables in the
 * cached SMC state table from atomctrl fuse data, and upload the
 * mean/sigma and sclk-offset tables to SMC SRAM.  No-op if the BTC run
 * determined AVFS is unsupported.
 */
static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	int result = 0;
	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
	uint32_t tmp, i;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;


	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
		return result;

	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);

	if (0 == result) {
		/* Voltage-droop and AVFS fuse curves for CKS-on ([0]) and
		 * CKS-off ([1]), stored in SMC byte order */
		table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
		table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
		table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
		table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
		table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
		table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
		table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
		table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
		table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
		table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
		table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
		AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
		AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
		AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
		AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
		AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
		AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);

		/* NOTE(review): iterates NUM_VFT_COLUMNS times regardless of
		 * sclk_table->count — confirm the dependency table always has
		 * at least that many entries. */
		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
		}

		/* NOTE(review): 'result' of this SRAM read is not checked
		 * before 'tmp' is used as the copy destination — confirm. */
		result = polaris10_read_smc_sram_dword(smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
				&tmp, data->sram_end);

		polaris10_copy_bytes_to_smc(smumgr,
					tmp,
					(uint8_t *)&AVFS_meanNsigma,
					sizeof(AVFS_meanNsigma_t),
					data->sram_end);

		result = polaris10_read_smc_sram_dword(smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
				&tmp, data->sram_end);
		polaris10_copy_bytes_to_smc(smumgr,
					tmp,
					(uint8_t *)&AVFS_SclkOffset,
					sizeof(AVFS_Sclk_Offset_t),
					data->sram_end);

		/* Cache the vdroop-enable bit pattern for later use */
		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
	}
	return result;
}
2011
2012
2013/**
2014* Initializes the SMC table and uploads it
2015*
2016* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success; a negative error code if any population step fails
2018*/
static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
	const struct polaris10_ulv_parm *ulv = &(data->ulv);
	uint8_t i;
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	pp_atomctrl_clock_dividers_vi dividers;

	result = polaris10_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
		polaris10_populate_smc_voltage_tables(hwmgr, table);

	/* System flags from platform caps and memory type */
	table->SystemFlags = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
		result = polaris10_populate_ulv_state(hwmgr, table);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to initialize ULV state!", return result);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
	}

	/* Populate every SMC DPM level table in sequence; any failure
	 * aborts the whole init with that error. */
	result = polaris10_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Link Level!", return result);

	result = polaris10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Graphics Level!", return result);

	result = polaris10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Memory Level!", return result);

	result = polaris10_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACPI Level!", return result);

	result = polaris10_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize VCE Level!", return result);

	result = polaris10_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point
	 * (the other states are just copies of the boot state) we only
	 * need to populate the ARB settings for the initial state.
	 */
	result = polaris10_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to Write ARB settings for the initial state.", return result);

	result = polaris10_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize UVD Level!", return result);

	result = polaris10_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot Level!", return result);

	result = polaris10_populate_smc_initailial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot State!", return result);

	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate BAPM Parameters!", return result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ClockStretcher)) {
		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to populate Clock Stretcher Data Table!",
				return result);
	}

	result = polaris10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);

	/* Static control fields and sampling intervals */
	table->CurrSclkPllRange = 0xff;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	/* Temperature limits in Q8.8 fixed point
	 * (POLARIS10_Q88_FORMAT_CONVERSION_UNIT) */
	table->TemperatureLimitHigh =
			table_info->cac_dtp_table->usTargetOperatingTemp *
			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
	table->TemperatureLimitLow =
			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;
	table->VRConfig = 0;

	result = polaris10_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* VRHot GPIO: fall back to unused pin and drop the cap if the
	 * VBIOS does not assign one */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
	} else {
		table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO: cap is set/unset to match pin availability */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
			&gpio_pin)) {
		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	}

	/* Thermal Output GPIO */
	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
			&gpio_pin)) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);

		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;

		/* For polarity read GPIOPAD_A with assigned Gpio pin
		 * since VBIOS will program this register to set 'inactive state',
		 * driver can then determine 'active state' from this and
		 * program SMU with correct polarity
		 */
		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;

		/* if required, combine VRHot/PCC with thermal out GPIO */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
				&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
	} else {
		table->ThermOutGpio = 17;
		table->ThermOutPolarity = 1;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
	}

	/* Populate BIF_SCLK levels into SMC DPM table: count + 1 entries,
	 * index 0 feeds the ULV state and 1..count feed LinkLevel[i-1] —
	 * hence the inclusive '<=' bound. */
	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);

		if (i == 0)
			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
		else
			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
	}

	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);

	/* Convert remaining multi-byte fields to the SMC byte order */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc)
	 * The upload size deliberately excludes the three trailing
	 * SMU74_PIDController entries of the struct. */
	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
			data->dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
			data->sram_end);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result);

	return 0;
}
2228
2229/**
2230* Initialize the ARB DRAM timing table's index field.
2231*
2232* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success; the SMC SRAM read/write error code otherwise
2234*/
/*
 * Initialize the 'current' index byte at the head of the ARB DRAM
 * timing table in SMC SRAM to MC_CG_ARB_FREQ_F1.
 */
static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
{
	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t tmp;
	int result;

	/* This is a read-modify-write on the first byte of the ARB table.
	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
	 * is the field 'current'.
	 * This solution is ugly, but we never write the whole table only
	 * individual fields in it.
	 * In reality this field should not be in that structure
	 * but in a soft register.
	 */
	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
			data->arb_table_start, &tmp, data->sram_end);

	if (result)
		return result;

	/* replace the top byte with the F1 ARB frequency index */
	tmp &= 0x00FFFFFF;
	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;

	return polaris10_write_smc_sram_dword(hwmgr->smumgr,
			data->arb_table_start, tmp, data->sram_end);
}
2261
2262static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
2263{
2264 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2265 PHM_PlatformCaps_RegulatorHot))
2266 return smum_send_msg_to_smc(hwmgr->smumgr,
2267 PPSMC_MSG_EnableVRHotGPIOInterrupt);
2268
2269 return 0;
2270}
2271
/* Clear SCLK_PWRMGT_OFF in SCLK_PWRMGT_CNTL, enabling SCLK control. */
static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
2278
2279static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
2280{
2281 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2282 struct polaris10_ulv_parm *ulv = &(data->ulv);
2283
2284 if (ulv->ulv_supported)
2285 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
2286
2287 return 0;
2288}
2289
2290static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
2291{
2292 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2293 struct polaris10_ulv_parm *ulv = &(data->ulv);
2294
2295 if (ulv->ulv_supported)
2296 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
2297
2298 return 0;
2299}
2300
2301static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2302{
2303 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2304 PHM_PlatformCaps_SclkDeepSleep)) {
2305 if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
2306 PP_ASSERT_WITH_CODE(false,
2307 "Attempt to enable Master Deep Sleep switch failed!",
2308 return -1);
2309 } else {
2310 if (smum_send_msg_to_smc(hwmgr->smumgr,
2311 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2312 PP_ASSERT_WITH_CODE(false,
2313 "Attempt to disable Master Deep Sleep switch failed!",
2314 return -1);
2315 }
2316 }
2317
2318 return 0;
2319}
2320
2321static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2322{
2323 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2324 PHM_PlatformCaps_SclkDeepSleep)) {
2325 if (smum_send_msg_to_smc(hwmgr->smumgr,
2326 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2327 PP_ASSERT_WITH_CODE(false,
2328 "Attempt to disable Master Deep Sleep switch failed!",
2329 return -1);
2330 }
2331 }
2332
2333 return 0;
2334}
2335
/*
 * Enable SCLK and MCLK DPM in the SMC (each gated by its own key).
 * For MCLK, the UVD<->SMU handshake is disabled first and the LCAC
 * blocks are programmed around the enable message.
 *
 * @return 0 on success, -1 if either SMC enable message fails.
 */
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ offsetof(SMU74_SoftRegisters, HandshakeDisables);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -1);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* Disable UVD - SMU handshake for MCLK. */
		soft_register_value = cgs_read_ind_register(hwmgr->device,
					CGS_IND_REG__SMC, handshake_disables_offset);
		soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					handshake_disables_offset, soft_register_value);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -1);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* NOTE(review): LCAC programming sequence with a 10us settle
		 * delay; the register values are hardware-specific magic —
		 * taken as-is from the bring-up sequence. */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
		udelay(10);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
	}

	return 0;
}
2378
2379static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
2380{
2381 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2382
2383 /*enable general power management */
2384
2385 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2386 GLOBAL_PWRMGT_EN, 1);
2387
2388 /* enable sclk deep sleep */
2389
2390 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2391 DYNAMIC_PM_EN, 1);
2392
2393 /* prepare for PCIE DPM */
2394
2395 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2396 data->soft_regs_start + offsetof(SMU74_SoftRegisters,
2397 VoltageChangeTimeout), 0x1000);
2398 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
2399 SWRST_COMMAND_1, RESETLC, 0x0);
2400/*
2401 PP_ASSERT_WITH_CODE(
2402 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
2403 PPSMC_MSG_Voltage_Cntl_Enable)),
2404 "Failed to enable voltage DPM during DPM Start Function!",
2405 return -1);
2406*/
2407
2408 if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
2409 printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
2410 return -1;
2411 }
2412
2413 /* enable PCIE dpm */
2414 if (0 == data->pcie_dpm_key_disabled) {
2415 PP_ASSERT_WITH_CODE(
2416 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
2417 PPSMC_MSG_PCIeDPM_Enable)),
2418 "Failed to enable pcie DPM during DPM Start Function!",
2419 return -1);
2420 }
2421
2422 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2423 PHM_PlatformCaps_Falcon_QuickTransition)) {
2424 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
2425 PPSMC_MSG_EnableACDCGPIOInterrupt)),
2426 "Failed to enable AC DC GPIO Interrupt!",
2427 );
2428 }
2429
2430 return 0;
2431}
2432
2433static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2434{
2435 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2436
2437 /* disable SCLK dpm */
2438 if (!data->sclk_dpm_key_disabled)
2439 PP_ASSERT_WITH_CODE(
2440 (smum_send_msg_to_smc(hwmgr->smumgr,
2441 PPSMC_MSG_DPM_Disable) == 0),
2442 "Failed to disable SCLK DPM!",
2443 return -1);
2444
2445 /* disable MCLK dpm */
2446 if (!data->mclk_dpm_key_disabled) {
2447 PP_ASSERT_WITH_CODE(
2448 (smum_send_msg_to_smc(hwmgr->smumgr,
2449 PPSMC_MSG_MCLKDPM_Disable) == 0),
2450 "Failed to disable MCLK DPM!",
2451 return -1);
2452 }
2453
2454 return 0;
2455}
2456
2457static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
2458{
2459 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2460
2461 /* disable general power management */
2462 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2463 GLOBAL_PWRMGT_EN, 0);
2464 /* disable sclk deep sleep */
2465 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2466 DYNAMIC_PM_EN, 0);
2467
2468 /* disable PCIE dpm */
2469 if (!data->pcie_dpm_key_disabled) {
2470 PP_ASSERT_WITH_CODE(
2471 (smum_send_msg_to_smc(hwmgr->smumgr,
2472 PPSMC_MSG_PCIeDPM_Disable) == 0),
2473 "Failed to disable pcie DPM during DPM Stop Function!",
2474 return -1);
2475 }
2476
2477 if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
2478 printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
2479 return -1;
2480 }
2481
2482 return 0;
2483}
2484
2485static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
2486{
2487 bool protection;
2488 enum DPM_EVENT_SRC src;
2489
2490 switch (sources) {
2491 default:
2492 printk(KERN_ERR "Unknown throttling event sources.");
2493 /* fall through */
2494 case 0:
2495 protection = false;
2496 /* src is unused */
2497 break;
2498 case (1 << PHM_AutoThrottleSource_Thermal):
2499 protection = true;
2500 src = DPM_EVENT_SRC_DIGITAL;
2501 break;
2502 case (1 << PHM_AutoThrottleSource_External):
2503 protection = true;
2504 src = DPM_EVENT_SRC_EXTERNAL;
2505 break;
2506 case (1 << PHM_AutoThrottleSource_External) |
2507 (1 << PHM_AutoThrottleSource_Thermal):
2508 protection = true;
2509 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
2510 break;
2511 }
2512 /* Order matters - don't enable thermal protection for the wrong source. */
2513 if (protection) {
2514 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
2515 DPM_EVENT_SRC, src);
2516 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2517 THERMAL_PROTECTION_DIS,
2518 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2519 PHM_PlatformCaps_ThermalController));
2520 } else
2521 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2522 THERMAL_PROTECTION_DIS, 1);
2523}
2524
2525static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2526 PHM_AutoThrottleSource source)
2527{
2528 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2529
2530 if (!(data->active_auto_throttle_sources & (1 << source))) {
2531 data->active_auto_throttle_sources |= 1 << source;
2532 polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2533 }
2534 return 0;
2535}
2536
2537static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2538{
2539 return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2540}
2541
2542static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2543 PHM_AutoThrottleSource source)
2544{
2545 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2546
2547 if (data->active_auto_throttle_sources & (1 << source)) {
2548 data->active_auto_throttle_sources &= ~(1 << source);
2549 polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2550 }
2551 return 0;
2552}
2553
2554static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2555{
2556 return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2557}
2558
2559static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
2560{
2561 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2562 data->pcie_performance_request = true;
2563
2564 return 0;
2565}
2566
2567static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2568{
2569 int tmp_result, result = 0;
2570 tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
2571 PP_ASSERT_WITH_CODE(result == 0,
2572 "DPM is already running right now, no need to enable DPM!",
2573 return 0);
2574
2575 if (polaris10_voltage_control(hwmgr)) {
2576 tmp_result = polaris10_enable_voltage_control(hwmgr);
2577 PP_ASSERT_WITH_CODE(tmp_result == 0,
2578 "Failed to enable voltage control!",
2579 result = tmp_result);
2580
2581 tmp_result = polaris10_construct_voltage_tables(hwmgr);
2582 PP_ASSERT_WITH_CODE((0 == tmp_result),
2583 "Failed to contruct voltage tables!",
2584 result = tmp_result);
2585 }
2586
2587 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2588 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
2589 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2590 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
2591
2592 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2593 PHM_PlatformCaps_ThermalController))
2594 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2595 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
2596
2597 tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
2598 PP_ASSERT_WITH_CODE((0 == tmp_result),
2599 "Failed to program static screen threshold parameters!",
2600 result = tmp_result);
2601
2602 tmp_result = polaris10_enable_display_gap(hwmgr);
2603 PP_ASSERT_WITH_CODE((0 == tmp_result),
2604 "Failed to enable display gap!", result = tmp_result);
2605
2606 tmp_result = polaris10_program_voting_clients(hwmgr);
2607 PP_ASSERT_WITH_CODE((0 == tmp_result),
2608 "Failed to program voting clients!", result = tmp_result);
2609
2610 tmp_result = polaris10_process_firmware_header(hwmgr);
2611 PP_ASSERT_WITH_CODE((0 == tmp_result),
2612 "Failed to process firmware header!", result = tmp_result);
2613
2614 tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
2615 PP_ASSERT_WITH_CODE((0 == tmp_result),
2616 "Failed to initialize switch from ArbF0 to F1!",
2617 result = tmp_result);
2618
2619 tmp_result = polaris10_init_smc_table(hwmgr);
2620 PP_ASSERT_WITH_CODE((0 == tmp_result),
2621 "Failed to initialize SMC table!", result = tmp_result);
2622
2623 tmp_result = polaris10_init_arb_table_index(hwmgr);
2624 PP_ASSERT_WITH_CODE((0 == tmp_result),
2625 "Failed to initialize ARB table index!", result = tmp_result);
2626
2627 tmp_result = polaris10_populate_pm_fuses(hwmgr);
2628 PP_ASSERT_WITH_CODE((0 == tmp_result),
2629 "Failed to populate PM fuses!", result = tmp_result);
2630
2631 tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
2632 PP_ASSERT_WITH_CODE((0 == tmp_result),
2633 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
2634
2635 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
2636
2637 tmp_result = polaris10_enable_sclk_control(hwmgr);
2638 PP_ASSERT_WITH_CODE((0 == tmp_result),
2639 "Failed to enable SCLK control!", result = tmp_result);
2640
2641 tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
2642 PP_ASSERT_WITH_CODE((0 == tmp_result),
2643 "Failed to enable voltage control!", result = tmp_result);
2644
2645 tmp_result = polaris10_enable_ulv(hwmgr);
2646 PP_ASSERT_WITH_CODE((0 == tmp_result),
2647 "Failed to enable ULV!", result = tmp_result);
2648
2649 tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
2650 PP_ASSERT_WITH_CODE((0 == tmp_result),
2651 "Failed to enable deep sleep master switch!", result = tmp_result);
2652
2653 tmp_result = polaris10_enable_didt_config(hwmgr);
2654 PP_ASSERT_WITH_CODE((tmp_result == 0),
2655 "Failed to enable deep sleep master switch!", result = tmp_result);
2656
2657 tmp_result = polaris10_start_dpm(hwmgr);
2658 PP_ASSERT_WITH_CODE((0 == tmp_result),
2659 "Failed to start DPM!", result = tmp_result);
2660
2661 tmp_result = polaris10_enable_smc_cac(hwmgr);
2662 PP_ASSERT_WITH_CODE((0 == tmp_result),
2663 "Failed to enable SMC CAC!", result = tmp_result);
2664
2665 tmp_result = polaris10_enable_power_containment(hwmgr);
2666 PP_ASSERT_WITH_CODE((0 == tmp_result),
2667 "Failed to enable power containment!", result = tmp_result);
2668
2669 tmp_result = polaris10_power_control_set_level(hwmgr);
2670 PP_ASSERT_WITH_CODE((0 == tmp_result),
2671 "Failed to power control set level!", result = tmp_result);
2672
2673 tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
2674 PP_ASSERT_WITH_CODE((0 == tmp_result),
2675 "Failed to enable thermal auto throttle!", result = tmp_result);
2676
2677 tmp_result = polaris10_pcie_performance_request(hwmgr);
2678 PP_ASSERT_WITH_CODE((0 == tmp_result),
2679 "pcie performance request failed!", result = tmp_result);
2680
2681 return result;
2682}
2683
/*
 * Tear down all DPM features, roughly in reverse order of
 * polaris10_enable_dpm_tasks().
 *
 * Each step's failure is reported via PP_ASSERT_WITH_CODE and latched
 * into 'result', but teardown keeps going; the last failing step's code
 * is returned (0 when everything succeeded).
 */
int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Nothing to disable when DPM is not running. */
	tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	/* Force thermal protection off while tearing down. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = polaris10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = polaris10_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	/* Turn off SCLK spread spectrum. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = polaris10_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = polaris10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = polaris10_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = polaris10_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
2741
/* ASIC reset hook -- currently a no-op on Polaris10. */
int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2747
/* Tear down the hwmgr backend; delegates to the common PHM helper. */
static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
2752
2753static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
2754{
2755 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2756
2757 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2758 PHM_PlatformCaps_DynamicPatchPowerState);
2759
2760 if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
2761 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2762 PHM_PlatformCaps_EnableMVDDControl);
2763
2764 if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
2765 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2766 PHM_PlatformCaps_ControlVDDCI);
2767
2768 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2769 PHM_PlatformCaps_TablelessHardwareInterface);
2770
2771 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2772 PHM_PlatformCaps_EnableSMU7ThermalManagement);
2773
2774 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2775 PHM_PlatformCaps_DynamicPowerManagement);
2776
2777 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2778 PHM_PlatformCaps_UnTabledHardwareInterface);
2779
2780 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2781 PHM_PlatformCaps_TablelessHardwareInterface);
2782
2783 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2784 PHM_PlatformCaps_SMC);
2785
2786 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2787 PHM_PlatformCaps_NonABMSupportInPPLib);
2788
2789 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2790 PHM_PlatformCaps_DynamicUVDState);
2791
2792 /* power tune caps Assume disabled */
2793 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2794 PHM_PlatformCaps_SQRamping);
2795 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2796 PHM_PlatformCaps_DBRamping);
2797 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2798 PHM_PlatformCaps_TDRamping);
2799 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2800 PHM_PlatformCaps_TCPRamping);
2801
2802 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2803 PHM_PlatformCaps_CAC);
2804
2805 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2806 PHM_PlatformCaps_RegulatorHot);
2807
2808 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2809 PHM_PlatformCaps_AutomaticDCTransition);
2810
2811 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2812 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2813
2814 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2815 PHM_PlatformCaps_FanSpeedInTableIsRPM);
2816
2817 if (hwmgr->chip_id == CHIP_POLARIS11)
2818 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2819 PHM_PlatformCaps_SPLLShutdownSupport);
2820 return 0;
2821}
2822
/* Set power-tune and PCIe gen/lane default bounds on the backend. */
static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	polaris10_initialize_power_tune_defaults(hwmgr);

	/* NOTE(review): max < min looks inverted here (Gen1 < Gen3, 0 < 16);
	 * presumably these act as seed values that later get widened/clamped
	 * against actual link capabilities -- confirm before "fixing". */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;
}
2838
2839/**
2840* Get Leakage VDDC based on leakage ID.
2841*
2842* @param hwmgr the address of the powerplay hardware manager.
2843* @return always 0
2844*/
2845static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
2846{
2847 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2848 uint16_t vv_id;
2849 uint32_t vddc = 0;
2850 uint16_t i, j;
2851 uint32_t sclk = 0;
2852 struct phm_ppt_v1_information *table_info =
2853 (struct phm_ppt_v1_information *)hwmgr->pptable;
2854 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2855 table_info->vdd_dep_on_sclk;
2856 int result;
2857
2858 for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
2859 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2860 if (!phm_get_sclk_for_voltage_evv(hwmgr,
2861 table_info->vddc_lookup_table, vv_id, &sclk)) {
2862 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2863 PHM_PlatformCaps_ClockStretcher)) {
2864 for (j = 1; j < sclk_table->count; j++) {
2865 if (sclk_table->entries[j].clk == sclk &&
2866 sclk_table->entries[j].cks_enable == 0) {
2867 sclk += 5000;
2868 break;
2869 }
2870 }
2871 }
2872
2873 if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
2874 VOLTAGE_TYPE_VDDC,
2875 sclk, vv_id, &vddc) != 0) {
2876 printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
2877 continue;
2878 }
2879
2880 /* need to make sure vddc is less than 2V or else, it could burn the ASIC.
2881 * real voltage level in unit of 0.01mV */
2882 PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
2883 "Invalid VDDC value", result = -EINVAL;);
2884
2885 /* the voltage should not be zero nor equal to leakage ID */
2886 if (vddc != 0 && vddc != vv_id) {
2887 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
2888 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2889 data->vddc_leakage.count++;
2890 }
2891 }
2892 }
2893
2894 return 0;
2895}
2896
2897/**
2898 * Change virtual leakage voltage to actual value.
2899 *
2900 * @param hwmgr the address of the powerplay hardware manager.
2901 * @param pointer to changing voltage
2902 * @param pointer to leakage table
2903 */
2904static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2905 uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
2906{
2907 uint32_t index;
2908
2909 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2910 for (index = 0; index < leakage_table->count; index++) {
2911 /* if this voltage matches a leakage voltage ID */
2912 /* patch with actual leakage voltage */
2913 if (leakage_table->leakage_id[index] == *voltage) {
2914 *voltage = leakage_table->actual_voltage[index];
2915 break;
2916 }
2917 }
2918
2919 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2920 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
2921}
2922
2923/**
2924* Patch voltage lookup table by EVV leakages.
2925*
2926* @param hwmgr the address of the powerplay hardware manager.
2927* @param pointer to voltage lookup table
2928* @param pointer to leakage table
2929* @return always 0
2930*/
2931static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2932 phm_ppt_v1_voltage_lookup_table *lookup_table,
2933 struct polaris10_leakage_voltage *leakage_table)
2934{
2935 uint32_t i;
2936
2937 for (i = 0; i < lookup_table->count; i++)
2938 polaris10_patch_with_vdd_leakage(hwmgr,
2939 &lookup_table->entries[i].us_vdd, leakage_table);
2940
2941 return 0;
2942}
2943
2944static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
2945 struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
2946 uint16_t *vddc)
2947{
2948 struct phm_ppt_v1_information *table_info =
2949 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2950 polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2951 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2952 table_info->max_clock_voltage_on_dc.vddc;
2953 return 0;
2954}
2955
2956static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
2957 struct pp_hwmgr *hwmgr)
2958{
2959 uint8_t entryId;
2960 uint8_t voltageId;
2961 struct phm_ppt_v1_information *table_info =
2962 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2963
2964 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2965 table_info->vdd_dep_on_sclk;
2966 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2967 table_info->vdd_dep_on_mclk;
2968 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2969 table_info->mm_dep_table;
2970
2971 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
2972 voltageId = sclk_table->entries[entryId].vddInd;
2973 sclk_table->entries[entryId].vddc =
2974 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2975 }
2976
2977 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
2978 voltageId = mclk_table->entries[entryId].vddInd;
2979 mclk_table->entries[entryId].vddc =
2980 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2981 }
2982
2983 for (entryId = 0; entryId < mm_table->count; ++entryId) {
2984 voltageId = mm_table->entries[entryId].vddcInd;
2985 mm_table->entries[entryId].vddc =
2986 table_info->vddc_lookup_table->entries[voltageId].us_vdd;
2987 }
2988
2989 return 0;
2990
2991}
2992
/*
 * Placeholder -- still need to determine whether calculated voltages are
 * required for the clock dependency tables.
 */
static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2998
/*
 * Placeholder -- still need to determine whether calculated voltages are
 * required for the multimedia (MM) dependency table.
 */
static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	return 0;
}
3004
3005static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
3006 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
3007{
3008 uint32_t table_size, i, j;
3009 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
3010 table_size = lookup_table->count;
3011
3012 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
3013 "Lookup table is empty", return -EINVAL);
3014
3015 /* Sorting voltages */
3016 for (i = 0; i < table_size - 1; i++) {
3017 for (j = i + 1; j > 0; j--) {
3018 if (lookup_table->entries[j].us_vdd <
3019 lookup_table->entries[j - 1].us_vdd) {
3020 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
3021 lookup_table->entries[j - 1] = lookup_table->entries[j];
3022 lookup_table->entries[j] = tmp_voltage_lookup_record;
3023 }
3024 }
3025 }
3026
3027 return 0;
3028}
3029
3030static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
3031{
3032 int result = 0;
3033 int tmp_result;
3034 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3035 struct phm_ppt_v1_information *table_info =
3036 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3037
3038 tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
3039 table_info->vddc_lookup_table, &(data->vddc_leakage));
3040 if (tmp_result)
3041 result = tmp_result;
3042
3043 tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
3044 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
3045 if (tmp_result)
3046 result = tmp_result;
3047
3048 tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
3049 if (tmp_result)
3050 result = tmp_result;
3051
3052 tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
3053 if (tmp_result)
3054 result = tmp_result;
3055
3056 tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
3057 if (tmp_result)
3058 result = tmp_result;
3059
3060 tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
3061 if (tmp_result)
3062 result = tmp_result;
3063
3064 return result;
3065}
3066
3067static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
3068{
3069 struct phm_ppt_v1_information *table_info =
3070 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3071
3072 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
3073 table_info->vdd_dep_on_sclk;
3074 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
3075 table_info->vdd_dep_on_mclk;
3076
3077 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
3078 "VDD dependency on SCLK table is missing. \
3079 This table is mandatory", return -EINVAL);
3080 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
3081 "VDD dependency on SCLK table has to have is missing. \
3082 This table is mandatory", return -EINVAL);
3083
3084 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
3085 "VDD dependency on MCLK table is missing. \
3086 This table is mandatory", return -EINVAL);
3087 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
3088 "VDD dependency on MCLK table has to have is missing. \
3089 This table is mandatory", return -EINVAL);
3090
3091 table_info->max_clock_voltage_on_ac.sclk =
3092 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
3093 table_info->max_clock_voltage_on_ac.mclk =
3094 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
3095 table_info->max_clock_voltage_on_ac.vddc =
3096 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3097 table_info->max_clock_voltage_on_ac.vddci =
3098 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
3099
3100 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
3101 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
3102 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
3103 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =table_info->max_clock_voltage_on_ac.vddci;
3104
3105 return 0;
3106}
3107
/*
 * Board-specific MCLK voltage workaround.
 *
 * On certain Polaris10 boards -- matched below by PCI revision 0xC7 and
 * specific subsystem vendor/device IDs (presumably particular partner
 * designs; confirm against the board list) -- make sure the highest MCLK
 * state references a real voltage of at least 1000 (lookup-table units)
 * by re-pointing its voltage index at the first qualifying lookup entry.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
			table_info->vddc_lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct cgs_system_info sys_info = {0};

	sys_info.size = sizeof(struct cgs_system_info);

	/* query PCI revision and subsystem IDs to identify the board */
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	hw_revision = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_sys_id = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_vendor_id = (uint32_t)sys_info.value;

	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* top MCLK state already at >= 1000 -- nothing to patch */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		/* re-point the top MCLK state at the first real (< 0xff01,
		 * i.e. non-leakage-ID) voltage that is >= 1000 */
		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
3150
3151
3152static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3153{
3154 struct polaris10_hwmgr *data;
3155 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
3156 uint32_t temp_reg;
3157 int result;
3158 struct phm_ppt_v1_information *table_info =
3159 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3160
3161 data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
3162 if (data == NULL)
3163 return -ENOMEM;
3164
3165 hwmgr->backend = data;
3166
3167 data->dll_default_on = false;
3168 data->sram_end = SMC_RAM_END;
3169 data->mclk_dpm0_activity_target = 0xa;
3170 data->disable_dpm_mask = 0xFF;
3171 data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
3172 data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
3173 data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3174 data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3175 data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3176 data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3177 data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3178 data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3179 data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3180 data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
3181
3182 data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
3183 data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
3184 data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
3185 data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
3186 data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
3187 data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
3188 data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
3189 data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
3190
3191 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
3192
3193 data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
3194
3195 /* need to set voltage control types before EVV patching */
3196 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3197 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3198 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
3199
3200 data->enable_tdc_limit_feature = true;
3201 data->enable_pkg_pwr_tracking_feature = true;
3202 data->force_pcie_gen = PP_PCIEGenInvalid;
3203 data->mclk_stutter_mode_threshold = 40000;
3204
3205 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3206 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
3207 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3208
3209 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3210 PHM_PlatformCaps_EnableMVDDControl)) {
3211 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3212 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
3213 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
3214 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3215 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
3216 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3217 }
3218
3219 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3220 PHM_PlatformCaps_ControlVDDCI)) {
3221 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3222 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
3223 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
3224 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
3225 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
3226 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
3227 }
3228
3229 if (table_info->cac_dtp_table->usClockStretchAmount != 0)
3230 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3231 PHM_PlatformCaps_ClockStretcher);
3232
3233 polaris10_set_features_platform_caps(hwmgr);
3234
3235 polaris10_patch_voltage_workaround(hwmgr);
3236 polaris10_init_dpm_defaults(hwmgr);
3237
3238 /* Get leakage voltage based on leakage ID. */
3239 result = polaris10_get_evv_voltages(hwmgr);
3240
3241 if (result) {
3242 printk("Get EVV Voltage Failed. Abort Driver loading!\n");
3243 return -1;
3244 }
3245
3246 polaris10_complete_dependency_tables(hwmgr);
3247 polaris10_set_private_data_based_on_pptable(hwmgr);
3248
3249 /* Initalize Dynamic State Adjustment Rule Settings */
3250 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
3251
3252 if (0 == result) {
3253 struct cgs_system_info sys_info = {0};
3254
3255 data->is_tlu_enabled = false;
3256
3257 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
3258 POLARIS10_MAX_HARDWARE_POWERLEVELS;
3259 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
3260 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
3261
3262
3263 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
3264 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
3265 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
3266 case 0:
3267 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
3268 break;
3269 case 1:
3270 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
3271 break;
3272 case 2:
3273 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
3274 break;
3275 case 3:
3276 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
3277 break;
3278 case 4:
3279 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
3280 break;
3281 default:
3282 PP_ASSERT_WITH_CODE(0,
3283 "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
3284 );
3285 break;
3286 }
3287 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
3288 }
3289
3290 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
3291 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
3292 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
3293 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
3294
3295 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
3296 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
3297
3298 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
3299
3300 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
3301
3302 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
3303 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
3304
3305 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
3306
3307 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
3308 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0;
3309
3310 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
3311 table_info->cac_dtp_table->usOperatingTempStep = 1;
3312 table_info->cac_dtp_table->usOperatingTempHyst = 1;
3313
3314 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
3315 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
3316
3317 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
3318 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
3319
3320 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
3321 table_info->cac_dtp_table->usOperatingTempMinLimit;
3322
3323 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
3324 table_info->cac_dtp_table->usOperatingTempMaxLimit;
3325
3326 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
3327 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
3328
3329 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
3330 table_info->cac_dtp_table->usOperatingTempStep;
3331
3332 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
3333 table_info->cac_dtp_table->usTargetOperatingTemp;
3334 }
3335
3336 sys_info.size = sizeof(struct cgs_system_info);
3337 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
3338 result = cgs_query_system_info(hwmgr->device, &sys_info);
3339 if (result)
3340 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3341 else
3342 data->pcie_gen_cap = (uint32_t)sys_info.value;
3343 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
3344 data->pcie_spc_cap = 20;
3345 sys_info.size = sizeof(struct cgs_system_info);
3346 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
3347 result = cgs_query_system_info(hwmgr->device, &sys_info);
3348 if (result)
3349 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3350 else
3351 data->pcie_lane_cap = (uint32_t)sys_info.value;
3352
3353 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
3354/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
3355 hwmgr->platform_descriptor.clockStep.engineClock = 500;
3356 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
3357 } else {
3358 /* Ignore return value in here, we are cleaning up a mess. */
3359 polaris10_hwmgr_backend_fini(hwmgr);
3360 }
3361
3362 return 0;
3363}
3364
3365static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3366{
3367 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3368 uint32_t level, tmp;
3369
3370 if (!data->pcie_dpm_key_disabled) {
3371 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3372 level = 0;
3373 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3374 while (tmp >>= 1)
3375 level++;
3376
3377 if (level)
3378 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3379 PPSMC_MSG_PCIeDPM_ForceLevel, level);
3380 }
3381 }
3382
3383 if (!data->sclk_dpm_key_disabled) {
3384 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3385 level = 0;
3386 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3387 while (tmp >>= 1)
3388 level++;
3389
3390 if (level)
3391 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3392 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3393 (1 << level));
3394 }
3395 }
3396
3397 if (!data->mclk_dpm_key_disabled) {
3398 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3399 level = 0;
3400 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3401 while (tmp >>= 1)
3402 level++;
3403
3404 if (level)
3405 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3406 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3407 (1 << level));
3408 }
3409 }
3410
3411 return 0;
3412}
3413
3414static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3415{
3416 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3417
3418 phm_apply_dal_min_voltage_request(hwmgr);
3419
3420 if (!data->sclk_dpm_key_disabled) {
3421 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3422 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3423 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3424 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3425 }
3426
3427 if (!data->mclk_dpm_key_disabled) {
3428 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3429 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3430 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3431 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3432 }
3433
3434 return 0;
3435}
3436
3437static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3438{
3439 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3440
3441 if (!polaris10_is_dpm_running(hwmgr))
3442 return -EINVAL;
3443
3444 if (!data->pcie_dpm_key_disabled) {
3445 smum_send_msg_to_smc(hwmgr->smumgr,
3446 PPSMC_MSG_PCIeDPM_UnForceLevel);
3447 }
3448
3449 return polaris10_upload_dpm_level_enable_mask(hwmgr);
3450}
3451
3452static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3453{
3454 struct polaris10_hwmgr *data =
3455 (struct polaris10_hwmgr *)(hwmgr->backend);
3456 uint32_t level;
3457
3458 if (!data->sclk_dpm_key_disabled)
3459 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3460 level = phm_get_lowest_enabled_level(hwmgr,
3461 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3462 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3463 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3464 (1 << level));
3465
3466 }
3467
3468 if (!data->mclk_dpm_key_disabled) {
3469 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3470 level = phm_get_lowest_enabled_level(hwmgr,
3471 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3472 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3473 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3474 (1 << level));
3475 }
3476 }
3477
3478 if (!data->pcie_dpm_key_disabled) {
3479 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3480 level = phm_get_lowest_enabled_level(hwmgr,
3481 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3482 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3483 PPSMC_MSG_PCIeDPM_ForceLevel,
3484 (level));
3485 }
3486 }
3487
3488 return 0;
3489
3490}
3491static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
3492 enum amd_dpm_forced_level level)
3493{
3494 int ret = 0;
3495
3496 switch (level) {
3497 case AMD_DPM_FORCED_LEVEL_HIGH:
3498 ret = polaris10_force_dpm_highest(hwmgr);
3499 if (ret)
3500 return ret;
3501 break;
3502 case AMD_DPM_FORCED_LEVEL_LOW:
3503 ret = polaris10_force_dpm_lowest(hwmgr);
3504 if (ret)
3505 return ret;
3506 break;
3507 case AMD_DPM_FORCED_LEVEL_AUTO:
3508 ret = polaris10_unforce_dpm_levels(hwmgr);
3509 if (ret)
3510 return ret;
3511 break;
3512 default:
3513 break;
3514 }
3515
3516 hwmgr->dpm_level = level;
3517
3518 return ret;
3519}
3520
3521static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
3522{
3523 return sizeof(struct polaris10_power_state);
3524}
3525
3526
3527static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3528 struct pp_power_state *request_ps,
3529 const struct pp_power_state *current_ps)
3530{
3531
3532 struct polaris10_power_state *polaris10_ps =
3533 cast_phw_polaris10_power_state(&request_ps->hardware);
3534 uint32_t sclk;
3535 uint32_t mclk;
3536 struct PP_Clocks minimum_clocks = {0};
3537 bool disable_mclk_switching;
3538 bool disable_mclk_switching_for_frame_lock;
3539 struct cgs_display_info info = {0};
3540 const struct phm_clock_and_voltage_limits *max_limits;
3541 uint32_t i;
3542 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3543 struct phm_ppt_v1_information *table_info =
3544 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3545 int32_t count;
3546 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3547
3548 data->battery_state = (PP_StateUILabel_Battery ==
3549 request_ps->classification.ui_label);
3550
3551 PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
3552 "VI should always have 2 performance levels",
3553 );
3554
3555 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3556 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3557 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3558
3559 /* Cap clock DPM tables at DC MAX if it is in DC. */
3560 if (PP_PowerSource_DC == hwmgr->power_source) {
3561 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
3562 if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
3563 polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
3564 if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
3565 polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
3566 }
3567 }
3568
3569 polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3570 polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3571
3572 cgs_get_active_displays_info(hwmgr->device, &info);
3573
3574 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3575
3576 /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
3577
3578 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3579 PHM_PlatformCaps_StablePState)) {
3580 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3581 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3582
3583 for (count = table_info->vdd_dep_on_sclk->count - 1;
3584 count >= 0; count--) {
3585 if (stable_pstate_sclk >=
3586 table_info->vdd_dep_on_sclk->entries[count].clk) {
3587 stable_pstate_sclk =
3588 table_info->vdd_dep_on_sclk->entries[count].clk;
3589 break;
3590 }
3591 }
3592
3593 if (count < 0)
3594 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3595
3596 stable_pstate_mclk = max_limits->mclk;
3597
3598 minimum_clocks.engineClock = stable_pstate_sclk;
3599 minimum_clocks.memoryClock = stable_pstate_mclk;
3600 }
3601
3602 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3603 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3604
3605 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3606 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3607
3608 polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
3609
3610 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
3611 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3612 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3613 "Overdrive sclk exceeds limit",
3614 hwmgr->gfx_arbiter.sclk_over_drive =
3615 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3616
3617 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3618 polaris10_ps->performance_levels[1].engine_clock =
3619 hwmgr->gfx_arbiter.sclk_over_drive;
3620 }
3621
3622 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
3623 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3624 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3625 "Overdrive mclk exceeds limit",
3626 hwmgr->gfx_arbiter.mclk_over_drive =
3627 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3628
3629 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3630 polaris10_ps->performance_levels[1].memory_clock =
3631 hwmgr->gfx_arbiter.mclk_over_drive;
3632 }
3633
3634 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3635 hwmgr->platform_descriptor.platformCaps,
3636 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3637
3638
3639 disable_mclk_switching = (1 < info.display_count) ||
3640 disable_mclk_switching_for_frame_lock;
3641
3642 sclk = polaris10_ps->performance_levels[0].engine_clock;
3643 mclk = polaris10_ps->performance_levels[0].memory_clock;
3644
3645 if (disable_mclk_switching)
3646 mclk = polaris10_ps->performance_levels
3647 [polaris10_ps->performance_level_count - 1].memory_clock;
3648
3649 if (sclk < minimum_clocks.engineClock)
3650 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3651 max_limits->sclk : minimum_clocks.engineClock;
3652
3653 if (mclk < minimum_clocks.memoryClock)
3654 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3655 max_limits->mclk : minimum_clocks.memoryClock;
3656
3657 polaris10_ps->performance_levels[0].engine_clock = sclk;
3658 polaris10_ps->performance_levels[0].memory_clock = mclk;
3659
3660 polaris10_ps->performance_levels[1].engine_clock =
3661 (polaris10_ps->performance_levels[1].engine_clock >=
3662 polaris10_ps->performance_levels[0].engine_clock) ?
3663 polaris10_ps->performance_levels[1].engine_clock :
3664 polaris10_ps->performance_levels[0].engine_clock;
3665
3666 if (disable_mclk_switching) {
3667 if (mclk < polaris10_ps->performance_levels[1].memory_clock)
3668 mclk = polaris10_ps->performance_levels[1].memory_clock;
3669
3670 polaris10_ps->performance_levels[0].memory_clock = mclk;
3671 polaris10_ps->performance_levels[1].memory_clock = mclk;
3672 } else {
3673 if (polaris10_ps->performance_levels[1].memory_clock <
3674 polaris10_ps->performance_levels[0].memory_clock)
3675 polaris10_ps->performance_levels[1].memory_clock =
3676 polaris10_ps->performance_levels[0].memory_clock;
3677 }
3678
3679 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3680 PHM_PlatformCaps_StablePState)) {
3681 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
3682 polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3683 polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3684 polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3685 polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3686 }
3687 }
3688 return 0;
3689}
3690
3691
3692static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3693{
3694 struct pp_power_state *ps;
3695 struct polaris10_power_state *polaris10_ps;
3696
3697 if (hwmgr == NULL)
3698 return -EINVAL;
3699
3700 ps = hwmgr->request_ps;
3701
3702 if (ps == NULL)
3703 return -EINVAL;
3704
3705 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
3706
3707 if (low)
3708 return polaris10_ps->performance_levels[0].memory_clock;
3709 else
3710 return polaris10_ps->performance_levels
3711 [polaris10_ps->performance_level_count-1].memory_clock;
3712}
3713
3714static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3715{
3716 struct pp_power_state *ps;
3717 struct polaris10_power_state *polaris10_ps;
3718
3719 if (hwmgr == NULL)
3720 return -EINVAL;
3721
3722 ps = hwmgr->request_ps;
3723
3724 if (ps == NULL)
3725 return -EINVAL;
3726
3727 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
3728
3729 if (low)
3730 return polaris10_ps->performance_levels[0].engine_clock;
3731 else
3732 return polaris10_ps->performance_levels
3733 [polaris10_ps->performance_level_count-1].engine_clock;
3734}
3735
/*
 * Patch the boot power state @hw_ps with the clocks, voltages and PCIe
 * settings the VBIOS used at boot, read from the ATOM firmware info
 * table.  The decoded values are also cached in data->vbios_boot_state.
 *
 * Returns 0, including when the firmware info table is absent (test
 * environments).
 */
static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state.  VBIOS fields are little-endian. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot settings come from the current link state, not the
	 * firmware info table.
	 */
	data->vbios_boot_state.pcie_gen_bootup_value =
			phm_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)phm_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
3781
3782static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3783 void *state, struct pp_power_state *power_state,
3784 void *pp_table, uint32_t classification_flag)
3785{
3786 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3787 struct polaris10_power_state *polaris10_power_state =
3788 (struct polaris10_power_state *)(&(power_state->hardware));
3789 struct polaris10_performance_level *performance_level;
3790 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3791 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3792 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3793 PPTable_Generic_SubTable_Header *sclk_dep_table =
3794 (PPTable_Generic_SubTable_Header *)
3795 (((unsigned long)powerplay_table) +
3796 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3797
3798 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3799 (ATOM_Tonga_MCLK_Dependency_Table *)
3800 (((unsigned long)powerplay_table) +
3801 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3802
3803 /* The following fields are not initialized here: id orderedList allStatesList */
3804 power_state->classification.ui_label =
3805 (le16_to_cpu(state_entry->usClassification) &
3806 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3807 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3808 power_state->classification.flags = classification_flag;
3809 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3810
3811 power_state->classification.temporary_state = false;
3812 power_state->classification.to_be_deleted = false;
3813
3814 power_state->validation.disallowOnDC =
3815 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3816 ATOM_Tonga_DISALLOW_ON_DC));
3817
3818 power_state->pcie.lanes = 0;
3819
3820 power_state->display.disableFrameModulation = false;
3821 power_state->display.limitRefreshrate = false;
3822 power_state->display.enableVariBright =
3823 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3824 ATOM_Tonga_ENABLE_VARIBRIGHT));
3825
3826 power_state->validation.supportedPowerLevels = 0;
3827 power_state->uvd_clocks.VCLK = 0;
3828 power_state->uvd_clocks.DCLK = 0;
3829 power_state->temperatures.min = 0;
3830 power_state->temperatures.max = 0;
3831
3832 performance_level = &(polaris10_power_state->performance_levels
3833 [polaris10_power_state->performance_level_count++]);
3834
3835 PP_ASSERT_WITH_CODE(
3836 (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
3837 "Performance levels exceeds SMC limit!",
3838 return -1);
3839
3840 PP_ASSERT_WITH_CODE(
3841 (polaris10_power_state->performance_level_count <=
3842 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3843 "Performance levels exceeds Driver limit!",
3844 return -1);
3845
3846 /* Performance levels are arranged from low to high. */
3847 performance_level->memory_clock = mclk_dep_table->entries
3848 [state_entry->ucMemoryClockIndexLow].ulMclk;
3849 if (sclk_dep_table->ucRevId == 0)
3850 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3851 [state_entry->ucEngineClockIndexLow].ulSclk;
3852 else if (sclk_dep_table->ucRevId == 1)
3853 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3854 [state_entry->ucEngineClockIndexLow].ulSclk;
3855 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3856 state_entry->ucPCIEGenLow);
3857 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3858 state_entry->ucPCIELaneHigh);
3859
3860 performance_level = &(polaris10_power_state->performance_levels
3861 [polaris10_power_state->performance_level_count++]);
3862 performance_level->memory_clock = mclk_dep_table->entries
3863 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3864
3865 if (sclk_dep_table->ucRevId == 0)
3866 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3867 [state_entry->ucEngineClockIndexHigh].ulSclk;
3868 else if (sclk_dep_table->ucRevId == 1)
3869 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3870 [state_entry->ucEngineClockIndexHigh].ulSclk;
3871
3872 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3873 state_entry->ucPCIEGenHigh);
3874 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3875 state_entry->ucPCIELaneHigh);
3876
3877 return 0;
3878}
3879
3880static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3881 unsigned long entry_index, struct pp_power_state *state)
3882{
3883 int result;
3884 struct polaris10_power_state *ps;
3885 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3886 struct phm_ppt_v1_information *table_info =
3887 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3888 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3889 table_info->vdd_dep_on_mclk;
3890
3891 state->hardware.magic = PHM_VIslands_Magic;
3892
3893 ps = (struct polaris10_power_state *)(&state->hardware);
3894
3895 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3896 polaris10_get_pp_table_entry_callback_func);
3897
3898 /* This is the earliest time we have all the dependency table and the VBIOS boot state
3899 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
3900 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
3901 */
3902 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3903 if (dep_mclk_table->entries[0].clk !=
3904 data->vbios_boot_state.mclk_bootup_value)
3905 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3906 "does not match VBIOS boot MCLK level");
3907 if (dep_mclk_table->entries[0].vddci !=
3908 data->vbios_boot_state.vddci_bootup_value)
3909 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3910 "does not match VBIOS boot VDDCI level");
3911 }
3912
3913 /* set DC compatible flag if this state supports DC */
3914 if (!state->validation.disallowOnDC)
3915 ps->dc_compatible = true;
3916
3917 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3918 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3919
3920 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3921 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3922
3923 if (!result) {
3924 uint32_t i;
3925
3926 switch (state->classification.ui_label) {
3927 case PP_StateUILabel_Performance:
3928 data->use_pcie_performance_levels = true;
3929 for (i = 0; i < ps->performance_level_count; i++) {
3930 if (data->pcie_gen_performance.max <
3931 ps->performance_levels[i].pcie_gen)
3932 data->pcie_gen_performance.max =
3933 ps->performance_levels[i].pcie_gen;
3934
3935 if (data->pcie_gen_performance.min >
3936 ps->performance_levels[i].pcie_gen)
3937 data->pcie_gen_performance.min =
3938 ps->performance_levels[i].pcie_gen;
3939
3940 if (data->pcie_lane_performance.max <
3941 ps->performance_levels[i].pcie_lane)
3942 data->pcie_lane_performance.max =
3943 ps->performance_levels[i].pcie_lane;
3944 if (data->pcie_lane_performance.min >
3945 ps->performance_levels[i].pcie_lane)
3946 data->pcie_lane_performance.min =
3947 ps->performance_levels[i].pcie_lane;
3948 }
3949 break;
3950 case PP_StateUILabel_Battery:
3951 data->use_pcie_power_saving_levels = true;
3952
3953 for (i = 0; i < ps->performance_level_count; i++) {
3954 if (data->pcie_gen_power_saving.max <
3955 ps->performance_levels[i].pcie_gen)
3956 data->pcie_gen_power_saving.max =
3957 ps->performance_levels[i].pcie_gen;
3958
3959 if (data->pcie_gen_power_saving.min >
3960 ps->performance_levels[i].pcie_gen)
3961 data->pcie_gen_power_saving.min =
3962 ps->performance_levels[i].pcie_gen;
3963
3964 if (data->pcie_lane_power_saving.max <
3965 ps->performance_levels[i].pcie_lane)
3966 data->pcie_lane_power_saving.max =
3967 ps->performance_levels[i].pcie_lane;
3968
3969 if (data->pcie_lane_power_saving.min >
3970 ps->performance_levels[i].pcie_lane)
3971 data->pcie_lane_power_saving.min =
3972 ps->performance_levels[i].pcie_lane;
3973 }
3974 break;
3975 default:
3976 break;
3977 }
3978 }
3979 return 0;
3980}
3981
3982static void
3983polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
3984{
3985 uint32_t sclk, mclk, activity_percent;
3986 uint32_t offset;
3987 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
3988
3989 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
3990
3991 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3992
3993 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
3994
3995 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3996 seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
3997 mclk / 100, sclk / 100);
3998
3999 offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
4000 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
4001 activity_percent += 0x80;
4002 activity_percent >>= 8;
4003
4004 seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
4005
4006 seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
4007
4008 seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
4009}
4010
/*
 * Compare the target power state's top-level SCLK/MCLK against the
 * current DPM tables and record, in data->need_update_smu7_dpm_table,
 * which tables must be rebuilt (OD_UPDATE, clock not found in table) or
 * repopulated (UPDATE, e.g. display-count change) before the state can
 * be applied.  Always returns 0.
 */
static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct polaris10_power_state *polaris10_ps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].engine_clock;
	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].memory_clock;
	/* NOTE(review): min_clocks is never populated (the DAL query below
	 * is still a TODO), so the DeepSleep comparison effectively runs
	 * against 0.
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_smu7_dpm_table = 0;

	/* Does the requested SCLK match an existing DPM level? */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
		/* TODO: Check SCLK in DAL's minimum clocks
		 * in case DeepSleep divider update is required.
		 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Same check for the requested MCLK. */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	/* A change in active display count also forces an MCLK update. */
	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
4062
4063static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4064 const struct polaris10_power_state *polaris10_ps)
4065{
4066 uint32_t i;
4067 uint32_t sclk, max_sclk = 0;
4068 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4069 struct polaris10_dpm_table *dpm_table = &data->dpm_table;
4070
4071 for (i = 0; i < polaris10_ps->performance_level_count; i++) {
4072 sclk = polaris10_ps->performance_levels[i].engine_clock;
4073 if (max_sclk < sclk)
4074 max_sclk = sclk;
4075 }
4076
4077 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4078 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4079 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4080 dpm_table->pcie_speed_table.dpm_levels
4081 [dpm_table->pcie_speed_table.count - 1].value :
4082 dpm_table->pcie_speed_table.dpm_levels[i].value);
4083 }
4084
4085 return 0;
4086}
4087
/*
 * Before switching power states, ask the platform (via ACPI) to raise the
 * PCIe link speed when the new state needs a faster link than the current
 * one.  If the platform denies the request, a fallback gen is remembered
 * in data->force_pcie_gen.  When the new state needs a *slower* link,
 * only pspp_notify_required is set so the downgrade can happen after the
 * state change.  Always returns 0.
 */
static int polaris10_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	const struct polaris10_power_state *polaris10_nps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	const struct polaris10_power_state *polaris10_cps =
			cast_const_phw_polaris10_power_state(states->pcurrent_state);

	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through - Gen3 denied, try Gen2 */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request denied, fall back to current speed */
		default:
			data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
4132
4133static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4134{
4135 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4136
4137 if (0 == data->need_update_smu7_dpm_table)
4138 return 0;
4139
4140 if ((0 == data->sclk_dpm_key_disabled) &&
4141 (data->need_update_smu7_dpm_table &
4142 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4143 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4144 "Trying to freeze SCLK DPM when DPM is disabled",
4145 );
4146 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4147 PPSMC_MSG_SCLKDPM_FreezeLevel),
4148 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4149 return -1);
4150 }
4151
4152 if ((0 == data->mclk_dpm_key_disabled) &&
4153 (data->need_update_smu7_dpm_table &
4154 DPMTABLE_OD_UPDATE_MCLK)) {
4155 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4156 "Trying to freeze MCLK DPM when DPM is disabled",
4157 );
4158 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4159 PPSMC_MSG_MCLKDPM_FreezeLevel),
4160 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4161 return -1);
4162 }
4163
4164 return 0;
4165}
4166
/*
 * Rewrite the top SCLK/MCLK dpm levels for an overdriven power state and
 * push the updated graphics/memory level tables to the SMC.
 *
 * When OD6Plus is supported, intermediate levels (top-1 down to 2) are
 * rescaled proportionally to how far the requested clock deviates from the
 * golden (default) top level, so the heatmap axis stays consistent.
 */
static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct polaris10_power_state *polaris10_ps =
			cast_const_phw_polaris10_power_state(states->pnew_state);
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	/* Requested clocks come from the state's highest performance level. */
	uint32_t sclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].engine_clock;
	uint32_t mclk = polaris10_ps->performance_levels
			[polaris10_ps->performance_level_count - 1].memory_clock;
	struct polaris10_dpm_table *dpm_table = &data->dpm_table;

	struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* Top level takes the requested engine clock directly. */
		dpm_table->sclk_table.dpm_levels
		[dpm_table->sclk_table.count - 1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count - 1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;

			/* Rescale intermediate levels by the % deviation of
			 * sclk from the golden top level (levels 0/1 kept). */
			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
					clock_percent =
					      ((sclk
						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
						) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent)/100;

				/* NOTE(review): this branch indexes the golden
				 * table with dpm_table->sclk_table.count-1 while
				 * the branch above uses the golden table's own
				 * count - looks inconsistent if the counts ever
				 * differ; confirm against later smu7 code. */
				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
						- sclk) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* Same scheme for the memory clock table. */
		dpm_table->mclk_table.dpm_levels
			[dpm_table->mclk_table.count - 1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
					(golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value != 0),
					"Divide by 0!",
					return -1);
			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
					clock_percent = ((mclk -
					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value +
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;

				/* NOTE(review): same golden-vs-current count
				 * index mix as the sclk path above - confirm. */
				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = (
					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
							* 100)
							/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	/* Upload whichever tables were touched to the SMC. */
	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = polaris10_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = polaris10_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}
4295
4296static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4297 struct polaris10_single_dpm_table *dpm_table,
4298 uint32_t low_limit, uint32_t high_limit)
4299{
4300 uint32_t i;
4301
4302 for (i = 0; i < dpm_table->count; i++) {
4303 if ((dpm_table->dpm_levels[i].value < low_limit)
4304 || (dpm_table->dpm_levels[i].value > high_limit))
4305 dpm_table->dpm_levels[i].enabled = false;
4306 else
4307 dpm_table->dpm_levels[i].enabled = true;
4308 }
4309
4310 return 0;
4311}
4312
4313static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
4314 const struct polaris10_power_state *polaris10_ps)
4315{
4316 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4317 uint32_t high_limit_count;
4318
4319 PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
4320 "power state did not have any performance level",
4321 return -1);
4322
4323 high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
4324
4325 polaris10_trim_single_dpm_states(hwmgr,
4326 &(data->dpm_table.sclk_table),
4327 polaris10_ps->performance_levels[0].engine_clock,
4328 polaris10_ps->performance_levels[high_limit_count].engine_clock);
4329
4330 polaris10_trim_single_dpm_states(hwmgr,
4331 &(data->dpm_table.mclk_table),
4332 polaris10_ps->performance_levels[0].memory_clock,
4333 polaris10_ps->performance_levels[high_limit_count].memory_clock);
4334
4335 return 0;
4336}
4337
4338static int polaris10_generate_dpm_level_enable_mask(
4339 struct pp_hwmgr *hwmgr, const void *input)
4340{
4341 int result;
4342 const struct phm_set_power_state_input *states =
4343 (const struct phm_set_power_state_input *)input;
4344 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4345 const struct polaris10_power_state *polaris10_ps =
4346 cast_const_phw_polaris10_power_state(states->pnew_state);
4347
4348 result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
4349 if (result)
4350 return result;
4351
4352 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4353 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4354 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4355 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4356 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4357 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4358
4359 return 0;
4360}
4361
4362static int
4363polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4364{
4365 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
4366 PPSMC_MSG_UVDDPM_Enable :
4367 PPSMC_MSG_UVDDPM_Disable);
4368}
4369
4370int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
4371{
4372 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4373 PPSMC_MSG_VCEDPM_Enable :
4374 PPSMC_MSG_VCEDPM_Disable);
4375}
4376
4377static int
4378polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
4379{
4380 return smum_send_msg_to_smc(hwmgr->smumgr, enable?
4381 PPSMC_MSG_SAMUDPM_Enable :
4382 PPSMC_MSG_SAMUDPM_Disable);
4383}
4384
/*
 * Update the UVD boot level in SMC RAM and enable/disable UVD DPM.
 * bgate=true means UVD is being power-gated: only the disable is sent.
 */
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot from the highest MM dependency level if one exists. */
		data->smc_state_table.UvdBootLevel = 0;
		if (table_info->mm_dep_table->count > 0)
			data->smc_state_table.UvdBootLevel =
					(uint8_t) (table_info->mm_dep_table->count - 1);
		/* UvdBootLevel occupies bits 31:24 of a dword in SMC RAM:
		 * align the offset down to 4 bytes, then read-modify-write
		 * only that byte. */
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled-level mask when UVD DPM is off or a stable
		 * pstate is requested. */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_UVDDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4419
4420int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4421{
4422 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4423 uint32_t mm_boot_level_offset, mm_boot_level_value;
4424 struct phm_ppt_v1_information *table_info =
4425 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4426
4427 if (!bgate) {
4428 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4429 PHM_PlatformCaps_StablePState))
4430 data->smc_state_table.VceBootLevel =
4431 (uint8_t) (table_info->mm_dep_table->count - 1);
4432 else
4433 data->smc_state_table.VceBootLevel = 0;
4434
4435 mm_boot_level_offset = data->dpm_table_start +
4436 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
4437 mm_boot_level_offset /= 4;
4438 mm_boot_level_offset *= 4;
4439 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
4440 CGS_IND_REG__SMC, mm_boot_level_offset);
4441 mm_boot_level_value &= 0xFF00FFFF;
4442 mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
4443 cgs_write_ind_register(hwmgr->device,
4444 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4445
4446 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
4447 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4448 PPSMC_MSG_VCEDPM_SetEnabledMask,
4449 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4450 }
4451
4452 polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
4453
4454 return 0;
4455}
4456
/*
 * Update the SAMU boot level in SMC RAM and enable/disable SAMU DPM.
 * bgate=true means SAMU is being power-gated: only the disable is sent.
 */
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;

	if (!bgate) {
		/* SamuBootLevel occupies bits 7:0 of a dword in SMC RAM:
		 * align the offset down to 4 bytes, then read-modify-write
		 * only that byte. */
		data->smc_state_table.SamuBootLevel = 0;
		mm_boot_level_offset = data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFFFFFF00;
		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Pin the enabled-level mask for stable-pstate operation. */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SAMUDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
	}

	return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
}
4484
/*
 * Push the low-SCLK interrupt threshold to the SMC when the arbiter value
 * changed and the SclkThrottleLowNotification cap is set; no-op otherwise.
 */
static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (hwmgr->gfx_arbiter.sclk_threshold !=
				data->low_sclk_interrupt_threshold)) {
		/* Cache the new threshold so unchanged values skip the SMC write. */
		data->low_sclk_interrupt_threshold =
				hwmgr->gfx_arbiter.sclk_threshold;
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		/* SMC expects big-endian dwords. */
		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		result = polaris10_copy_bytes_to_smc(
				hwmgr->smumgr,
				data->dpm_table_start +
				offsetof(SMU74_Discrete_DpmTable,
					LowSclkInterruptThreshold),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				data->sram_end);
	}

	return result;
}
4515
4516static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
4517{
4518 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4519
4520 if (data->need_update_smu7_dpm_table &
4521 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
4522 return polaris10_program_memory_timing_parameters(hwmgr);
4523
4524 return 0;
4525}
4526
4527static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4528{
4529 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4530
4531 if (0 == data->need_update_smu7_dpm_table)
4532 return 0;
4533
4534 if ((0 == data->sclk_dpm_key_disabled) &&
4535 (data->need_update_smu7_dpm_table &
4536 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4537
4538 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4539 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4540 );
4541 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4542 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4543 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4544 return -1);
4545 }
4546
4547 if ((0 == data->mclk_dpm_key_disabled) &&
4548 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4549
4550 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4551 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4552 );
4553 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4554 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
4555 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4556 return -1);
4557 }
4558
4559 data->need_update_smu7_dpm_table = 0;
4560
4561 return 0;
4562}
4563
4564static int polaris10_notify_link_speed_change_after_state_change(
4565 struct pp_hwmgr *hwmgr, const void *input)
4566{
4567 const struct phm_set_power_state_input *states =
4568 (const struct phm_set_power_state_input *)input;
4569 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4570 const struct polaris10_power_state *polaris10_ps =
4571 cast_const_phw_polaris10_power_state(states->pnew_state);
4572 uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
4573 uint8_t request;
4574
4575 if (data->pspp_notify_required) {
4576 if (target_link_speed == PP_PCIEGen3)
4577 request = PCIE_PERF_REQ_GEN3;
4578 else if (target_link_speed == PP_PCIEGen2)
4579 request = PCIE_PERF_REQ_GEN2;
4580 else
4581 request = PCIE_PERF_REQ_GEN1;
4582
4583 if (request == PCIE_PERF_REQ_GEN1 &&
4584 phm_get_current_pcie_speed(hwmgr) > 0)
4585 return 0;
4586
4587 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
4588 if (PP_PCIEGen2 == target_link_speed)
4589 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
4590 else
4591 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
4592 }
4593 }
4594
4595 return 0;
4596}
4597
4598static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
4599{
4600 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4601
4602 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4603 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
4604 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
4605}
4606
4607
4608
/*
 * Apply a new power state: find the changed clocks, freeze DPM, rewrite
 * and upload the dpm tables, then unfreeze and notify.  Each step's
 * failure is recorded in result but the sequence continues, so later
 * cleanup steps (notably unfreeze) still run.
 */
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Determine which of SCLK/MCLK actually changed. */
	tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			polaris10_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	/* Freeze DPM while the tables are rewritten. */
	tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = polaris10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program memory timing parameters!",
			result = tmp_result);

	tmp_result = polaris10_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	/* Unfreeze DPM and clear the pending-update flags. */
	tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
4678
4679static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4680{
4681 hwmgr->thermal_controller.
4682 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4683
4684 if (phm_is_hw_access_blocked(hwmgr))
4685 return 0;
4686
4687 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4688 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4689}
4690
4691
4692static int
4693polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4694{
4695 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4696
4697 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
4698}
4699
4700static int
4701polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4702{
4703 uint32_t num_active_displays = 0;
4704 struct cgs_display_info info = {0};
4705 info.mode_info = NULL;
4706
4707 cgs_get_active_displays_info(hwmgr->device, &info);
4708
4709 num_active_displays = info.display_count;
4710
4711 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
4712 polaris10_notify_smc_display_change(hwmgr, false);
4713
4714
4715 return 0;
4716}
4717
4718/**
4719* Programs the display gap
4720*
4721* @param hwmgr the address of the powerplay hardware manager.
4722* @return always OK
4723*/
static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* Gap on vblank/watermark when any display is active, otherwise ignore. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Assume 60 Hz when no mode reports a refresh rate. */
	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* NOTE(review): this subtraction underflows (wraps, unsigned) if
	 * vblank_time_us + 200 exceeds frame_time_in_us - confirm inputs. */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	return 0;
}
4767
4768
/* Display config changed: reprogram the display gap registers. */
static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return polaris10_program_display_gap(hwmgr);
}
4773
4774/**
4775* Set maximum target operating fan output RPM
4776*
4777* @param hwmgr: the address of the powerplay hardware manager.
4778* @param usMaxFanRpm: max operating fan RPM value.
4779* @return The response that came from the SMC.
4780*/
4781static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4782{
4783 hwmgr->thermal_controller.
4784 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4785
4786 if (phm_is_hw_access_blocked(hwmgr))
4787 return 0;
4788
4789 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4790 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4791}
4792
/* Stub: internal thermal interrupt registration is not implemented. */
static int
polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
					const void *thermal_interrupt_info)
{
	return 0;
}
4799
/*
 * Return true when the SMC must be re-notified about the display setup -
 * currently only when the active display count has changed.
 */
static bool polaris10_check_smc_update_required_for_display_configuration(
		struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;
	struct cgs_display_info info = {0, 0, NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		is_update_required = true;
/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
		if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
			data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
			is_update_required = true;
*/
	return is_update_required;
}
4821
4822static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
4823 const struct polaris10_performance_level *pl2)
4824{
4825 return ((pl1->memory_clock == pl2->memory_clock) &&
4826 (pl1->engine_clock == pl2->engine_clock) &&
4827 (pl1->pcie_gen == pl2->pcie_gen) &&
4828 (pl1->pcie_lane == pl2->pcie_lane));
4829}
4830
4831static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr,
4832 const struct pp_hw_power_state *pstate1,
4833 const struct pp_hw_power_state *pstate2, bool *equal)
4834{
4835 const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
4836 const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
4837 int i;
4838
4839 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4840 return -EINVAL;
4841
4842 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4843 if (psa->performance_level_count != psb->performance_level_count) {
4844 *equal = false;
4845 return 0;
4846 }
4847
4848 for (i = 0; i < psa->performance_level_count; i++) {
4849 if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4850 /* If we have found even one performance level pair that is different the states are different. */
4851 *equal = false;
4852 return 0;
4853 }
4854 }
4855
4856 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4857 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4858 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4859 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4860
4861 return 0;
4862}
4863
/*
 * Check whether the VBIOS already loaded the full MC ucode.  The actual
 * driver-side MC microcode load is commented out below, so currently
 * long memory training is never required either way.
 */
static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	uint32_t vbios_version;

	/* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/

	phm_get_mc_microcode_version(hwmgr);
	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
	/* Full version of MC ucode has already been loaded. */
	if (vbios_version == 0) {
		data->need_long_memory_training = false;
		return 0;
	}

	data->need_long_memory_training = false;

/*
 *	PPMCME_FirmwareDescriptorEntry *pfd = NULL;
	pfd = &tonga_mcmeFirmware;
	if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
		polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
					pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
					pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
*/
	return 0;
}
4892
4893/**
4894 * Read clock related registers.
4895 *
4896 * @param hwmgr the address of the powerplay hardware manager.
4897 * @return always 0
4898 */
static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

	/* Snapshot the SPLL control registers, keeping only the field each
	 * consumer cares about (bypass enable, mux select, spare bits). */
	data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
						& CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;

	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
						& CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;

	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
						& CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;

	return 0;
}
4917
4918/**
4919 * Find out if memory is GDDR5.
4920 *
4921 * @param hwmgr the address of the powerplay hardware manager.
4922 * @return always 0
4923 */
4924static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
4925{
4926 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4927 uint32_t temp;
4928
4929 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
4930
4931 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
4932 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
4933 MC_SEQ_MISC0_GDDR5_SHIFT));
4934
4935 return 0;
4936}
4937
4938/**
4939 * Enables Dynamic Power Management by SMC
4940 *
4941 * @param hwmgr the address of the powerplay hardware manager.
4942 * @return always 0
4943 */
static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	/* Set GENERAL_PWRMGT.STATIC_PM_EN so the SMC manages static PM. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
4951
4952/**
4953 * Initialize PowerGating States for different engines
4954 *
4955 * @param hwmgr the address of the powerplay hardware manager.
4956 * @return always 0
4957 */
4958static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
4959{
4960 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4961
4962 data->uvd_power_gated = false;
4963 data->vce_power_gated = false;
4964 data->samu_power_gated = false;
4965
4966 return 0;
4967}
4968
4969static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4970{
4971 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4972 data->low_sclk_interrupt_threshold = 0;
4973
4974 return 0;
4975}
4976
/*
 * One-time ASIC setup sequence run when the hwmgr is brought up.
 *
 * Each step's failure is recorded into @result via PP_ASSERT_WITH_CODE
 * (which logs and executes the supplied statement) but the remaining
 * steps are still attempted; the last failing code is returned.
 */
static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	polaris10_upload_mc_firmware(hwmgr);

	tmp_result = polaris10_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = polaris10_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = polaris10_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = polaris10_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = phm_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = polaris10_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
5009
5010static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
5011 enum pp_clock_type type, uint32_t mask)
5012{
5013 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5014
5015 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
5016 return -EINVAL;
5017
5018 switch (type) {
5019 case PP_SCLK:
5020 if (!data->sclk_dpm_key_disabled)
5021 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5022 PPSMC_MSG_SCLKDPM_SetEnabledMask,
5023 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
5024 break;
5025 case PP_MCLK:
5026 if (!data->mclk_dpm_key_disabled)
5027 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5028 PPSMC_MSG_MCLKDPM_SetEnabledMask,
5029 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
5030 break;
5031 case PP_PCIE:
5032 {
5033 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
5034 uint32_t level = 0;
5035
5036 while (tmp >>= 1)
5037 level++;
5038
5039 if (!data->pcie_dpm_key_disabled)
5040 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
5041 PPSMC_MSG_PCIeDPM_ForceLevel,
5042 level);
5043 break;
5044 }
5045 default:
5046 break;
5047 }
5048
5049 return 0;
5050}
5051
/* Return the LC_CURRENT_DATA_RATE field of PCIE_LC_SPEED_CNTL
 * (the current PCIe link speed as a raw hardware encoding). */
static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
5062
/*
 * Print the DPM level table for a clock domain into @buf, marking the
 * current level with '*'.
 *
 * For sclk/mclk the current frequency is queried from the SMC (reply is
 * read back from SMC_MSG_ARG_0); the "current" level is the first table
 * entry whose value is >= that frequency.  For PCIe the current speed is
 * matched exactly against the table.
 *
 * NOTE(review): buf is written with unbounded sprintf() -- presumably a
 * PAGE_SIZE sysfs buffer as elsewhere in powerplay; confirm at the caller.
 *
 * @return number of bytes written to @buf.
 */
static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* table values are in 10 kHz units, hence /100 for MHz */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
5127
5128static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
5129{
5130 if (mode) {
5131 /* stop auto-manage */
5132 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
5133 PHM_PlatformCaps_MicrocodeFanControl))
5134 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
5135 polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
5136 } else
5137 /* restart auto-manage */
5138 polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
5139
5140 return 0;
5141}
5142
5143static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5144{
5145 if (hwmgr->fan_ctrl_is_in_default_mode)
5146 return hwmgr->fan_ctrl_default_mode;
5147 else
5148 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
5149 CG_FDO_CTRL2, FDO_PWM_MODE);
5150}
5151
5152static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
5153{
5154 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5155 struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5156 struct polaris10_single_dpm_table *golden_sclk_table =
5157 &(data->golden_dpm_table.sclk_table);
5158 int value;
5159
5160 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5161 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5162 100 /
5163 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5164
5165 return value;
5166}
5167
5168static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5169{
5170 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5171 struct polaris10_single_dpm_table *golden_sclk_table =
5172 &(data->golden_dpm_table.sclk_table);
5173 struct pp_power_state *ps;
5174 struct polaris10_power_state *polaris10_ps;
5175
5176 if (value > 20)
5177 value = 20;
5178
5179 ps = hwmgr->request_ps;
5180
5181 if (ps == NULL)
5182 return -EINVAL;
5183
5184 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5185
5186 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
5187 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5188 value / 100 +
5189 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5190
5191 return 0;
5192}
5193
5194static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
5195{
5196 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5197 struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5198 struct polaris10_single_dpm_table *golden_mclk_table =
5199 &(data->golden_dpm_table.mclk_table);
5200 int value;
5201
5202 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5203 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5204 100 /
5205 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5206
5207 return value;
5208}
5209
5210static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5211{
5212 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5213 struct polaris10_single_dpm_table *golden_mclk_table =
5214 &(data->golden_dpm_table.mclk_table);
5215 struct pp_power_state *ps;
5216 struct polaris10_power_state *polaris10_ps;
5217
5218 if (value > 20)
5219 value = 20;
5220
5221 ps = hwmgr->request_ps;
5222
5223 if (ps == NULL)
5224 return -EINVAL;
5225
5226 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5227
5228 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
5229 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5230 value / 100 +
5231 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5232
5233 return 0;
5234}
/* Dispatch table wiring the polaris10 implementations into the generic
 * powerplay hwmgr interface.  The mix of "&func" and plain "func"
 * initializers is equivalent for function pointers.
 * NOTE(review): "print_current_perforce_level" (sic) is the field's
 * actual spelling in struct pp_hwmgr_func -- the typo lives in the
 * interface definition, not here. */
static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
	.backend_init = &polaris10_hwmgr_backend_init,
	.backend_fini = &polaris10_hwmgr_backend_fini,
	.asic_setup = &polaris10_setup_asic_task,
	.dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
	.apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
	.force_dpm_level = &polaris10_force_dpm_level,
	.power_state_set = polaris10_set_power_state_tasks,
	.get_power_state_size = polaris10_get_power_state_size,
	.get_mclk = polaris10_dpm_get_mclk,
	.get_sclk = polaris10_dpm_get_sclk,
	.patch_boot_state = polaris10_dpm_patch_boot_state,
	.get_pp_table_entry = polaris10_get_pp_table_entry,
	.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
	.print_current_perforce_level = polaris10_print_current_perforce_level,
	.powerdown_uvd = polaris10_phm_powerdown_uvd,
	.powergate_uvd = polaris10_phm_powergate_uvd,
	.powergate_vce = polaris10_phm_powergate_vce,
	.disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
	.update_clock_gatings = polaris10_phm_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = polaris10_display_configuration_changed_task,
	.set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
	.get_temperature = polaris10_thermal_get_temperature,
	.stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
	.get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
	.register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
	.check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
	.check_states_equal = polaris10_check_states_equal,
	.set_fan_control_mode = polaris10_set_fan_control_mode,
	.get_fan_control_mode = polaris10_get_fan_control_mode,
	.force_clock_level = polaris10_force_clock_level,
	.print_clock_levels = polaris10_print_clock_levels,
	.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
	.get_sclk_od = polaris10_get_sclk_od,
	.set_sclk_od = polaris10_set_sclk_od,
	.get_mclk_od = polaris10_get_mclk_od,
	.set_mclk_od = polaris10_set_mclk_od,
};
5281
/**
 * Bind a pp_hwmgr instance to the polaris10 backend.
 *
 * Installs the polaris10 function table and the v1.0 pptable parser,
 * then runs the thermal controller setup.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_v1_0_funcs;
	pp_polaris10_thermal_initialize(hwmgr);

	return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644
index 41f835adba91..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ /dev/null
@@ -1,716 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <asm/div64.h>
25#include "polaris10_thermal.h"
26#include "polaris10_hwmgr.h"
27#include "polaris10_smumgr.h"
28#include "polaris10_ppsmc.h"
29#include "smu/smu_7_1_3_d.h"
30#include "smu/smu_7_1_3_sh_mask.h"
31
32int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
33 struct phm_fan_speed_info *fan_speed_info)
34{
35 if (hwmgr->thermal_controller.fanInfo.bNoFan)
36 return 0;
37
38 fan_speed_info->supports_percent_read = true;
39 fan_speed_info->supports_percent_write = true;
40 fan_speed_info->min_percent = 0;
41 fan_speed_info->max_percent = 100;
42
43 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
44 PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
45 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
46 fan_speed_info->supports_rpm_read = true;
47 fan_speed_info->supports_rpm_write = true;
48 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
49 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
50 } else {
51 fan_speed_info->min_rpm = 0;
52 fan_speed_info->max_rpm = 0;
53 }
54
55 return 0;
56}
57
/**
 * Get the current fan speed as a percentage of maximum duty cycle.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param speed output: fan speed in percent, clamped to 100.
 * @return 0 on success (or no fan), -EINVAL if the 100%-duty reading is 0.
 */
int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
		uint32_t *speed)
{
	uint32_t duty100;
	uint32_t duty;
	uint64_t tmp64;

	/* Boards without a fan report success with *speed untouched. */
	if (hwmgr->thermal_controller.fanInfo.bNoFan)
		return 0;

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);
	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_THERMAL_STATUS, FDO_PWM_DUTY);

	if (duty100 == 0)
		return -EINVAL;


	/* 64-bit math via do_div() avoids overflow in duty * 100. */
	tmp64 = (uint64_t)duty * 100;
	do_div(tmp64, duty100);
	*speed = (uint32_t)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}
86
/**
 * Get the current fan speed in RPM from the tachometer period.
 *
 * NOTE(review): 60 * crystal_clock_freq * 10000 is evaluated in 32-bit
 * arithmetic and could wrap for large xclk values -- confirm the units
 * returned by tonga_get_xclk() keep the product in range.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param speed output: fan speed in RPM.
 * @return 0 on success (or when no tach/fan), -EINVAL on a zero period.
 */
int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
	uint32_t tach_period;
	uint32_t crystal_clock_freq;

	/* No fan or no tachometer: nothing to report. */
	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
			(hwmgr->thermal_controller.fanInfo.
				ucTachometerPulsesPerRevolution == 0))
		return 0;

	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_TACH_STATUS, TACH_PERIOD);

	if (tach_period == 0)
		return -EINVAL;

	crystal_clock_freq = tonga_get_xclk(hwmgr);

	*speed = 60 * crystal_clock_freq * 10000 / tach_period;

	return 0;
}
109
110/**
111* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
112* @param hwmgr the address of the powerplay hardware manager.
113* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
114* @exception Should always succeed.
115*/
116int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
117{
118
119 if (hwmgr->fan_ctrl_is_in_default_mode) {
120 hwmgr->fan_ctrl_default_mode =
121 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
122 CG_FDO_CTRL2, FDO_PWM_MODE);
123 hwmgr->tmin =
124 PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
125 CG_FDO_CTRL2, TMIN);
126 hwmgr->fan_ctrl_is_in_default_mode = false;
127 }
128
129 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
130 CG_FDO_CTRL2, TMIN, 0);
131 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
132 CG_FDO_CTRL2, FDO_PWM_MODE, mode);
133
134 return 0;
135}
136
137/**
138* Reset Fan Speed Control to default mode.
139* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed.
141*/
142int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
146 CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
147 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
148 CG_FDO_CTRL2, TMIN, hwmgr->tmin);
149 hwmgr->fan_ctrl_is_in_default_mode = true;
150 }
151
152 return 0;
153}
154
/*
 * Hand fan management over to the SMC.
 *
 * The control style (fuzzy vs. table) is passed via SMC_MSG_ARG_0 before
 * the PPSMC_StartFanControl message; for fuzzy control the max fan output
 * is also programmed.  If a target temperature is configured it is sent
 * afterwards.  Returns the last SMC message result.
 */
static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
	int result;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_FanSpeedInTableIsRPM))
			hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
					hwmgr->thermal_controller.
					advanceFanControlParameters.usMaxFanRPM);
		else
			hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
					hwmgr->thermal_controller.
					advanceFanControlParameters.usMaxFanPWM);

	} else {
		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
	}

	if (!result && hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature)
		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanTemperatureTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucTargetTemperature);

	return result;
}
188
189
/* Ask the SMC to stop managing the fan; returns the SMC message result. */
int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
194
195/**
196* Set Fan Speed in percent.
197* @param hwmgr the address of the powerplay hardware manager.
198* @param speed is the percentage value (0% - 100%) to be set.
199* @exception Fails is the 100% setting appears to be 0.
200*/
201int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
202 uint32_t speed)
203{
204 uint32_t duty100;
205 uint32_t duty;
206 uint64_t tmp64;
207
208 if (hwmgr->thermal_controller.fanInfo.bNoFan)
209 return 0;
210
211 if (speed > 100)
212 speed = 100;
213
214 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl))
216 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
217
218 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
219 CG_FDO_CTRL1, FMAX_DUTY100);
220
221 if (duty100 == 0)
222 return -EINVAL;
223
224 tmp64 = (uint64_t)speed * duty100;
225 do_div(tmp64, 100);
226 duty = (uint32_t)tmp64;
227
228 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
229 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
230
231 return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
232}
233
234/**
235* Reset Fan Speed to default.
236* @param hwmgr the address of the powerplay hardware manager.
237* @exception Always succeeds.
238*/
239int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
240{
241 int result;
242
243 if (hwmgr->thermal_controller.fanInfo.bNoFan)
244 return 0;
245
246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_MicrocodeFanControl)) {
248 result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
249 if (!result)
250 result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
251 } else
252 result = polaris10_fan_ctrl_set_default_mode(hwmgr);
253
254 return result;
255}
256
257/**
258* Set Fan Speed in RPM.
259* @param hwmgr the address of the powerplay hardware manager.
260* @param speed is the percentage value (min - max) to be set.
261* @exception Fails is the speed not lie between min and max.
262*/
263int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
264{
265 uint32_t tach_period;
266 uint32_t crystal_clock_freq;
267
268 if (hwmgr->thermal_controller.fanInfo.bNoFan ||
269 (hwmgr->thermal_controller.fanInfo.
270 ucTachometerPulsesPerRevolution == 0) ||
271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
273 return 0;
274
275 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
276 PHM_PlatformCaps_MicrocodeFanControl))
277 polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
278
279 crystal_clock_freq = tonga_get_xclk(hwmgr);
280
281 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
282
283 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
284 CG_TACH_STATUS, TACH_PERIOD, tach_period);
285
286 return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
287}
288
289/**
290* Reads the remote temperature from the SIslands thermal controller.
291*
292* @param hwmgr The address of the hardware manager.
293*/
294int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
295{
296 int temp;
297
298 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
299 CG_MULT_THERMAL_STATUS, CTF_TEMP);
300
301 /* Bit 9 means the reading is lower than the lowest usable value. */
302 if (temp & 0x200)
303 temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
304 else
305 temp = temp & 0x1ff;
306
307 temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
308
309 return temp;
310}
311
312/**
313* Set the requested temperature range for high and low alert signals
314*
315* @param hwmgr The address of the hardware manager.
316* @param range Temperature range to be programmed for high and low alert signals
317* @exception PP_Result_BadInput if the input data is not valid.
318*/
319static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
320 uint32_t low_temp, uint32_t high_temp)
321{
322 uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
323 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
324 uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
325 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
326
327 if (low < low_temp)
328 low = low_temp;
329 if (high > high_temp)
330 high = high_temp;
331
332 if (low > high)
333 return -EINVAL;
334
335 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
336 CG_THERMAL_INT, DIG_THERM_INTH,
337 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
338 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
339 CG_THERMAL_INT, DIG_THERM_INTL,
340 (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
341 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
342 CG_THERMAL_CTRL, DIG_THERM_DPM,
343 (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
344
345 return 0;
346}
347
348/**
349* Programs thermal controller one-time setting registers
350*
351* @param hwmgr The address of the hardware manager.
352*/
353static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
354{
355 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
356 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
357 CG_TACH_CTRL, EDGE_PER_REV,
358 hwmgr->thermal_controller.fanInfo.
359 ucTachometerPulsesPerRevolution - 1);
360
361 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
362 CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
363
364 return 0;
365}
366
367/**
368* Enable thermal alerts on the RV770 thermal controller.
369*
370* @param hwmgr The address of the hardware manager.
371*/
372static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
373{
374 uint32_t alert;
375
376 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
377 CG_THERMAL_INT, THERM_INT_MASK);
378 alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
379 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
380 CG_THERMAL_INT, THERM_INT_MASK, alert);
381
382 /* send message to SMU to enable internal thermal interrupts */
383 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
384}
385
386/**
387* Disable thermal alerts on the RV770 thermal controller.
388* @param hwmgr The address of the hardware manager.
389*/
390static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
391{
392 uint32_t alert;
393
394 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
395 CG_THERMAL_INT, THERM_INT_MASK);
396 alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
397 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
398 CG_THERMAL_INT, THERM_INT_MASK, alert);
399
400 /* send message to SMU to disable internal thermal interrupts */
401 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
402}
403
404/**
405* Uninitialize the thermal controller.
406* Currently just disables alerts.
407* @param hwmgr The address of the hardware manager.
408*/
409int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
410{
411 int result = polaris10_thermal_disable_alert(hwmgr);
412
413 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
414 polaris10_fan_ctrl_set_default_mode(hwmgr);
415
416 return result;
417}
418
419/**
420* Set up the fan table to control the fan using the SMC.
421* @param hwmgr the address of the powerplay hardware manager.
422* @param pInput the pointer to input data
423* @param pOutput the pointer to output data
424* @param pStorage the pointer to temporary storage
425* @param Result the last failure code
426* @return result from set temperature range routine
427*/
428static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
429 void *input, void *output, void *storage, int result)
430{
431 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
432 SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
433 uint32_t duty100;
434 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
435 uint16_t fdo_min, slope1, slope2;
436 uint32_t reference_clock;
437 int res;
438 uint64_t tmp64;
439
440 if (data->fan_table_start == 0) {
441 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
442 PHM_PlatformCaps_MicrocodeFanControl);
443 return 0;
444 }
445
446 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
447 CG_FDO_CTRL1, FMAX_DUTY100);
448
449 if (duty100 == 0) {
450 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
451 PHM_PlatformCaps_MicrocodeFanControl);
452 return 0;
453 }
454
455 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
456 usPWMMin * duty100;
457 do_div(tmp64, 10000);
458 fdo_min = (uint16_t)tmp64;
459
460 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
461 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
462 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
463 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
464
465 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
466 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
467 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
468 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
469
470 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
471 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
472
473 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
474 thermal_controller.advanceFanControlParameters.usTMin) / 100);
475 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
476 thermal_controller.advanceFanControlParameters.usTMed) / 100);
477 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
478 thermal_controller.advanceFanControlParameters.usTMax) / 100);
479
480 fan_table.Slope1 = cpu_to_be16(slope1);
481 fan_table.Slope2 = cpu_to_be16(slope2);
482
483 fan_table.FdoMin = cpu_to_be16(fdo_min);
484
485 fan_table.HystDown = cpu_to_be16(hwmgr->
486 thermal_controller.advanceFanControlParameters.ucTHyst);
487
488 fan_table.HystUp = cpu_to_be16(1);
489
490 fan_table.HystSlope = cpu_to_be16(1);
491
492 fan_table.TempRespLim = cpu_to_be16(5);
493
494 reference_clock = tonga_get_xclk(hwmgr);
495
496 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
497 thermal_controller.advanceFanControlParameters.ulCycleDelay *
498 reference_clock) / 1600);
499
500 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
501
502 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
503 hwmgr->device, CGS_IND_REG__SMC,
504 CG_MULT_THERMAL_CTRL, TEMP_SEL);
505
506 res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
507 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
508 data->sram_end);
509
510 if (!res && hwmgr->thermal_controller.
511 advanceFanControlParameters.ucMinimumPWMLimit)
512 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
513 PPSMC_MSG_SetFanMinPwm,
514 hwmgr->thermal_controller.
515 advanceFanControlParameters.ucMinimumPWMLimit);
516
517 if (!res && hwmgr->thermal_controller.
518 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
519 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
520 PPSMC_MSG_SetFanSclkTarget,
521 hwmgr->thermal_controller.
522 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
523
524 if (res)
525 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
526 PHM_PlatformCaps_MicrocodeFanControl);
527
528 return 0;
529}
530
531/**
532* Start the fan control on the SMC.
533* @param hwmgr the address of the powerplay hardware manager.
534* @param pInput the pointer to input data
535* @param pOutput the pointer to output data
536* @param pStorage the pointer to temporary storage
537* @param Result the last failure code
538* @return result from set temperature range routine
539*/
540static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
541 void *input, void *output, void *storage, int result)
542{
543/* If the fantable setup has failed we could have disabled
544 * PHM_PlatformCaps_MicrocodeFanControl even after
545 * this function was included in the table.
546 * Make sure that we still think controlling the fan is OK.
547*/
548 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
549 PHM_PlatformCaps_MicrocodeFanControl)) {
550 polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
551 polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
552 }
553
554 return 0;
555}
556
557/**
558* Set temperature range for high and low alerts
559* @param hwmgr the address of the powerplay hardware manager.
560* @param pInput the pointer to input data
561* @param pOutput the pointer to output data
562* @param pStorage the pointer to temporary storage
563* @param Result the last failure code
564* @return result from set temperature range routine
565*/
566int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
567 void *input, void *output, void *storage, int result)
568{
569 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
570
571 if (range == NULL)
572 return -EINVAL;
573
574 return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
575}
576
577/**
578* Programs one-time setting registers
579* @param hwmgr the address of the powerplay hardware manager.
580* @param pInput the pointer to input data
581* @param pOutput the pointer to output data
582* @param pStorage the pointer to temporary storage
583* @param Result the last failure code
584* @return result from initialize thermal controller routine
585*/
586int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
587 void *input, void *output, void *storage, int result)
588{
589 return polaris10_thermal_initialize(hwmgr);
590}
591
592/**
593* Enable high and low alerts
594* @param hwmgr the address of the powerplay hardware manager.
595* @param pInput the pointer to input data
596* @param pOutput the pointer to output data
597* @param pStorage the pointer to temporary storage
598* @param Result the last failure code
599* @return result from enable alert routine
600*/
601int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
602 void *input, void *output, void *storage, int result)
603{
604 return polaris10_thermal_enable_alert(hwmgr);
605}
606
607/**
608* Disable high and low alerts
609* @param hwmgr the address of the powerplay hardware manager.
610* @param pInput the pointer to input data
611* @param pOutput the pointer to output data
612* @param pStorage the pointer to temporary storage
613* @param Result the last failure code
614* @return result from disable alert routine
615*/
616static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
617 void *input, void *output, void *storage, int result)
618{
619 return polaris10_thermal_disable_alert(hwmgr);
620}
621
622static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
623 void *input, void *output, void *storage, int result)
624{
625 int ret;
626 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
627 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
628 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
629
630 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
631 return 0;
632
633 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
634 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
635
636 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
637 0 : -1;
638
639 if (!ret)
640 /* If this param is not changed, this function could fire unnecessarily */
641 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
642
643 return ret;
644}
645
646static const struct phm_master_table_item
647polaris10_thermal_start_thermal_controller_master_list[] = {
648 {NULL, tf_polaris10_thermal_initialize},
649 {NULL, tf_polaris10_thermal_set_temperature_range},
650 {NULL, tf_polaris10_thermal_enable_alert},
651 {NULL, tf_polaris10_thermal_avfs_enable},
652/* We should restrict performance levels to low before we halt the SMC.
653 * On the other hand we are still in boot state when we do this
654 * so it would be pointless.
655 * If this assumption changes we have to revisit this table.
656 */
657 {NULL, tf_polaris10_thermal_setup_fan_table},
658 {NULL, tf_polaris10_thermal_start_smc_fan_control},
659 {NULL, NULL}
660};
661
662static const struct phm_master_table_header
663polaris10_thermal_start_thermal_controller_master = {
664 0,
665 PHM_MasterTableFlag_None,
666 polaris10_thermal_start_thermal_controller_master_list
667};
668
669static const struct phm_master_table_item
670polaris10_thermal_set_temperature_range_master_list[] = {
671 {NULL, tf_polaris10_thermal_disable_alert},
672 {NULL, tf_polaris10_thermal_set_temperature_range},
673 {NULL, tf_polaris10_thermal_enable_alert},
674 {NULL, NULL}
675};
676
677static const struct phm_master_table_header
678polaris10_thermal_set_temperature_range_master = {
679 0,
680 PHM_MasterTableFlag_None,
681 polaris10_thermal_set_temperature_range_master_list
682};
683
684int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
685{
686 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
687 polaris10_fan_ctrl_set_default_mode(hwmgr);
688 return 0;
689}
690
691/**
692* Initializes the thermal controller related functions in the Hardware Manager structure.
693* @param hwmgr The address of the hardware manager.
694* @exception Any error code from the low-level communication.
695*/
696int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
697{
698 int result;
699
700 result = phm_construct_table(hwmgr,
701 &polaris10_thermal_set_temperature_range_master,
702 &(hwmgr->set_temperature_range));
703
704 if (!result) {
705 result = phm_construct_table(hwmgr,
706 &polaris10_thermal_start_thermal_controller_master,
707 &(hwmgr->start_thermal_controller));
708 if (result)
709 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
710 }
711
712 if (!result)
713 hwmgr->fan_ctrl_is_in_default_mode = true;
714 return result;
715}
716
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644
index 62f8cbc2d590..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _POLARIS10_THERMAL_H_
25#define _POLARIS10_THERMAL_H_
26
27#include "hwmgr.h"
28
29#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
30#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
31
32#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
33#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
34
35#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
36#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
37
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41
42extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
43extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
44extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
45
46extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
47extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
48extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
49extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
50extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
51extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
52extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
53extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
54extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
55extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
56extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
57extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
58extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
59extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
60
61#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 7e405b04c2c5..6eb6db199250 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -21,9 +21,53 @@
21 * 21 *
22 */ 22 */
23 23
24#include "polaris10_clockpowergating.h" 24#include "smu7_hwmgr.h"
25#include "smu7_clockpowergating.h"
26#include "smu7_common.h"
25 27
26int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) 28static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
29{
30 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
31 PPSMC_MSG_UVDDPM_Enable :
32 PPSMC_MSG_UVDDPM_Disable);
33}
34
35static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
36{
37 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
38 PPSMC_MSG_VCEDPM_Enable :
39 PPSMC_MSG_VCEDPM_Disable);
40}
41
42static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
43{
44 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
45 PPSMC_MSG_SAMUDPM_Enable :
46 PPSMC_MSG_SAMUDPM_Disable);
47}
48
49static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
50{
51 if (!bgate)
52 smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
53 return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
54}
55
56static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
57{
58 if (!bgate)
59 smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
60 return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
61}
62
63static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
64{
65 if (!bgate)
66 smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
67 return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
68}
69
70int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
27{ 71{
28 if (phm_cf_want_uvd_power_gating(hwmgr)) 72 if (phm_cf_want_uvd_power_gating(hwmgr))
29 return smum_send_msg_to_smc(hwmgr->smumgr, 73 return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -31,7 +75,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
31 return 0; 75 return 0;
32} 76}
33 77
34static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr) 78int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
35{ 79{
36 if (phm_cf_want_uvd_power_gating(hwmgr)) { 80 if (phm_cf_want_uvd_power_gating(hwmgr)) {
37 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 81 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +91,7 @@ static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
47 return 0; 91 return 0;
48} 92}
49 93
50static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr) 94int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
51{ 95{
52 if (phm_cf_want_vce_power_gating(hwmgr)) 96 if (phm_cf_want_vce_power_gating(hwmgr))
53 return smum_send_msg_to_smc(hwmgr->smumgr, 97 return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +99,7 @@ static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
55 return 0; 99 return 0;
56} 100}
57 101
58static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr) 102int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
59{ 103{
60 if (phm_cf_want_vce_power_gating(hwmgr)) 104 if (phm_cf_want_vce_power_gating(hwmgr))
61 return smum_send_msg_to_smc(hwmgr->smumgr, 105 return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +107,7 @@ static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
63 return 0; 107 return 0;
64} 108}
65 109
66static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr) 110int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
67{ 111{
68 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 112 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
69 PHM_PlatformCaps_SamuPowerGating)) 113 PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +116,7 @@ static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
72 return 0; 116 return 0;
73} 117}
74 118
75static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr) 119int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
76{ 120{
77 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 121 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
78 PHM_PlatformCaps_SamuPowerGating)) 122 PHM_PlatformCaps_SamuPowerGating))
@@ -81,27 +125,24 @@ static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
81 return 0; 125 return 0;
82} 126}
83 127
84int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) 128int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
85{ 129{
86 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 130 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
87 131
88 data->uvd_power_gated = false; 132 data->uvd_power_gated = false;
89 data->vce_power_gated = false; 133 data->vce_power_gated = false;
90 data->samu_power_gated = false; 134 data->samu_power_gated = false;
91 135
92 polaris10_phm_powerup_uvd(hwmgr); 136 smu7_powerup_uvd(hwmgr);
93 polaris10_phm_powerup_vce(hwmgr); 137 smu7_powerup_vce(hwmgr);
94 polaris10_phm_powerup_samu(hwmgr); 138 smu7_powerup_samu(hwmgr);
95 139
96 return 0; 140 return 0;
97} 141}
98 142
99int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) 143int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
100{ 144{
101 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 145 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
102
103 if (data->uvd_power_gated == bgate)
104 return 0;
105 146
106 data->uvd_power_gated = bgate; 147 data->uvd_power_gated = bgate;
107 148
@@ -109,11 +150,11 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
109 cgs_set_clockgating_state(hwmgr->device, 150 cgs_set_clockgating_state(hwmgr->device,
110 AMD_IP_BLOCK_TYPE_UVD, 151 AMD_IP_BLOCK_TYPE_UVD,
111 AMD_CG_STATE_GATE); 152 AMD_CG_STATE_GATE);
112 polaris10_update_uvd_dpm(hwmgr, true); 153 smu7_update_uvd_dpm(hwmgr, true);
113 polaris10_phm_powerdown_uvd(hwmgr); 154 smu7_powerdown_uvd(hwmgr);
114 } else { 155 } else {
115 polaris10_phm_powerup_uvd(hwmgr); 156 smu7_powerup_uvd(hwmgr);
116 polaris10_update_uvd_dpm(hwmgr, false); 157 smu7_update_uvd_dpm(hwmgr, false);
117 cgs_set_clockgating_state(hwmgr->device, 158 cgs_set_clockgating_state(hwmgr->device,
118 AMD_IP_BLOCK_TYPE_UVD, 159 AMD_IP_BLOCK_TYPE_UVD,
119 AMD_CG_STATE_UNGATE); 160 AMD_CG_STATE_UNGATE);
@@ -122,9 +163,9 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
122 return 0; 163 return 0;
123} 164}
124 165
125int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) 166int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
126{ 167{
127 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 168 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
128 169
129 if (data->vce_power_gated == bgate) 170 if (data->vce_power_gated == bgate)
130 return 0; 171 return 0;
@@ -135,11 +176,11 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
135 cgs_set_clockgating_state(hwmgr->device, 176 cgs_set_clockgating_state(hwmgr->device,
136 AMD_IP_BLOCK_TYPE_VCE, 177 AMD_IP_BLOCK_TYPE_VCE,
137 AMD_CG_STATE_GATE); 178 AMD_CG_STATE_GATE);
138 polaris10_update_vce_dpm(hwmgr, true); 179 smu7_update_vce_dpm(hwmgr, true);
139 polaris10_phm_powerdown_vce(hwmgr); 180 smu7_powerdown_vce(hwmgr);
140 } else { 181 } else {
141 polaris10_phm_powerup_vce(hwmgr); 182 smu7_powerup_vce(hwmgr);
142 polaris10_update_vce_dpm(hwmgr, false); 183 smu7_update_vce_dpm(hwmgr, false);
143 cgs_set_clockgating_state(hwmgr->device, 184 cgs_set_clockgating_state(hwmgr->device,
144 AMD_IP_BLOCK_TYPE_VCE, 185 AMD_IP_BLOCK_TYPE_VCE,
145 AMD_CG_STATE_UNGATE); 186 AMD_CG_STATE_UNGATE);
@@ -147,9 +188,9 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
147 return 0; 188 return 0;
148} 189}
149 190
150int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) 191int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
151{ 192{
152 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 193 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
153 194
154 if (data->samu_power_gated == bgate) 195 if (data->samu_power_gated == bgate)
155 return 0; 196 return 0;
@@ -157,22 +198,25 @@ int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
157 data->samu_power_gated = bgate; 198 data->samu_power_gated = bgate;
158 199
159 if (bgate) { 200 if (bgate) {
160 polaris10_update_samu_dpm(hwmgr, true); 201 smu7_update_samu_dpm(hwmgr, true);
161 polaris10_phm_powerdown_samu(hwmgr); 202 smu7_powerdown_samu(hwmgr);
162 } else { 203 } else {
163 polaris10_phm_powerup_samu(hwmgr); 204 smu7_powerup_samu(hwmgr);
164 polaris10_update_samu_dpm(hwmgr, false); 205 smu7_update_samu_dpm(hwmgr, false);
165 } 206 }
166 207
167 return 0; 208 return 0;
168} 209}
169 210
170int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, 211int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
171 const uint32_t *msg_id) 212 const uint32_t *msg_id)
172{ 213{
173 PPSMC_Msg msg; 214 PPSMC_Msg msg;
174 uint32_t value; 215 uint32_t value;
175 216
217 if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
218 return 0;
219
176 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { 220 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
177 case PP_GROUP_GFX: 221 case PP_GROUP_GFX:
178 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { 222 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
@@ -185,7 +229,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
185 229
186 if (smum_send_msg_to_smc_with_parameter( 230 if (smum_send_msg_to_smc_with_parameter(
187 hwmgr->smumgr, msg, value)) 231 hwmgr->smumgr, msg, value))
188 return -1; 232 return -EINVAL;
189 } 233 }
190 if (PP_STATE_SUPPORT_LS & *msg_id) { 234 if (PP_STATE_SUPPORT_LS & *msg_id) {
191 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS 235 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
@@ -195,7 +239,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
195 239
196 if (smum_send_msg_to_smc_with_parameter( 240 if (smum_send_msg_to_smc_with_parameter(
197 hwmgr->smumgr, msg, value)) 241 hwmgr->smumgr, msg, value))
198 return -1; 242 return -EINVAL;
199 } 243 }
200 break; 244 break;
201 245
@@ -208,7 +252,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
208 252
209 if (smum_send_msg_to_smc_with_parameter( 253 if (smum_send_msg_to_smc_with_parameter(
210 hwmgr->smumgr, msg, value)) 254 hwmgr->smumgr, msg, value))
211 return -1; 255 return -EINVAL;
212 } 256 }
213 257
214 if (PP_STATE_SUPPORT_LS & *msg_id) { 258 if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -219,7 +263,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
219 263
220 if (smum_send_msg_to_smc_with_parameter( 264 if (smum_send_msg_to_smc_with_parameter(
221 hwmgr->smumgr, msg, value)) 265 hwmgr->smumgr, msg, value))
222 return -1; 266 return -EINVAL;
223 } 267 }
224 break; 268 break;
225 269
@@ -232,7 +276,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
232 276
233 if (smum_send_msg_to_smc_with_parameter( 277 if (smum_send_msg_to_smc_with_parameter(
234 hwmgr->smumgr, msg, value)) 278 hwmgr->smumgr, msg, value))
235 return -1; 279 return -EINVAL;
236 } 280 }
237 break; 281 break;
238 282
@@ -245,7 +289,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
245 289
246 if (smum_send_msg_to_smc_with_parameter( 290 if (smum_send_msg_to_smc_with_parameter(
247 hwmgr->smumgr, msg, value)) 291 hwmgr->smumgr, msg, value))
248 return -1; 292 return -EINVAL;
249 } 293 }
250 break; 294 break;
251 295
@@ -259,12 +303,12 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
259 303
260 if (smum_send_msg_to_smc_with_parameter( 304 if (smum_send_msg_to_smc_with_parameter(
261 hwmgr->smumgr, msg, value)) 305 hwmgr->smumgr, msg, value))
262 return -1; 306 return -EINVAL;
263 } 307 }
264 break; 308 break;
265 309
266 default: 310 default:
267 return -1; 311 return -EINVAL;
268 } 312 }
269 break; 313 break;
270 314
@@ -279,7 +323,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
279 323
280 if (smum_send_msg_to_smc_with_parameter( 324 if (smum_send_msg_to_smc_with_parameter(
281 hwmgr->smumgr, msg, value)) 325 hwmgr->smumgr, msg, value))
282 return -1; 326 return -EINVAL;
283 } 327 }
284 if (PP_STATE_SUPPORT_LS & *msg_id) { 328 if (PP_STATE_SUPPORT_LS & *msg_id) {
285 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? 329 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -289,7 +333,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
289 333
290 if (smum_send_msg_to_smc_with_parameter( 334 if (smum_send_msg_to_smc_with_parameter(
291 hwmgr->smumgr, msg, value)) 335 hwmgr->smumgr, msg, value))
292 return -1; 336 return -EINVAL;
293 } 337 }
294 break; 338 break;
295 339
@@ -302,7 +346,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
302 346
303 if (smum_send_msg_to_smc_with_parameter( 347 if (smum_send_msg_to_smc_with_parameter(
304 hwmgr->smumgr, msg, value)) 348 hwmgr->smumgr, msg, value))
305 return -1; 349 return -EINVAL;
306 } 350 }
307 351
308 if (PP_STATE_SUPPORT_LS & *msg_id) { 352 if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -313,7 +357,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
313 357
314 if (smum_send_msg_to_smc_with_parameter( 358 if (smum_send_msg_to_smc_with_parameter(
315 hwmgr->smumgr, msg, value)) 359 hwmgr->smumgr, msg, value))
316 return -1; 360 return -EINVAL;
317 } 361 }
318 break; 362 break;
319 363
@@ -326,7 +370,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
326 370
327 if (smum_send_msg_to_smc_with_parameter( 371 if (smum_send_msg_to_smc_with_parameter(
328 hwmgr->smumgr, msg, value)) 372 hwmgr->smumgr, msg, value))
329 return -1; 373 return -EINVAL;
330 } 374 }
331 if (PP_STATE_SUPPORT_LS & *msg_id) { 375 if (PP_STATE_SUPPORT_LS & *msg_id) {
332 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? 376 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -336,7 +380,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
336 380
337 if (smum_send_msg_to_smc_with_parameter( 381 if (smum_send_msg_to_smc_with_parameter(
338 hwmgr->smumgr, msg, value)) 382 hwmgr->smumgr, msg, value))
339 return -1; 383 return -EINVAL;
340 } 384 }
341 break; 385 break;
342 386
@@ -349,7 +393,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
349 393
350 if (smum_send_msg_to_smc_with_parameter( 394 if (smum_send_msg_to_smc_with_parameter(
351 hwmgr->smumgr, msg, value)) 395 hwmgr->smumgr, msg, value))
352 return -1; 396 return -EINVAL;
353 } 397 }
354 398
355 if (PP_STATE_SUPPORT_LS & *msg_id) { 399 if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -360,7 +404,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
360 404
361 if (smum_send_msg_to_smc_with_parameter( 405 if (smum_send_msg_to_smc_with_parameter(
362 hwmgr->smumgr, msg, value)) 406 hwmgr->smumgr, msg, value))
363 return -1; 407 return -EINVAL;
364 } 408 }
365 break; 409 break;
366 410
@@ -373,7 +417,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
373 417
374 if (smum_send_msg_to_smc_with_parameter( 418 if (smum_send_msg_to_smc_with_parameter(
375 hwmgr->smumgr, msg, value)) 419 hwmgr->smumgr, msg, value))
376 return -1; 420 return -EINVAL;
377 } 421 }
378 422
379 if (PP_STATE_SUPPORT_LS & *msg_id) { 423 if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -384,7 +428,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
384 428
385 if (smum_send_msg_to_smc_with_parameter( 429 if (smum_send_msg_to_smc_with_parameter(
386 hwmgr->smumgr, msg, value)) 430 hwmgr->smumgr, msg, value))
387 return -1; 431 return -EINVAL;
388 } 432 }
389 break; 433 break;
390 434
@@ -397,18 +441,18 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
397 441
398 if (smum_send_msg_to_smc_with_parameter( 442 if (smum_send_msg_to_smc_with_parameter(
399 hwmgr->smumgr, msg, value)) 443 hwmgr->smumgr, msg, value))
400 return -1; 444 return -EINVAL;
401 } 445 }
402 break; 446 break;
403 447
404 default: 448 default:
405 return -1; 449 return -EINVAL;
406 450
407 } 451 }
408 break; 452 break;
409 453
410 default: 454 default:
411 return -1; 455 return -EINVAL;
412 456
413 } 457 }
414 458
@@ -419,7 +463,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
419 * Powerplay will only control the static per CU Power Gating. 463 * Powerplay will only control the static per CU Power Gating.
420 * Dynamic per CU Power Gating will be done in gfx. 464 * Dynamic per CU Power Gating will be done in gfx.
421 */ 465 */
422int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) 466int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
423{ 467{
424 struct cgs_system_info sys_info = {0}; 468 struct cgs_system_info sys_info = {0};
425 uint32_t active_cus; 469 uint32_t active_cus;
@@ -432,8 +476,8 @@ int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable
432 476
433 if (result) 477 if (result)
434 return -EINVAL; 478 return -EINVAL;
435 else 479
436 active_cus = sys_info.value; 480 active_cus = sys_info.value;
437 481
438 if (enable) 482 if (enable)
439 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 483 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 33af5f511ab8..d52a28c343e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -21,15 +21,20 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef _FIJI_CLOCK_POWER_GATING_H_ 24#ifndef _SMU7_CLOCK_POWER_GATING_H_
25#define _FIJI_CLOCK_POWER_GATING_H_ 25#define _SMU7_CLOCK__POWER_GATING_H_
26 26
27#include "fiji_hwmgr.h" 27#include "smu7_hwmgr.h"
28#include "pp_asicblocks.h" 28#include "pp_asicblocks.h"
29 29
30extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); 30int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
31extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); 31int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
32extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); 32int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
33extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); 33int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
34extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); 34int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
35#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ 35int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
36int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
37 const uint32_t *msg_id);
38int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
39
40#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
new file mode 100644
index 000000000000..f967613191cf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _SMU7_DYN_DEFAULTS_H
25#define _SMU7_DYN_DEFAULTS_H
26
27
28/* We need to fill in the default values */
29
30
31#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
32#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
33#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
34#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
35#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
36#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
37#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
38#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
39
40
41#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200
42#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0
43#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8
44#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
45#define SMU7_REFERENCEDIVIDER_DFLT 4
46
47#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687
48
49#define SMU7_CGULVPARAMETER_DFLT 0x00040035
50#define SMU7_CGULVCONTROL_DFLT 0x00007450
51#define SMU7_TARGETACTIVITY_DFLT 50
52#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
53
54#endif
55
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
new file mode 100644
index 000000000000..a3832f2d893b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -0,0 +1,4359 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/fb.h>
26#include <asm/div64.h>
27#include "linux/delay.h"
28#include "pp_acpi.h"
29#include "pp_debug.h"
30#include "ppatomctrl.h"
31#include "atombios.h"
32#include "pptable_v1_0.h"
33#include "pppcielanes.h"
34#include "amd_pcie_helpers.h"
35#include "hardwaremanager.h"
36#include "process_pptables_v1_0.h"
37#include "cgs_common.h"
38
39#include "smu7_common.h"
40
41#include "hwmgr.h"
42#include "smu7_hwmgr.h"
43#include "smu7_powertune.h"
44#include "smu7_dyn_defaults.h"
45#include "smu7_thermal.h"
46#include "smu7_clockpowergating.h"
47#include "processpptables.h"
48
/* Memory-controller ARB register-set selectors; passed as arb_src /
 * arb_dest to smu7_copy_and_switch_arb_sets().
 */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

/* Indirect-register address window for SMC clock-gating registers */
#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1   625
#define VOLTAGE_VID_OFFSET_SCALE2   100

/* Memory frequency / latency thresholds -- units not established in
 * this file; confirm against callers before relying on them.
 */
#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

/* MC_SEQ_MISC0 field identifying GDDR5 memory */
#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)


/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

/* Magic tag stamped into pp_hw_power_state::magic for SMU7 power states;
 * checked by the cast helpers below.
 */
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
91
/* Downcast a generic pp_hw_power_state to the SMU7-specific power state.
 * Returns NULL (after logging via PP_ASSERT_WITH_CODE) when the magic tag
 * does not match, i.e. the state was not created by this hwmgr.
 */
struct smu7_power_state *cast_phw_smu7_power_state(
				struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (struct smu7_power_state *)hw_ps;
}
101
/* Const variant of cast_phw_smu7_power_state(): downcast a read-only
 * pp_hw_power_state to the SMU7 power state, or NULL on a magic mismatch.
 */
const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
111
112/**
113 * Find the MC microcode version and store it in the HwMgr struct
114 *
115 * @param hwmgr the address of the powerplay hardware manager.
116 * @return always 0
117 */
118int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
119{
120 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
121
122 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
123
124 return 0;
125}
126
127uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
128{
129 uint32_t speedCntl = 0;
130
131 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
132 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
133 ixPCIE_LC_SPEED_CNTL);
134 return((uint16_t)PHM_GET_FIELD(speedCntl,
135 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
136}
137
138int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
139{
140 uint32_t link_width;
141
142 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
143 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
144 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
145
146 PP_ASSERT_WITH_CODE((7 >= link_width),
147 "Invalid PCIe lane width!", return 0);
148
149 return decode_pcie_lane_width(link_width);
150}
151
/**
* Enable voltage control
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*
* Sends PPSMC_MSG_Voltage_Cntl_Enable to the SMC only when the
* PP_SMC_VOLTAGE_CONTROL_MASK feature bit is set; the smum_send_msg_to_smc
* result is deliberately ignored and 0 is always returned.
* (Original comment named a non-existent "pHwMgr" parameter and claimed a
* PP_Result_OK return -- corrected above.)
*/
int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}
165
166/**
167* Checks if we want to support voltage control
168*
169* @param hwmgr the address of the powerplay hardware manager.
170*/
171static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
172{
173 const struct smu7_hwmgr *data =
174 (const struct smu7_hwmgr *)(hwmgr->backend);
175
176 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
177}
178
/**
* Enable voltage control
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*
* Sets GENERAL_PWRMGT::VOLT_PWRMGT_EN so the hardware performs voltage
* power management.
*/
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
193
194static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
195 struct phm_clock_voltage_dependency_table *voltage_dependency_table
196 )
197{
198 uint32_t i;
199
200 PP_ASSERT_WITH_CODE((NULL != voltage_table),
201 "Voltage Dependency Table empty.", return -EINVAL;);
202
203 voltage_table->mask_low = 0;
204 voltage_table->phase_delay = 0;
205 voltage_table->count = voltage_dependency_table->count;
206
207 for (i = 0; i < voltage_dependency_table->count; i++) {
208 voltage_table->entries[i].value =
209 voltage_dependency_table->entries[i].v;
210 voltage_table->entries[i].smio_low = 0;
211 }
212
213 return 0;
214}
215
216
217/**
218* Create Voltage Tables.
219*
220* @param hwmgr the address of the powerplay hardware manager.
221* @return always 0
222*/
223static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
224{
225 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
226 struct phm_ppt_v1_information *table_info =
227 (struct phm_ppt_v1_information *)hwmgr->pptable;
228 int result = 0;
229 uint32_t tmp;
230
231 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
232 result = atomctrl_get_voltage_table_v3(hwmgr,
233 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
234 &(data->mvdd_voltage_table));
235 PP_ASSERT_WITH_CODE((0 == result),
236 "Failed to retrieve MVDD table.",
237 return result);
238 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
239 if (hwmgr->pp_table_version == PP_TABLE_V1)
240 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
241 table_info->vdd_dep_on_mclk);
242 else if (hwmgr->pp_table_version == PP_TABLE_V0)
243 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
244 hwmgr->dyn_state.mvdd_dependency_on_mclk);
245
246 PP_ASSERT_WITH_CODE((0 == result),
247 "Failed to retrieve SVI2 MVDD table from dependancy table.",
248 return result;);
249 }
250
251 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
252 result = atomctrl_get_voltage_table_v3(hwmgr,
253 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
254 &(data->vddci_voltage_table));
255 PP_ASSERT_WITH_CODE((0 == result),
256 "Failed to retrieve VDDCI table.",
257 return result);
258 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
259 if (hwmgr->pp_table_version == PP_TABLE_V1)
260 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
261 table_info->vdd_dep_on_mclk);
262 else if (hwmgr->pp_table_version == PP_TABLE_V0)
263 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
264 hwmgr->dyn_state.vddci_dependency_on_mclk);
265 PP_ASSERT_WITH_CODE((0 == result),
266 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
267 return result);
268 }
269
270 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
271 /* VDDGFX has only SVI2 voltage control */
272 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
273 table_info->vddgfx_lookup_table);
274 PP_ASSERT_WITH_CODE((0 == result),
275 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
276 }
277
278
279 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
280 result = atomctrl_get_voltage_table_v3(hwmgr,
281 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
282 &data->vddc_voltage_table);
283 PP_ASSERT_WITH_CODE((0 == result),
284 "Failed to retrieve VDDC table.", return result;);
285 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
286
287 if (hwmgr->pp_table_version == PP_TABLE_V0)
288 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
289 hwmgr->dyn_state.vddc_dependency_on_mclk);
290 else if (hwmgr->pp_table_version == PP_TABLE_V1)
291 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
292 table_info->vddc_lookup_table);
293
294 PP_ASSERT_WITH_CODE((0 == result),
295 "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
296 }
297
298 tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
299 PP_ASSERT_WITH_CODE(
300 (data->vddc_voltage_table.count <= tmp),
301 "Too many voltage values for VDDC. Trimming to fit state table.",
302 phm_trim_voltage_table_to_fit_state_table(tmp,
303 &(data->vddc_voltage_table)));
304
305 tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
306 PP_ASSERT_WITH_CODE(
307 (data->vddgfx_voltage_table.count <= tmp),
308 "Too many voltage values for VDDC. Trimming to fit state table.",
309 phm_trim_voltage_table_to_fit_state_table(tmp,
310 &(data->vddgfx_voltage_table)));
311
312 tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
313 PP_ASSERT_WITH_CODE(
314 (data->vddci_voltage_table.count <= tmp),
315 "Too many voltage values for VDDCI. Trimming to fit state table.",
316 phm_trim_voltage_table_to_fit_state_table(tmp,
317 &(data->vddci_voltage_table)));
318
319 tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
320 PP_ASSERT_WITH_CODE(
321 (data->mvdd_voltage_table.count <= tmp),
322 "Too many voltage values for MVDD. Trimming to fit state table.",
323 phm_trim_voltage_table_to_fit_state_table(tmp,
324 &(data->mvdd_voltage_table)));
325
326 return 0;
327}
328
/**
* Programs static screen detection parameters
*
* Writes the static-screen threshold unit and threshold (previously cached
* in the hwmgr backend) into CG_STATIC_SCREEN_PARAMETER.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
351
/**
* Setup display gap for glitch free memory clock switching.
*
* Read-modify-write of CG_DISPLAY_GAP_CNTL: DISP_GAP is set to IGNORE and
* DISP_GAP_MCHG to VBLANK, so memory clock changes happen during vblank.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}
375
/**
* Programs activity state transition voting clients
*
* Clears the SCLK/BUSY counter resets, then writes the eight cached
* voting-rights-client masks into CG_FREQ_TRAN_VOTING_0..7.
* Counterpart of smu7_clear_voting_clients().
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
411
/* Inverse of smu7_program_voting_clients(): asserts the SCLK/BUSY counter
 * resets and zeroes all eight CG_FREQ_TRAN_VOTING_* registers before DPM
 * is disabled. Always returns 0.
 */
static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, 0);

	return 0;
}
439
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 *
 * Only F0 and F1 are supported as source or destination; any other
 * selector returns -EINVAL. The final MC_CG_CONFIG |= 0x0F and
 * MC_ARB_CG::CG_ARB_REQ write make arb_dest the active set.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Snapshot the source set's DRAM timings and burst time */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Replay the snapshot into the destination set */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
488
/* Ask the SMC to reset all its settings to defaults; returns the
 * smum_send_msg_to_smc() result.
 */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}
493
/**
* Initial switch from ARB F0->F1
*
* @param hwmgr the address of the powerplay hardware manager.
* @return result of smu7_copy_and_switch_arb_sets (0 on success)
* This function is to be called from the SetPowerState table.
*/
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
506
507static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
508{
509 uint32_t tmp;
510
511 tmp = (cgs_read_ind_register(hwmgr->device,
512 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
513 0x0000ff00) >> 8;
514
515 if (tmp == MC_CG_ARB_FREQ_F0)
516 return 0;
517
518 return smu7_copy_and_switch_arb_sets(hwmgr,
519 tmp, MC_CG_ARB_FREQ_F0);
520}
521
/* Build the default PCIe speed/lane DPM table, either from the V1
 * pptable's pcie_table (entry 0 skipped, capped at the SMU link-level
 * limit) or from a hardcoded 6-entry fallback. An extra boot-level entry
 * is appended past .count in both cases.
 * Returns 0, or -EINVAL when neither performance nor power-saving PCIe
 * levels are in use.
 */
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	/* table_info (and hence pcie_table) exists only for V1 pptables */
	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	/* If only one of the two level sets is in use, mirror it into the other */
	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
			tmp,
			MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If PCIE table from PPTable have ULV entry + 8 entries,
		 * then ignore the last entry.*/
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		/* pptable entry 0 is skipped; DPM entry i-1 <- pptable entry i */
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));

	return 0;
}
614
/* Zero the whole DPM table and re-initialize each sub-table (sclk, mclk,
 * vddc, vddci, mvdd) with its SMU per-table level limit. Always returns 0.
 */
static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
				smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_GRAPHICS),
					MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr->smumgr,
				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
				smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_VDDC),
					MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr->smumgr,
				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
				smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_MVDD),
					MAX_REGULAR_DPM_NUMBER);
	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);


	/* Initialize Sclk DPM table based on allow Sclk values*/
	data->dpm_table.sclk_table.count = 0;

	/* Deduplicate consecutive equal clocks while copying */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
			allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
	/* NOTE(review): the loop runs over the SCLK table's count but reads
	 * voltages from the MCLK dependency table; also std_voltage_table is
	 * indexed without a NULL/size check. Looks like a possible
	 * copy-paste issue -- confirm against the original PHW source before
	 * changing.
	 */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allow Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/*
		 * Initialize MVDD DPM table based on allow Mclk
		 * values
		 */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}
742
/* V1-pptable counterpart of smu7_setup_dpm_tables_v0(): build the SCLK and
 * MCLK DPM tables from the vdd_dep_on_sclk / vdd_dep_on_mclk dependency
 * tables, deduplicating consecutive equal clocks. Only level 0 is enabled.
 * Returns 0, or -EINVAL when a mandatory table is missing or empty.
 */
static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allow Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
						dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}

	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	return 0;
}
804
/* Reset all DPM tables, rebuild them from the V1 or V0 pptable, set up the
 * PCIe table, and snapshot the result into golden_dpm_table.
 * Always returns 0; note the sub-setup return codes are ignored, so a
 * failed table build still yields a (partially empty) golden copy.
 */
int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));
	return 0;
}
823
/* Return the reference clock (XCLK) frequency.
 * When TCLK is muxed onto XCLK, the fixed TCLK value is returned;
 * otherwise the display ref_clock is used, divided by 4 when
 * CG_CLKPIN_CNTL::XTALIN_DIVIDE is set.
 * (Units follow mode_info.ref_clock / TCLK -- not established here.)
 */
uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock, tmp;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tmp)
		return TCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);
	reference_clock = mode_info.ref_clock;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	if (0 != tmp)
		return reference_clock / 4;

	return reference_clock;
}
847
848static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
849{
850
851 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
852 PHM_PlatformCaps_RegulatorHot))
853 return smum_send_msg_to_smc(hwmgr->smumgr,
854 PPSMC_MSG_EnableVRHotGPIOInterrupt);
855
856 return 0;
857}
858
/* Clear SCLK_PWRMGT_CNTL::SCLK_PWRMGT_OFF, i.e. (re-)enable SCLK power
 * management. Always returns 0.
 */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
865
866static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
867{
868 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
869
870 if (data->ulv_supported)
871 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
872
873 return 0;
874}
875
876static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
877{
878 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
879
880 if (data->ulv_supported)
881 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
882
883 return 0;
884}
885
/* Turn the SMC's master deep-sleep switch ON when the SclkDeepSleep
 * platform cap is set, otherwise explicitly turn it OFF.
 * Returns 0 on success, -EINVAL (after logging) when the SMC message fails.
 */
static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}
905
/* Turn the SMC's master deep-sleep switch OFF, but only when the
 * SclkDeepSleep platform cap is set (otherwise nothing was enabled).
 * Returns 0 on success, -EINVAL (after logging) when the SMC message fails.
 */
static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}
920
921static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
922{
923 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
924 uint32_t soft_register_value = 0;
925 uint32_t handshake_disables_offset = data->soft_regs_start
926 + smum_get_offsetof(hwmgr->smumgr,
927 SMU_SoftRegisters, HandshakeDisables);
928
929 soft_register_value = cgs_read_ind_register(hwmgr->device,
930 CGS_IND_REG__SMC, handshake_disables_offset);
931 soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
932 SMU_UVD_MCLK_HANDSHAKE_DISABLE);
933 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
934 handshake_disables_offset, soft_register_value);
935 return 0;
936}
937
/* Turn on SCLK DPM and (optionally) MCLK DPM in the SMC.
 *
 * Before enabling MCLK DPM the UVD/MCLK handshake is disabled unless the
 * PP_UVD_HANDSHAKE_MASK feature bit is set. After MCLK DPM is enabled,
 * CAC is enabled and the local CAC (LCAC) blocks are programmed in two
 * passes separated by a 10us delay.
 * NOTE(review): the LCAC values (0x5/0x100005 then 0x400005/0x500005)
 * and the delay are taken as-is from the vendor sequence; presumably a
 * required hardware arming sequence - do not reorder without confirming.
 *
 * Returns 0 on success, -EINVAL if either SMC enable message fails.
 */
static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -EINVAL);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* by default, break the UVD/MCLK handshake unless the feature
		 * mask explicitly asks for it */
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_Enable)),
		"Failed to enable MCLK DPM during DPM Start Function!",
		return -EINVAL);

		/* enable memory-controller CAC */
		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* first LCAC programming pass */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
		udelay(10);
		/* second LCAC programming pass */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
	}

	return 0;
}
972
/* Kick off DPM: enable global power management and sclk deep sleep,
 * prepare PCIe DPM, enable voltage DPM, then SCLK/MCLK DPM, then PCIe
 * DPM, and finally (cap-gated) the AC/DC GPIO interrupt.
 * The order of these steps follows the vendor bring-up sequence.
 *
 * Returns 0 on success, -EINVAL when a mandatory step fails; an
 * AC/DC GPIO interrupt failure is only logged (see empty action in the
 * final assert).
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/*enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM: program the voltage-change timeout soft
	 * register and release the PCIe link-controller reset */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -EINVAL);


	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/* failure here is deliberately non-fatal: the assert's
		 * action is empty, so we only log and continue */
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1027
1028static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1029{
1030 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1031
1032 /* disable SCLK dpm */
1033 if (!data->sclk_dpm_key_disabled)
1034 PP_ASSERT_WITH_CODE(
1035 (smum_send_msg_to_smc(hwmgr->smumgr,
1036 PPSMC_MSG_DPM_Disable) == 0),
1037 "Failed to disable SCLK DPM!",
1038 return -EINVAL);
1039
1040 /* disable MCLK dpm */
1041 if (!data->mclk_dpm_key_disabled) {
1042 PP_ASSERT_WITH_CODE(
1043 (smum_send_msg_to_smc(hwmgr->smumgr,
1044 PPSMC_MSG_MCLKDPM_Disable) == 0),
1045 "Failed to disable MCLK DPM!",
1046 return -EINVAL);
1047 }
1048
1049 return 0;
1050}
1051
/* Tear down DPM: drop global power management and sclk deep sleep,
 * disable PCIe DPM (unless keyed off), then SCLK/MCLK DPM.
 * Mirrors smu7_start_dpm in reverse order.
 *
 * Returns 0 on success, -EINVAL when a disable step fails.
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	return 0;
}
1079
1080static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1081{
1082 bool protection;
1083 enum DPM_EVENT_SRC src;
1084
1085 switch (sources) {
1086 default:
1087 printk(KERN_ERR "Unknown throttling event sources.");
1088 /* fall through */
1089 case 0:
1090 protection = false;
1091 /* src is unused */
1092 break;
1093 case (1 << PHM_AutoThrottleSource_Thermal):
1094 protection = true;
1095 src = DPM_EVENT_SRC_DIGITAL;
1096 break;
1097 case (1 << PHM_AutoThrottleSource_External):
1098 protection = true;
1099 src = DPM_EVENT_SRC_EXTERNAL;
1100 break;
1101 case (1 << PHM_AutoThrottleSource_External) |
1102 (1 << PHM_AutoThrottleSource_Thermal):
1103 protection = true;
1104 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1105 break;
1106 }
1107 /* Order matters - don't enable thermal protection for the wrong source. */
1108 if (protection) {
1109 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1110 DPM_EVENT_SRC, src);
1111 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1112 THERMAL_PROTECTION_DIS,
1113 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1114 PHM_PlatformCaps_ThermalController));
1115 } else
1116 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1117 THERMAL_PROTECTION_DIS, 1);
1118}
1119
1120static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1121 PHM_AutoThrottleSource source)
1122{
1123 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1124
1125 if (!(data->active_auto_throttle_sources & (1 << source))) {
1126 data->active_auto_throttle_sources |= 1 << source;
1127 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1128 }
1129 return 0;
1130}
1131
/* Enable throttling on thermal events by activating the Thermal
 * auto-throttle source. Always returns 0.
 */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1136
1137static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1138 PHM_AutoThrottleSource source)
1139{
1140 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1141
1142 if (data->active_auto_throttle_sources & (1 << source)) {
1143 data->active_auto_throttle_sources &= ~(1 << source);
1144 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1145 }
1146 return 0;
1147}
1148
/* Disable throttling on thermal events by deactivating the Thermal
 * auto-throttle source. Always returns 0.
 */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1153
/* Flag that a PCIe performance request is pending; the flag is
 * presumably consumed by the PCIe request path elsewhere - not visible
 * in this file chunk. Always returns 0.
 */
int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	data->pcie_performance_request = true;

	return 0;
}
1161
1162int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1163{
1164 int tmp_result = 0;
1165 int result = 0;
1166
1167 tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
1168 PP_ASSERT_WITH_CODE(tmp_result == 0,
1169 "DPM is already running right now, no need to enable DPM!",
1170 return 0);
1171
1172 if (smu7_voltage_control(hwmgr)) {
1173 tmp_result = smu7_enable_voltage_control(hwmgr);
1174 PP_ASSERT_WITH_CODE(tmp_result == 0,
1175 "Failed to enable voltage control!",
1176 result = tmp_result);
1177
1178 tmp_result = smu7_construct_voltage_tables(hwmgr);
1179 PP_ASSERT_WITH_CODE((0 == tmp_result),
1180 "Failed to contruct voltage tables!",
1181 result = tmp_result);
1182 }
1183 smum_initialize_mc_reg_table(hwmgr);
1184
1185 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1186 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1187 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1188 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1189
1190 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1191 PHM_PlatformCaps_ThermalController))
1192 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1193 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1194
1195 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1196 PP_ASSERT_WITH_CODE((0 == tmp_result),
1197 "Failed to program static screen threshold parameters!",
1198 result = tmp_result);
1199
1200 tmp_result = smu7_enable_display_gap(hwmgr);
1201 PP_ASSERT_WITH_CODE((0 == tmp_result),
1202 "Failed to enable display gap!", result = tmp_result);
1203
1204 tmp_result = smu7_program_voting_clients(hwmgr);
1205 PP_ASSERT_WITH_CODE((0 == tmp_result),
1206 "Failed to program voting clients!", result = tmp_result);
1207
1208 tmp_result = smum_process_firmware_header(hwmgr);
1209 PP_ASSERT_WITH_CODE((0 == tmp_result),
1210 "Failed to process firmware header!", result = tmp_result);
1211
1212 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1213 PP_ASSERT_WITH_CODE((0 == tmp_result),
1214 "Failed to initialize switch from ArbF0 to F1!",
1215 result = tmp_result);
1216
1217 result = smu7_setup_default_dpm_tables(hwmgr);
1218 PP_ASSERT_WITH_CODE(0 == result,
1219 "Failed to setup default DPM tables!", return result);
1220
1221 tmp_result = smum_init_smc_table(hwmgr);
1222 PP_ASSERT_WITH_CODE((0 == tmp_result),
1223 "Failed to initialize SMC table!", result = tmp_result);
1224
1225 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1226 PP_ASSERT_WITH_CODE((0 == tmp_result),
1227 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1228
1229 smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
1230
1231 tmp_result = smu7_enable_sclk_control(hwmgr);
1232 PP_ASSERT_WITH_CODE((0 == tmp_result),
1233 "Failed to enable SCLK control!", result = tmp_result);
1234
1235 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1236 PP_ASSERT_WITH_CODE((0 == tmp_result),
1237 "Failed to enable voltage control!", result = tmp_result);
1238
1239 tmp_result = smu7_enable_ulv(hwmgr);
1240 PP_ASSERT_WITH_CODE((0 == tmp_result),
1241 "Failed to enable ULV!", result = tmp_result);
1242
1243 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1244 PP_ASSERT_WITH_CODE((0 == tmp_result),
1245 "Failed to enable deep sleep master switch!", result = tmp_result);
1246
1247 tmp_result = smu7_enable_didt_config(hwmgr);
1248 PP_ASSERT_WITH_CODE((tmp_result == 0),
1249 "Failed to enable deep sleep master switch!", result = tmp_result);
1250
1251 tmp_result = smu7_start_dpm(hwmgr);
1252 PP_ASSERT_WITH_CODE((0 == tmp_result),
1253 "Failed to start DPM!", result = tmp_result);
1254
1255 tmp_result = smu7_enable_smc_cac(hwmgr);
1256 PP_ASSERT_WITH_CODE((0 == tmp_result),
1257 "Failed to enable SMC CAC!", result = tmp_result);
1258
1259 tmp_result = smu7_enable_power_containment(hwmgr);
1260 PP_ASSERT_WITH_CODE((0 == tmp_result),
1261 "Failed to enable power containment!", result = tmp_result);
1262
1263 tmp_result = smu7_power_control_set_level(hwmgr);
1264 PP_ASSERT_WITH_CODE((0 == tmp_result),
1265 "Failed to power control set level!", result = tmp_result);
1266
1267 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1268 PP_ASSERT_WITH_CODE((0 == tmp_result),
1269 "Failed to enable thermal auto throttle!", result = tmp_result);
1270
1271 tmp_result = smu7_pcie_performance_request(hwmgr);
1272 PP_ASSERT_WITH_CODE((0 == tmp_result),
1273 "pcie performance request failed!", result = tmp_result);
1274
1275 return 0;
1276}
1277
/* Tear down the DPM stack (the reverse of smu7_enable_dpm_tasks).
 * Each step failure is logged and accumulated into @result; the sequence
 * always runs to completion.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on full success, otherwise the last failing step's error.
 */
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* nothing to do if the SMC reports DPM as already stopped */
	tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	/* turn off engine spread spectrum */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
1335
/* ASIC reset hook for SMU7: currently a no-op stub; always returns 0. */
int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{

	return 0;
}
1341
/* Populate the smu7 backend with default DPM settings, derive voltage
 * control methods (SVID2/GPIO/none) from the VBIOS, and clear platform
 * caps whose prerequisites are absent.
 */
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* fixed defaults for activity targets, thresholds and the
	 * per-client voting-rights masks */
	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	/* honour the module-level feature mask for the per-domain DPM keys */
	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;

	data->fast_watermark_threshold = 100;
	/* probe the VBIOS for each rail's control method */
	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	/* drop caps whose control method could not be established.
	 * NOTE(review): vdd_gfx_control is only assigned above when the
	 * ControlVDDGFX cap is set - presumably the backend is
	 * zero-initialized so it reads as CONTROL_NONE otherwise; confirm. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDGFX);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	/* table_info is only dereferenced for v1 tables (non-V0) */
	if ((hwmgr->pp_table_version != PP_TABLE_V0)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher);

	/* PCIe gen/lane bounds; max/min look inverted but are consumed
	 * as-is elsewhere - kept verbatim */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;
}
1435
1436/**
1437* Get Leakage VDDC based on leakage ID.
1438*
1439* @param hwmgr the address of the powerplay hardware manager.
1440* @return always 0
1441*/
1442static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1443{
1444 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1445 uint16_t vv_id;
1446 uint16_t vddc = 0;
1447 uint16_t vddgfx = 0;
1448 uint16_t i, j;
1449 uint32_t sclk = 0;
1450 struct phm_ppt_v1_information *table_info =
1451 (struct phm_ppt_v1_information *)hwmgr->pptable;
1452 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1453
1454
1455 if (table_info != NULL)
1456 sclk_table = table_info->vdd_dep_on_sclk;
1457
1458 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1459 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1460
1461 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1462 if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
1463 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1464 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1465 PHM_PlatformCaps_ClockStretcher)) {
1466 for (j = 1; j < sclk_table->count; j++) {
1467 if (sclk_table->entries[j].clk == sclk &&
1468 sclk_table->entries[j].cks_enable == 0) {
1469 sclk += 5000;
1470 break;
1471 }
1472 }
1473 }
1474 if (0 == atomctrl_get_voltage_evv_on_sclk
1475 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1476 vv_id, &vddgfx)) {
1477 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
1478 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1479
1480 /* the voltage should not be zero nor equal to leakage ID */
1481 if (vddgfx != 0 && vddgfx != vv_id) {
1482 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1483 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1484 data->vddcgfx_leakage.count++;
1485 }
1486 } else {
1487 printk("Error retrieving EVV voltage value!\n");
1488 }
1489 }
1490 } else {
1491
1492 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1493 || !phm_get_sclk_for_voltage_evv(hwmgr,
1494 table_info->vddc_lookup_table, vv_id, &sclk)) {
1495 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1496 PHM_PlatformCaps_ClockStretcher)) {
1497 for (j = 1; j < sclk_table->count; j++) {
1498 if (sclk_table->entries[j].clk == sclk &&
1499 sclk_table->entries[j].cks_enable == 0) {
1500 sclk += 5000;
1501 break;
1502 }
1503 }
1504 }
1505
1506 if (phm_get_voltage_evv_on_sclk(hwmgr,
1507 VOLTAGE_TYPE_VDDC,
1508 sclk, vv_id, &vddc) == 0) {
1509 if (vddc >= 2000 || vddc == 0)
1510 return -EINVAL;
1511 } else {
1512 printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
1513 continue;
1514 }
1515
1516 /* the voltage should not be zero nor equal to leakage ID */
1517 if (vddc != 0 && vddc != vv_id) {
1518 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1519 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1520 data->vddc_leakage.count++;
1521 }
1522 }
1523 }
1524 }
1525
1526 return 0;
1527}
1528
1529/**
1530 * Change virtual leakage voltage to actual value.
1531 *
1532 * @param hwmgr the address of the powerplay hardware manager.
1533 * @param pointer to changing voltage
1534 * @param pointer to leakage table
1535 */
1536static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1537 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1538{
1539 uint32_t index;
1540
1541 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1542 for (index = 0; index < leakage_table->count; index++) {
1543 /* if this voltage matches a leakage voltage ID */
1544 /* patch with actual leakage voltage */
1545 if (leakage_table->leakage_id[index] == *voltage) {
1546 *voltage = leakage_table->actual_voltage[index];
1547 break;
1548 }
1549 }
1550
1551 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1552 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
1553}
1554
1555/**
1556* Patch voltage lookup table by EVV leakages.
1557*
1558* @param hwmgr the address of the powerplay hardware manager.
1559* @param pointer to voltage lookup table
1560* @param pointer to leakage table
1561* @return always 0
1562*/
1563static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1564 phm_ppt_v1_voltage_lookup_table *lookup_table,
1565 struct smu7_leakage_voltage *leakage_table)
1566{
1567 uint32_t i;
1568
1569 for (i = 0; i < lookup_table->count; i++)
1570 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1571 &lookup_table->entries[i].us_vdd, leakage_table);
1572
1573 return 0;
1574}
1575
1576static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1577 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1578 uint16_t *vddc)
1579{
1580 struct phm_ppt_v1_information *table_info =
1581 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1582 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1583 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1584 table_info->max_clock_voltage_on_dc.vddc;
1585 return 0;
1586}
1587
1588static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1589 struct pp_hwmgr *hwmgr)
1590{
1591 uint8_t entry_id;
1592 uint8_t voltage_id;
1593 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1594 struct phm_ppt_v1_information *table_info =
1595 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1596
1597 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1598 table_info->vdd_dep_on_sclk;
1599 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1600 table_info->vdd_dep_on_mclk;
1601 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1602 table_info->mm_dep_table;
1603
1604 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1605 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1606 voltage_id = sclk_table->entries[entry_id].vddInd;
1607 sclk_table->entries[entry_id].vddgfx =
1608 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1609 }
1610 } else {
1611 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1612 voltage_id = sclk_table->entries[entry_id].vddInd;
1613 sclk_table->entries[entry_id].vddc =
1614 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1615 }
1616 }
1617
1618 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1619 voltage_id = mclk_table->entries[entry_id].vddInd;
1620 mclk_table->entries[entry_id].vddc =
1621 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1622 }
1623
1624 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1625 voltage_id = mm_table->entries[entry_id].vddcInd;
1626 mm_table->entries[entry_id].vddc =
1627 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1628 }
1629
1630 return 0;
1631
1632}
1633
1634static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1635 phm_ppt_v1_voltage_lookup_table *look_up_table,
1636 phm_ppt_v1_voltage_lookup_record *record)
1637{
1638 uint32_t i;
1639
1640 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1641 "Lookup Table empty.", return -EINVAL);
1642 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1643 "Lookup Table empty.", return -EINVAL);
1644
1645 i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
1646 PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1647 "Lookup Table is full.", return -EINVAL);
1648
1649 /* This is to avoid entering duplicate calculated records. */
1650 for (i = 0; i < look_up_table->count; i++) {
1651 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1652 if (look_up_table->entries[i].us_calculated == 1)
1653 return 0;
1654 break;
1655 }
1656 }
1657
1658 look_up_table->entries[i].us_calculated = 1;
1659 look_up_table->entries[i].us_vdd = record->us_vdd;
1660 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1661 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1662 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1663 /* Only increment the count when we're appending, not replacing duplicate entry. */
1664 if (i == look_up_table->count)
1665 look_up_table->count++;
1666
1667 return 0;
1668}
1669
1670
/* When VDDGFX is SVID2-controlled, derive the "other" rail's voltage for
 * every sclk/mclk dependency entry by applying the signed 16-bit
 * vdd_offset, and register each derived voltage in the corresponding
 * lookup table via phm_add_voltage.
 * NOTE(review): bit 15 of vdd_offset marks a negative offset and is
 * decoded as (value - 0xFFFF), not - 0x10000; kept verbatim as it
 * presumably matches the pptable encoding - confirm against the
 * pptable spec. phm_add_voltage return values are deliberately ignored.
 * Always returns 0.
 */
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* sclk entries: VDDC = VDDGFX + offset */
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		/* mclk entries: VDDGFX = VDDC + offset */
		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1712
/* Same derivation as smu7_calc_voltage_dependency_tables, but for the
 * multimedia (mm) dependency table: when VDDGFX is SVID2-controlled,
 * compute VDDGFX = VDDC + signed vddgfx_offset for each entry and add it
 * to the VDDGFX lookup table.
 * NOTE(review): the (1 << 15) / - 0xFFFF offset decoding mirrors the
 * sclk/mclk path - kept verbatim; confirm against the pptable spec.
 * Always returns 0.
 */
static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset;

			/* Add the calculated VDDGFX to the VDDGFX lookup table */
			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1738
1739static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1740 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1741{
1742 uint32_t table_size, i, j;
1743 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1744 table_size = lookup_table->count;
1745
1746 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1747 "Lookup table is empty", return -EINVAL);
1748
1749 /* Sorting voltages */
1750 for (i = 0; i < table_size - 1; i++) {
1751 for (j = i + 1; j > 0; j--) {
1752 if (lookup_table->entries[j].us_vdd <
1753 lookup_table->entries[j - 1].us_vdd) {
1754 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1755 lookup_table->entries[j - 1] = lookup_table->entries[j];
1756 lookup_table->entries[j] = tmp_voltage_lookup_record;
1757 }
1758 }
1759 }
1760
1761 return 0;
1762}
1763
1764static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1765{
1766 int result = 0;
1767 int tmp_result;
1768 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1769 struct phm_ppt_v1_information *table_info =
1770 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1771
1772 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1773 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1774 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1775 if (tmp_result != 0)
1776 result = tmp_result;
1777
1778 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1779 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1780 } else {
1781
1782 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1783 table_info->vddc_lookup_table, &(data->vddc_leakage));
1784 if (tmp_result)
1785 result = tmp_result;
1786
1787 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1788 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1789 if (tmp_result)
1790 result = tmp_result;
1791 }
1792
1793 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1794 if (tmp_result)
1795 result = tmp_result;
1796
1797 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1798 if (tmp_result)
1799 result = tmp_result;
1800
1801 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1802 if (tmp_result)
1803 result = tmp_result;
1804
1805 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1806 if (tmp_result)
1807 result = tmp_result;
1808
1809 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
1810 if (tmp_result)
1811 result = tmp_result;
1812
1813 return result;
1814}
1815
1816static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1817{
1818 struct phm_ppt_v1_information *table_info =
1819 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1820
1821 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1822 table_info->vdd_dep_on_sclk;
1823 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1824 table_info->vdd_dep_on_mclk;
1825
1826 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
1827 "VDD dependency on SCLK table is missing.",
1828 return -EINVAL);
1829 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
1830 "VDD dependency on SCLK table has to have is missing.",
1831 return -EINVAL);
1832
1833 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
1834 "VDD dependency on MCLK table is missing",
1835 return -EINVAL);
1836 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
1837 "VDD dependency on MCLK table has to have is missing.",
1838 return -EINVAL);
1839
1840 table_info->max_clock_voltage_on_ac.sclk =
1841 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1842 table_info->max_clock_voltage_on_ac.mclk =
1843 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1844 table_info->max_clock_voltage_on_ac.vddc =
1845 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1846 table_info->max_clock_voltage_on_ac.vddci =
1847 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1848
1849 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1850 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1851 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
1852 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
1853
1854 return 0;
1855}
1856
/* Board-specific voltage workaround for a handful of Polaris10 designs.
 *
 * For CHIP_POLARIS10 with PCI revision 0xC7 and one of three known
 * subsystem/vendor ID pairs, make sure the highest MCLK DPM level points
 * at a lookup-table entry whose voltage is at least 1000 (units as stored
 * in us_vdd — presumably mV; TODO confirm against the lookup-table parser).
 * If it already does, nothing is changed; otherwise the first entry that is
 * >= 1000 and not a leakage ID (< 0xff01) is substituted.
 *
 * Always returns 0 — the workaround is best-effort.
 */
int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct cgs_system_info sys_info = {0};

	/* Only v1 pptables carry the tables this workaround touches. */
	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	sys_info.size = sizeof(struct cgs_system_info);

	/* Identify the exact board via PCI revision + subsystem IDs. */
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	hw_revision = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_sys_id = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
	cgs_query_system_info(hwmgr->device, &sys_info);
	sub_vendor_id = (uint32_t)sys_info.value;

	/* 0x1002 = AMD, 0x1043 = ASUS, 0x1682 = XFX — specific SKUs only. */
	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* Top MCLK level already runs at >= 1000: nothing to patch. */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		/* Redirect the top MCLK level to the first real (non-leakage)
		 * entry with voltage >= 1000.
		 */
		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
1903
/* Initialize thermal/fan parameters:
 *  - program CNB_PWRMGT_CNTL according to which GPIO pin carries the
 *    VDDC power-compare (PCC) signal, and
 *  - derive fan PWM/RPM limits and operating-temperature limits from the
 *    CAC DTP table when fan control is enabled.
 *
 * Always returns 0 (a wrong PCC GPIO only logs via PP_ASSERT_WITH_CODE).
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		/* Read-modify-write: set the field matching the PCC pin. */
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			/* Log only; the register is still written back below. */
			PP_ASSERT_WITH_CODE(0,
			"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
			);
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	/* No v1 table info (v0 pptable): nothing further to derive. */
	if (table_info == NULL)
		return 0;

	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* Back off the default target temperature by 50 (degrees,
		 * presumably C — TODO confirm units), clamped at 0.
		 */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* Mirror the derived DTP limits into the legacy dyn_state copy. */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
			       table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
			       table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
			       table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
			       table_info->cac_dtp_table->usTargetOperatingTemp;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			       PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
1992
1993/**
1994 * Change virtual leakage voltage to actual value.
1995 *
1996 * @param hwmgr the address of the powerplay hardware manager.
1997 * @param pointer to changing voltage
1998 * @param pointer to leakage table
1999 */
2000static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2001 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2002{
2003 uint32_t index;
2004
2005 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2006 for (index = 0; index < leakage_table->count; index++) {
2007 /* if this voltage matches a leakage voltage ID */
2008 /* patch with actual leakage voltage */
2009 if (leakage_table->leakage_id[index] == *voltage) {
2010 *voltage = leakage_table->actual_voltage[index];
2011 break;
2012 }
2013 }
2014
2015 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2016 printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
2017}
2018
2019
2020static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2021 struct phm_clock_voltage_dependency_table *tab)
2022{
2023 uint16_t i;
2024 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2025
2026 if (tab)
2027 for (i = 0; i < tab->count; i++)
2028 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2029 &data->vddc_leakage);
2030
2031 return 0;
2032}
2033
2034static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2035 struct phm_clock_voltage_dependency_table *tab)
2036{
2037 uint16_t i;
2038 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2039
2040 if (tab)
2041 for (i = 0; i < tab->count; i++)
2042 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2043 &data->vddci_leakage);
2044
2045 return 0;
2046}
2047
2048static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2049 struct phm_vce_clock_voltage_dependency_table *tab)
2050{
2051 uint16_t i;
2052 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2053
2054 if (tab)
2055 for (i = 0; i < tab->count; i++)
2056 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2057 &data->vddc_leakage);
2058
2059 return 0;
2060}
2061
2062
2063static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2064 struct phm_uvd_clock_voltage_dependency_table *tab)
2065{
2066 uint16_t i;
2067 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2068
2069 if (tab)
2070 for (i = 0; i < tab->count; i++)
2071 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2072 &data->vddc_leakage);
2073
2074 return 0;
2075}
2076
2077static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2078 struct phm_phase_shedding_limits_table *tab)
2079{
2080 uint16_t i;
2081 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2082
2083 if (tab)
2084 for (i = 0; i < tab->count; i++)
2085 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2086 &data->vddc_leakage);
2087
2088 return 0;
2089}
2090
2091static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2092 struct phm_samu_clock_voltage_dependency_table *tab)
2093{
2094 uint16_t i;
2095 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2096
2097 if (tab)
2098 for (i = 0; i < tab->count; i++)
2099 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2100 &data->vddc_leakage);
2101
2102 return 0;
2103}
2104
2105static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2106 struct phm_acp_clock_voltage_dependency_table *tab)
2107{
2108 uint16_t i;
2109 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2110
2111 if (tab)
2112 for (i = 0; i < tab->count; i++)
2113 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2114 &data->vddc_leakage);
2115
2116 return 0;
2117}
2118
2119static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2120 struct phm_clock_and_voltage_limits *tab)
2121{
2122 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2123
2124 if (tab) {
2125 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
2126 &data->vddc_leakage);
2127 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
2128 &data->vddci_leakage);
2129 }
2130
2131 return 0;
2132}
2133
2134static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2135{
2136 uint32_t i;
2137 uint32_t vddc;
2138 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2139
2140 if (tab) {
2141 for (i = 0; i < tab->count; i++) {
2142 vddc = (uint32_t)(tab->entries[i].Vddc);
2143 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2144 tab->entries[i].Vddc = (uint16_t)vddc;
2145 }
2146 }
2147
2148 return 0;
2149}
2150
2151static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2152{
2153 int tmp;
2154
2155 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2156 if (tmp)
2157 return -EINVAL;
2158
2159 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2160 if (tmp)
2161 return -EINVAL;
2162
2163 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2164 if (tmp)
2165 return -EINVAL;
2166
2167 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2168 if (tmp)
2169 return -EINVAL;
2170
2171 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2172 if (tmp)
2173 return -EINVAL;
2174
2175 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2176 if (tmp)
2177 return -EINVAL;
2178
2179 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2180 if (tmp)
2181 return -EINVAL;
2182
2183 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2184 if (tmp)
2185 return -EINVAL;
2186
2187 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2188 if (tmp)
2189 return -EINVAL;
2190
2191 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2192 if (tmp)
2193 return -EINVAL;
2194
2195 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2196 if (tmp)
2197 return -EINVAL;
2198
2199 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2200 if (tmp)
2201 return -EINVAL;
2202
2203 return 0;
2204}
2205
2206
2207static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2208{
2209 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2210
2211 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2212 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2213 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2214
2215 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2216 "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
2217 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2218 "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
2219
2220 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2221 "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
2222 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2223 "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
2224
2225 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2226 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2227
2228 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2229 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2230 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2231 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2232 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2233 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2234
2235 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2236 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2237 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2238 }
2239
2240 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
2241 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2242
2243 return 0;
2244}
2245
2246int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2247{
2248 struct smu7_hwmgr *data;
2249 int result;
2250
2251 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2252 if (data == NULL)
2253 return -ENOMEM;
2254
2255 hwmgr->backend = data;
2256
2257 smu7_patch_voltage_workaround(hwmgr);
2258 smu7_init_dpm_defaults(hwmgr);
2259
2260 /* Get leakage voltage based on leakage ID. */
2261 result = smu7_get_evv_voltages(hwmgr);
2262
2263 if (result) {
2264 printk("Get EVV Voltage Failed. Abort Driver loading!\n");
2265 return -EINVAL;
2266 }
2267
2268 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2269 smu7_complete_dependency_tables(hwmgr);
2270 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2271 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2272 smu7_patch_dependency_tables_with_leakage(hwmgr);
2273 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2274 }
2275
2276 /* Initalize Dynamic State Adjustment Rule Settings */
2277 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2278
2279 if (0 == result) {
2280 struct cgs_system_info sys_info = {0};
2281
2282 data->is_tlu_enabled = false;
2283
2284 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2285 SMU7_MAX_HARDWARE_POWERLEVELS;
2286 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2287 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2288
2289 sys_info.size = sizeof(struct cgs_system_info);
2290 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2291 result = cgs_query_system_info(hwmgr->device, &sys_info);
2292 if (result)
2293 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2294 else
2295 data->pcie_gen_cap = (uint32_t)sys_info.value;
2296 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2297 data->pcie_spc_cap = 20;
2298 sys_info.size = sizeof(struct cgs_system_info);
2299 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2300 result = cgs_query_system_info(hwmgr->device, &sys_info);
2301 if (result)
2302 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2303 else
2304 data->pcie_lane_cap = (uint32_t)sys_info.value;
2305
2306 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2307/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2308 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2309 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2310 smu7_thermal_parameter_init(hwmgr);
2311 } else {
2312 /* Ignore return value in here, we are cleaning up a mess. */
2313 phm_hwmgr_backend_fini(hwmgr);
2314 }
2315
2316 return 0;
2317}
2318
2319static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2320{
2321 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2322 uint32_t level, tmp;
2323
2324 if (!data->pcie_dpm_key_disabled) {
2325 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2326 level = 0;
2327 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2328 while (tmp >>= 1)
2329 level++;
2330
2331 if (level)
2332 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2333 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2334 }
2335 }
2336
2337 if (!data->sclk_dpm_key_disabled) {
2338 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2339 level = 0;
2340 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2341 while (tmp >>= 1)
2342 level++;
2343
2344 if (level)
2345 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2346 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2347 (1 << level));
2348 }
2349 }
2350
2351 if (!data->mclk_dpm_key_disabled) {
2352 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2353 level = 0;
2354 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2355 while (tmp >>= 1)
2356 level++;
2357
2358 if (level)
2359 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2360 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2361 (1 << level));
2362 }
2363 }
2364
2365 return 0;
2366}
2367
2368static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2369{
2370 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2371
2372 if (hwmgr->pp_table_version == PP_TABLE_V1)
2373 phm_apply_dal_min_voltage_request(hwmgr);
2374/* TO DO for v0 iceland and Ci*/
2375
2376 if (!data->sclk_dpm_key_disabled) {
2377 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2378 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2379 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2380 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2381 }
2382
2383 if (!data->mclk_dpm_key_disabled) {
2384 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2385 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2386 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2387 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2388 }
2389
2390 return 0;
2391}
2392
2393static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2394{
2395 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2396
2397 if (!smum_is_dpm_running(hwmgr))
2398 return -EINVAL;
2399
2400 if (!data->pcie_dpm_key_disabled) {
2401 smum_send_msg_to_smc(hwmgr->smumgr,
2402 PPSMC_MSG_PCIeDPM_UnForceLevel);
2403 }
2404
2405 return smu7_upload_dpm_level_enable_mask(hwmgr);
2406}
2407
2408static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2409{
2410 struct smu7_hwmgr *data =
2411 (struct smu7_hwmgr *)(hwmgr->backend);
2412 uint32_t level;
2413
2414 if (!data->sclk_dpm_key_disabled)
2415 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2416 level = phm_get_lowest_enabled_level(hwmgr,
2417 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2418 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2419 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2420 (1 << level));
2421
2422 }
2423
2424 if (!data->mclk_dpm_key_disabled) {
2425 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2426 level = phm_get_lowest_enabled_level(hwmgr,
2427 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2428 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2429 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2430 (1 << level));
2431 }
2432 }
2433
2434 if (!data->pcie_dpm_key_disabled) {
2435 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2436 level = phm_get_lowest_enabled_level(hwmgr,
2437 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2438 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2439 PPSMC_MSG_PCIeDPM_ForceLevel,
2440 (level));
2441 }
2442 }
2443
2444 return 0;
2445
2446}
2447static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2448 enum amd_dpm_forced_level level)
2449{
2450 int ret = 0;
2451
2452 switch (level) {
2453 case AMD_DPM_FORCED_LEVEL_HIGH:
2454 ret = smu7_force_dpm_highest(hwmgr);
2455 if (ret)
2456 return ret;
2457 break;
2458 case AMD_DPM_FORCED_LEVEL_LOW:
2459 ret = smu7_force_dpm_lowest(hwmgr);
2460 if (ret)
2461 return ret;
2462 break;
2463 case AMD_DPM_FORCED_LEVEL_AUTO:
2464 ret = smu7_unforce_dpm_levels(hwmgr);
2465 if (ret)
2466 return ret;
2467 break;
2468 default:
2469 break;
2470 }
2471
2472 hwmgr->dpm_level = level;
2473
2474 return ret;
2475}
2476
2477static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2478{
2479 return sizeof(struct smu7_power_state);
2480}
2481
2482
/* Clamp and adjust the requested power state before it is programmed:
 * apply DC limits, display/arbiter minimum clocks, stable-pstate and
 * overdrive constraints, and decide whether MCLK switching must be
 * disabled. Operates on exactly two performance levels (asserted below).
 */
static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{

	struct smu7_power_state *smu7_ps =
				cast_phw_smu7_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
				 "VI should always have 2 performance levels",
				);

	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
	smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/

	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		/* Stable pstate: pin SCLK at 75% of the AC max, snapped down
		 * to the nearest entry in the SCLK dependency table; MCLK at
		 * the AC maximum.
		 */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* Target is below every table entry: use the lowest entry. */
		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	/* Arbiter minimums take precedence over display minimums. */
	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
		/* Clamp the overdrive request to the platform limit, then
		 * apply it to the high performance level only.
		 */
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
				hwmgr->platform_descriptor.overdriveLimit.engineClock),
				"Overdrive sclk exceeds limit",
				hwmgr->gfx_arbiter.sclk_over_drive =
						hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			smu7_ps->performance_levels[1].engine_clock =
					hwmgr->gfx_arbiter.sclk_over_drive;
	}

	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
				"Overdrive mclk exceeds limit",
				hwmgr->gfx_arbiter.mclk_over_drive =
						hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			smu7_ps->performance_levels[1].memory_clock =
					hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);


	/* MCLK switching is unsafe with multiple displays or frame lock. */
	disable_mclk_switching = (1 < info.display_count) ||
				    disable_mclk_switching_for_frame_lock;

	sclk = smu7_ps->performance_levels[0].engine_clock;
	mclk = smu7_ps->performance_levels[0].memory_clock;

	if (disable_mclk_switching)
		mclk = smu7_ps->performance_levels
		[smu7_ps->performance_level_count - 1].memory_clock;

	/* Raise level-0 clocks to the required minimums, but never past
	 * the applicable (AC or DC) maximum.
	 */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	smu7_ps->performance_levels[0].engine_clock = sclk;
	smu7_ps->performance_levels[0].memory_clock = mclk;

	/* Keep the high level at least as fast as the low level. */
	smu7_ps->performance_levels[1].engine_clock =
		(smu7_ps->performance_levels[1].engine_clock >=
				smu7_ps->performance_levels[0].engine_clock) ?
						smu7_ps->performance_levels[1].engine_clock :
						smu7_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		/* Pin both levels to one MCLK so no switch ever occurs. */
		if (mclk < smu7_ps->performance_levels[1].memory_clock)
			mclk = smu7_ps->performance_levels[1].memory_clock;

		smu7_ps->performance_levels[0].memory_clock = mclk;
		smu7_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (smu7_ps->performance_levels[1].memory_clock <
				smu7_ps->performance_levels[0].memory_clock)
			smu7_ps->performance_levels[1].memory_clock =
					smu7_ps->performance_levels[0].memory_clock;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max — looks like it should be
			 * pcie_lane_performance.max; confirm before changing.
			 */
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}
	return 0;
}
2647
2648
2649static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2650{
2651 struct pp_power_state *ps;
2652 struct smu7_power_state *smu7_ps;
2653
2654 if (hwmgr == NULL)
2655 return -EINVAL;
2656
2657 ps = hwmgr->request_ps;
2658
2659 if (ps == NULL)
2660 return -EINVAL;
2661
2662 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2663
2664 if (low)
2665 return smu7_ps->performance_levels[0].memory_clock;
2666 else
2667 return smu7_ps->performance_levels
2668 [smu7_ps->performance_level_count-1].memory_clock;
2669}
2670
2671static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2672{
2673 struct pp_power_state *ps;
2674 struct smu7_power_state *smu7_ps;
2675
2676 if (hwmgr == NULL)
2677 return -EINVAL;
2678
2679 ps = hwmgr->request_ps;
2680
2681 if (ps == NULL)
2682 return -EINVAL;
2683
2684 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2685
2686 if (low)
2687 return smu7_ps->performance_levels[0].engine_clock;
2688 else
2689 return smu7_ps->performance_levels
2690 [smu7_ps->performance_level_count-1].engine_clock;
2691}
2692
/* Patch the boot power state (performance level 0) with the bootup
 * clocks/voltages read from the VBIOS firmware-info table and the current
 * PCIe link configuration. Returns 0 even when the firmware table is
 * absent (test environments), leaving the state unpatched.
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	/* ATOM tables are little-endian; convert each field on read. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe bootup values come from the live link, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
2738
2739static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
2740{
2741 int result;
2742 unsigned long ret = 0;
2743
2744 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2745 result = pp_tables_get_num_of_entries(hwmgr, &ret);
2746 return result ? 0 : ret;
2747 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2748 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
2749 return result;
2750 }
2751 return 0;
2752}
2753
2754static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
2755 void *state, struct pp_power_state *power_state,
2756 void *pp_table, uint32_t classification_flag)
2757{
2758 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2759 struct smu7_power_state *smu7_power_state =
2760 (struct smu7_power_state *)(&(power_state->hardware));
2761 struct smu7_performance_level *performance_level;
2762 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
2763 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
2764 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
2765 PPTable_Generic_SubTable_Header *sclk_dep_table =
2766 (PPTable_Generic_SubTable_Header *)
2767 (((unsigned long)powerplay_table) +
2768 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
2769
2770 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
2771 (ATOM_Tonga_MCLK_Dependency_Table *)
2772 (((unsigned long)powerplay_table) +
2773 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2774
2775 /* The following fields are not initialized here: id orderedList allStatesList */
2776 power_state->classification.ui_label =
2777 (le16_to_cpu(state_entry->usClassification) &
2778 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2779 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2780 power_state->classification.flags = classification_flag;
2781 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
2782
2783 power_state->classification.temporary_state = false;
2784 power_state->classification.to_be_deleted = false;
2785
2786 power_state->validation.disallowOnDC =
2787 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2788 ATOM_Tonga_DISALLOW_ON_DC));
2789
2790 power_state->pcie.lanes = 0;
2791
2792 power_state->display.disableFrameModulation = false;
2793 power_state->display.limitRefreshrate = false;
2794 power_state->display.enableVariBright =
2795 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2796 ATOM_Tonga_ENABLE_VARIBRIGHT));
2797
2798 power_state->validation.supportedPowerLevels = 0;
2799 power_state->uvd_clocks.VCLK = 0;
2800 power_state->uvd_clocks.DCLK = 0;
2801 power_state->temperatures.min = 0;
2802 power_state->temperatures.max = 0;
2803
2804 performance_level = &(smu7_power_state->performance_levels
2805 [smu7_power_state->performance_level_count++]);
2806
2807 PP_ASSERT_WITH_CODE(
2808 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2809 "Performance levels exceeds SMC limit!",
2810 return -EINVAL);
2811
2812 PP_ASSERT_WITH_CODE(
2813 (smu7_power_state->performance_level_count <=
2814 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2815 "Performance levels exceeds Driver limit!",
2816 return -EINVAL);
2817
2818 /* Performance levels are arranged from low to high. */
2819 performance_level->memory_clock = mclk_dep_table->entries
2820 [state_entry->ucMemoryClockIndexLow].ulMclk;
2821 if (sclk_dep_table->ucRevId == 0)
2822 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2823 [state_entry->ucEngineClockIndexLow].ulSclk;
2824 else if (sclk_dep_table->ucRevId == 1)
2825 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2826 [state_entry->ucEngineClockIndexLow].ulSclk;
2827 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2828 state_entry->ucPCIEGenLow);
2829 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2830 state_entry->ucPCIELaneHigh);
2831
2832 performance_level = &(smu7_power_state->performance_levels
2833 [smu7_power_state->performance_level_count++]);
2834 performance_level->memory_clock = mclk_dep_table->entries
2835 [state_entry->ucMemoryClockIndexHigh].ulMclk;
2836
2837 if (sclk_dep_table->ucRevId == 0)
2838 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2839 [state_entry->ucEngineClockIndexHigh].ulSclk;
2840 else if (sclk_dep_table->ucRevId == 1)
2841 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2842 [state_entry->ucEngineClockIndexHigh].ulSclk;
2843
2844 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2845 state_entry->ucPCIEGenHigh);
2846 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2847 state_entry->ucPCIELaneHigh);
2848
2849 return 0;
2850}
2851
/* Fill @state from v1 powerplay table entry @entry_index, then
 * post-process it: sanity-check single-entry MCLK/VDDCI tables against
 * the VBIOS boot values, set DC compatibility, record ACPI PCIe gen,
 * copy UVD clocks, and widen the cached PCIe performance/power-saving
 * ranges for Performance/Battery labelled states.
 * Always returns 0; table-parse failures only skip the range update.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen of the ACPI state for later PSPP handling. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the cached PCIe gen/lane min-max windows with this
		 * state's levels, bucketed by UI label.
		 */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
2953
/* Callback invoked by pp_tables_get_entry() for each clock-info record
 * of a v0 (legacy) powerplay table entry; appends one performance level
 * to the smu7 hardware power state.
 * Returns 0 on success, -EINVAL when SMC or driver performance-level
 * limits would be exceeded.
 */
static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *power_state,
		unsigned int index, const void *clock_info)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
	struct smu7_performance_level *performance_level;
	uint32_t engine_clock, memory_clock;
	uint16_t pcie_gen_from_bios;

	/* Clocks are split across a high byte and a low 16-bit word in the BIOS record. */
	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;

	/* Track the highest MCLK seen unless microcode loading is disabled. */
	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
		data->highest_mclk = memory_clock;

	performance_level = &(ps->performance_levels
			[ps->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -EINVAL);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = memory_clock;
	performance_level->engine_clock = engine_clock;

	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;

	/* Clamp the BIOS-reported PCIe settings to platform capabilities. */
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}
2996
/* Fill @state from legacy (v0) powerplay table entry @entry_index, then
 * post-process it exactly like the v1 path: check single-entry MCLK
 * tables against the VBIOS boot values, set DC compatibility, record the
 * ACPI PCIe gen, copy UVD clocks, and widen the cached PCIe ranges for
 * Performance/Battery labelled states.
 * Always returns 0; table-parse failures only skip the range update.
 */
static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v0);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state as
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
	 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen of the ACPI state for later PSPP handling. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the cached PCIe gen/lane min-max windows with this
		 * state's levels, bucketed by UI label.
		 */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3103
3104static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3105 unsigned long entry_index, struct pp_power_state *state)
3106{
3107 if (hwmgr->pp_table_version == PP_TABLE_V0)
3108 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3109 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3110 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3111
3112 return 0;
3113}
3114
/* Read one sensor value (@idx is an AMDGPU_PP_SENSOR_* id) into @value.
 * Clocks are queried from the SMC; GPU load is read from SMC soft
 * registers; temperature from the thermal helper; UVD/VCE report 1 when
 * the block is powered. Returns 0 on success, -EINVAL for unknown ids.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* The SMC answers the query via the message-argument register. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*value = sclk;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*value = mclk;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
								SMU_SoftRegisters,
								AverageGraphicsActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* Round-to-nearest divide by 256, then clamp to 100%. */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*value = activity_percent > 100 ? 100 : activity_percent;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*value = smu7_thermal_get_temperature(hwmgr);
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*value = data->uvd_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*value = data->vce_power_gated ? 0 : 1;
		return 0;
	default:
		return -EINVAL;
	}
}
3155
/* Compare the new power state's top SCLK/MCLK against the current DPM
 * tables and set the corresponding bits in
 * data->need_update_smu7_dpm_table: OD_UPDATE_* when the clock is not
 * in the table (overdrive), UPDATE_SCLK for a DeepSleep change,
 * UPDATE_MCLK when the display count changed. Always returns 0.
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	/* NOTE(review): min_clocks stays zero-initialized (see TODO below),
	 * so the comparison effectively tests against 0 — confirm intent.
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_smu7_dpm_table = 0;

	/* Is the requested SCLK already a DPM table level? */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Is the requested MCLK already a DPM table level? */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	/* A display count change requires an MCLK table refresh. */
	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3207
3208static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3209 const struct smu7_power_state *smu7_ps)
3210{
3211 uint32_t i;
3212 uint32_t sclk, max_sclk = 0;
3213 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3214 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3215
3216 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3217 sclk = smu7_ps->performance_levels[i].engine_clock;
3218 if (max_sclk < sclk)
3219 max_sclk = sclk;
3220 }
3221
3222 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3223 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3224 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3225 dpm_table->pcie_speed_table.dpm_levels
3226 [dpm_table->pcie_speed_table.count - 1].value :
3227 dpm_table->pcie_speed_table.dpm_levels[i].value);
3228 }
3229
3230 return 0;
3231}
3232
/* Before switching power states, ask the platform (via ACPI PSPP) for a
 * faster PCIe link when the new state needs one. On a partial grant the
 * achieved gen is latched in data->force_pcie_gen; when the new state
 * needs a *slower* link, only flag pspp_notify_required so the request
 * is sent after the state change. Always returns 0.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	/* A previously forced gen overrides the current state's requirement. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 denied: remember Gen2 as the forced fallback. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through - try a Gen2 request instead */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request denied, keep the current speed */
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		/* A downgrade request is deferred until after the state change. */
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3277
/* Freeze the SCLK and/or MCLK DPM levels in the SMC before the DPM
 * tables are repopulated, but only for the domains that actually need
 * an update (per data->need_update_smu7_dpm_table) and are not
 * key-disabled. Returns 0 on success, -EINVAL if an SMC message fails.
 */
static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Nothing pending: nothing to freeze. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		/* Warn-only assert: the empty third argument means no early return. */
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		 DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	return 0;
}
3311
/* After a state change request, rewrite the top SCLK/MCLK DPM levels to
 * the new state's clocks and, when OD6Plus overdrive is supported,
 * rescale the intermediate levels proportionally to the golden
 * (default) table. Finally re-upload the affected graphic/memory level
 * tables to the SMC. Returns 0 on success or the SMC populate error.
 */
static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	struct smu7_dpm_table *dpm_table = &data->dpm_table;

	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* The requested SCLK becomes the new top DPM level. */
		dpm_table->sclk_table.dpm_levels
		[dpm_table->sclk_table.count - 1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->sclk_table.dpm_levels
						[golden_dpm_table->sclk_table.count - 1].value != 0),
				"Divide by 0!",
				return -EINVAL);
			/* Rescale the intermediate levels (indices 2..count-2). */
			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;

			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
					/* Overclocked: raise each level by the same percentage. */
					clock_percent =
					      ((sclk
						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
						) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent)/100;

				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
					/* NOTE(review): the golden table is indexed with the
					 * *current* table's count here (mixed indices) —
					 * confirm the two tables always share a count.
					 */
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
						- sclk) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* The requested MCLK becomes the new top DPM level. */
		dpm_table->mclk_table.dpm_levels
			[dpm_table->mclk_table.count - 1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
					(golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value != 0),
					"Divide by 0!",
					return -EINVAL);
			/* Rescale the intermediate levels (indices 2..count-2). */
			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
					clock_percent = ((mclk -
					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value +
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;

				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
					/* NOTE(review): mixed current/golden indexing as above
					 * — confirm the counts always match.
					 */
					clock_percent = (
					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
					* 100)
					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = smum_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = smum_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}
3440
3441static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3442 struct smu7_single_dpm_table *dpm_table,
3443 uint32_t low_limit, uint32_t high_limit)
3444{
3445 uint32_t i;
3446
3447 for (i = 0; i < dpm_table->count; i++) {
3448 if ((dpm_table->dpm_levels[i].value < low_limit)
3449 || (dpm_table->dpm_levels[i].value > high_limit))
3450 dpm_table->dpm_levels[i].enabled = false;
3451 else
3452 dpm_table->dpm_levels[i].enabled = true;
3453 }
3454
3455 return 0;
3456}
3457
3458static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3459 const struct smu7_power_state *smu7_ps)
3460{
3461 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3462 uint32_t high_limit_count;
3463
3464 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3465 "power state did not have any performance level",
3466 return -EINVAL);
3467
3468 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3469
3470 smu7_trim_single_dpm_states(hwmgr,
3471 &(data->dpm_table.sclk_table),
3472 smu7_ps->performance_levels[0].engine_clock,
3473 smu7_ps->performance_levels[high_limit_count].engine_clock);
3474
3475 smu7_trim_single_dpm_states(hwmgr,
3476 &(data->dpm_table.mclk_table),
3477 smu7_ps->performance_levels[0].memory_clock,
3478 smu7_ps->performance_levels[high_limit_count].memory_clock);
3479
3480 return 0;
3481}
3482
3483static int smu7_generate_dpm_level_enable_mask(
3484 struct pp_hwmgr *hwmgr, const void *input)
3485{
3486 int result;
3487 const struct phm_set_power_state_input *states =
3488 (const struct phm_set_power_state_input *)input;
3489 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3490 const struct smu7_power_state *smu7_ps =
3491 cast_const_phw_smu7_power_state(states->pnew_state);
3492
3493 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3494 if (result)
3495 return result;
3496
3497 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3498 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3499 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3500 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3501 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3502 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3503
3504 return 0;
3505}
3506
3507static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3508{
3509 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3510
3511 if (0 == data->need_update_smu7_dpm_table)
3512 return 0;
3513
3514 if ((0 == data->sclk_dpm_key_disabled) &&
3515 (data->need_update_smu7_dpm_table &
3516 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3517
3518 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3519 "Trying to Unfreeze SCLK DPM when DPM is disabled",
3520 );
3521 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3522 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3523 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3524 return -EINVAL);
3525 }
3526
3527 if ((0 == data->mclk_dpm_key_disabled) &&
3528 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3529
3530 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3531 "Trying to Unfreeze MCLK DPM when DPM is disabled",
3532 );
3533 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3534 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3535 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3536 return -EINVAL);
3537 }
3538
3539 data->need_update_smu7_dpm_table = 0;
3540
3541 return 0;
3542}
3543
3544static int smu7_notify_link_speed_change_after_state_change(
3545 struct pp_hwmgr *hwmgr, const void *input)
3546{
3547 const struct phm_set_power_state_input *states =
3548 (const struct phm_set_power_state_input *)input;
3549 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3550 const struct smu7_power_state *smu7_ps =
3551 cast_const_phw_smu7_power_state(states->pnew_state);
3552 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3553 uint8_t request;
3554
3555 if (data->pspp_notify_required) {
3556 if (target_link_speed == PP_PCIEGen3)
3557 request = PCIE_PERF_REQ_GEN3;
3558 else if (target_link_speed == PP_PCIEGen2)
3559 request = PCIE_PERF_REQ_GEN2;
3560 else
3561 request = PCIE_PERF_REQ_GEN1;
3562
3563 if (request == PCIE_PERF_REQ_GEN1 &&
3564 smu7_get_current_pcie_speed(hwmgr) > 0)
3565 return 0;
3566
3567 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
3568 if (PP_PCIEGen2 == target_link_speed)
3569 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
3570 else
3571 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
3572 }
3573 }
3574
3575 return 0;
3576}
3577
3578static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3579{
3580 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3581
3582 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
3583 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3584 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3585 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
3586}
3587
/*
 * Program the hardware for a newly selected power state.
 *
 * The steps below are order-sensitive: an optional PCIe link-speed request
 * is issued first, SCLK/MCLK DPM is frozen, the DPM levels and enable
 * masks are rebuilt and uploaded, the SMC is told about display settings,
 * DPM is unfrozen, and finally the enable mask is uploaded.  A failing
 * step records its error but the remaining steps still run, so the last
 * error encountered is what gets returned.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param input  phm_set_power_state_input with the requested state.
 * @return 0 on success, otherwise the last recorded step error.
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* PSPP request must precede the state change so the platform can
	 * raise the link speed before we start using it. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Notify after the change so the platform can downshift the link. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
3652
3653static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
3654{
3655 hwmgr->thermal_controller.
3656 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
3657
3658 if (phm_is_hw_access_blocked(hwmgr))
3659 return 0;
3660
3661 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3662 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
3663}
3664
3665int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
3666{
3667 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
3668
3669 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
3670}
3671
3672int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
3673{
3674 uint32_t num_active_displays = 0;
3675 struct cgs_display_info info = {0};
3676
3677 info.mode_info = NULL;
3678 cgs_get_active_displays_info(hwmgr->device, &info);
3679
3680 num_active_displays = info.display_count;
3681
3682 if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
3683 smu7_notify_smc_display_change(hwmgr, false);
3684
3685 return 0;
3686}
3687
3688/**
3689* Programs the display gap
3690*
3691* @param hwmgr the address of the powerplay hardware manager.
3692* @return always OK
3693*/
3694int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3695{
3696 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3697 uint32_t num_active_displays = 0;
3698 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
3699 uint32_t display_gap2;
3700 uint32_t pre_vbi_time_in_us;
3701 uint32_t frame_time_in_us;
3702 uint32_t ref_clock;
3703 uint32_t refresh_rate = 0;
3704 struct cgs_display_info info = {0};
3705 struct cgs_mode_info mode_info;
3706
3707 info.mode_info = &mode_info;
3708
3709 cgs_get_active_displays_info(hwmgr->device, &info);
3710 num_active_displays = info.display_count;
3711
3712 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
3713 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
3714
3715 ref_clock = mode_info.ref_clock;
3716 refresh_rate = mode_info.refresh_rate;
3717
3718 if (0 == refresh_rate)
3719 refresh_rate = 60;
3720
3721 frame_time_in_us = 1000000 / refresh_rate;
3722
3723 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
3724 data->frame_time_x2 = frame_time_in_us * 2 / 100;
3725
3726 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
3727
3728 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
3729
3730 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3731 data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3732 SMU_SoftRegisters,
3733 PreVBlankGap), 0x64);
3734
3735 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3736 data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3737 SMU_SoftRegisters,
3738 VBlankTimeout),
3739 (frame_time_in_us - pre_vbi_time_in_us));
3740
3741 return 0;
3742}
3743
/* Display configuration changed: reprogram the display gap / VBI timing. */
int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
3748
3749/**
3750* Set maximum target operating fan output RPM
3751*
3752* @param hwmgr: the address of the powerplay hardware manager.
3753* @param usMaxFanRpm: max operating fan RPM value.
3754* @return The response that came from the SMC.
3755*/
3756static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
3757{
3758 hwmgr->thermal_controller.
3759 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
3760
3761 if (phm_is_hw_access_blocked(hwmgr))
3762 return 0;
3763
3764 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3765 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
3766}
3767
/* Stub: no SMU7-specific thermal interrupt registration is required. */
int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *thermal_interrupt_info)
{
	return 0;
}
3773
3774bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3775{
3776 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3777 bool is_update_required = false;
3778 struct cgs_display_info info = {0, 0, NULL};
3779
3780 cgs_get_active_displays_info(hwmgr->device, &info);
3781
3782 if (data->display_timing.num_existing_displays != info.display_count)
3783 is_update_required = true;
3784
3785 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
3786 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
3787 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
3788 hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3789 is_update_required = true;
3790 }
3791 return is_update_required;
3792}
3793
3794static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
3795 const struct smu7_performance_level *pl2)
3796{
3797 return ((pl1->memory_clock == pl2->memory_clock) &&
3798 (pl1->engine_clock == pl2->engine_clock) &&
3799 (pl1->pcie_gen == pl2->pcie_gen) &&
3800 (pl1->pcie_lane == pl2->pcie_lane));
3801}
3802
3803int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
3804{
3805 const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
3806 const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
3807 int i;
3808
3809 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
3810 return -EINVAL;
3811
3812 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
3813 if (psa->performance_level_count != psb->performance_level_count) {
3814 *equal = false;
3815 return 0;
3816 }
3817
3818 for (i = 0; i < psa->performance_level_count; i++) {
3819 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
3820 /* If we have found even one performance level pair that is different the states are different. */
3821 *equal = false;
3822 return 0;
3823 }
3824 }
3825
3826 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
3827 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
3828 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
3829 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
3830
3831 return 0;
3832}
3833
3834int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
3835{
3836 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3837
3838 uint32_t vbios_version;
3839 uint32_t tmp;
3840
3841 /* Read MC indirect register offset 0x9F bits [3:0] to see
3842 * if VBIOS has already loaded a full version of MC ucode
3843 * or not.
3844 */
3845
3846 smu7_get_mc_microcode_version(hwmgr);
3847 vbios_version = hwmgr->microcode_version_info.MC & 0xf;
3848
3849 data->need_long_memory_training = false;
3850
3851 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
3852 ixMC_IO_DEBUG_UP_13);
3853 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
3854
3855 if (tmp & (1 << 23)) {
3856 data->mem_latency_high = MEM_LATENCY_HIGH;
3857 data->mem_latency_low = MEM_LATENCY_LOW;
3858 } else {
3859 data->mem_latency_high = 330;
3860 data->mem_latency_low = 330;
3861 }
3862
3863 return 0;
3864}
3865
/*
 * Snapshot the SPLL/MPLL/MCLK clock registers into the backend cache
 * (data->clock_registers) so later programming can start from the values
 * the VBIOS left behind.  SPLL registers live in the SMC indirect space;
 * the memory PLL registers are plain MMIO.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0.
 */
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);
	return 0;

}
3903
3904/**
3905 * Find out if memory is GDDR5.
3906 *
3907 * @param hwmgr the address of the powerplay hardware manager.
3908 * @return always 0
3909 */
3910static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
3911{
3912 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3913 uint32_t temp;
3914
3915 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
3916
3917 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
3918 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
3919 MC_SEQ_MISC0_GDDR5_SHIFT));
3920
3921 return 0;
3922}
3923
3924/**
3925 * Enables Dynamic Power Management by SMC
3926 *
3927 * @param hwmgr the address of the powerplay hardware manager.
3928 * @return always 0
3929 */
3930static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
3931{
3932 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3933 GENERAL_PWRMGT, STATIC_PM_EN, 1);
3934
3935 return 0;
3936}
3937
3938/**
3939 * Initialize PowerGating States for different engines
3940 *
3941 * @param hwmgr the address of the powerplay hardware manager.
3942 * @return always 0
3943 */
3944static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
3945{
3946 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3947
3948 data->uvd_power_gated = false;
3949 data->vce_power_gated = false;
3950 data->samu_power_gated = false;
3951
3952 return 0;
3953}
3954
/* Reset the cached low-sclk interrupt threshold to "not set". */
static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;
	return 0;
}
3962
/*
 * One-time ASIC setup: MC firmware check, clock register snapshot, memory
 * type detection, ACPI PM enable and power-gate / sclk-threshold init.
 * Each step logs on failure but the sequence continues; the last error
 * recorded is returned.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return 0 on success, otherwise the last failing step's error.
 */
int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_upload_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
3995
3996static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
3997 enum pp_clock_type type, uint32_t mask)
3998{
3999 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4000
4001 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4002 return -EINVAL;
4003
4004 switch (type) {
4005 case PP_SCLK:
4006 if (!data->sclk_dpm_key_disabled)
4007 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4008 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4009 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4010 break;
4011 case PP_MCLK:
4012 if (!data->mclk_dpm_key_disabled)
4013 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4014 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4015 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4016 break;
4017 case PP_PCIE:
4018 {
4019 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4020 uint32_t level = 0;
4021
4022 while (tmp >>= 1)
4023 level++;
4024
4025 if (!data->pcie_dpm_key_disabled)
4026 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4027 PPSMC_MSG_PCIeDPM_ForceLevel,
4028 level);
4029 break;
4030 }
4031 default:
4032 break;
4033 }
4034
4035 return 0;
4036}
4037
/*
 * Format the DPM level table of a clock domain into buf (sysfs style):
 * one "<index>: <value> [*]" line per level, with '*' marking the level
 * closest to the current clock/speed.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param type   which domain to print (PP_SCLK, PP_MCLK, PP_PCIE).
 * @param buf    output buffer (caller-provided, assumed PAGE_SIZE-class;
 *               sprintf is unbounded here — TODO confirm sizing).
 * @return number of bytes written.
 */
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Query the live sclk; the SMC leaves it in SMC_MSG_ARG_0. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* First level at or above the current clock is "current". */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* PCIe levels are matched exactly, not by threshold. */
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
4102
4103static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4104{
4105 if (mode) {
4106 /* stop auto-manage */
4107 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4108 PHM_PlatformCaps_MicrocodeFanControl))
4109 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4110 smu7_fan_ctrl_set_static_mode(hwmgr, mode);
4111 } else
4112 /* restart auto-manage */
4113 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
4114
4115 return 0;
4116}
4117
4118static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4119{
4120 if (hwmgr->fan_ctrl_is_in_default_mode)
4121 return hwmgr->fan_ctrl_default_mode;
4122 else
4123 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4124 CG_FDO_CTRL2, FDO_PWM_MODE);
4125}
4126
4127static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4128{
4129 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4130 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4131 struct smu7_single_dpm_table *golden_sclk_table =
4132 &(data->golden_dpm_table.sclk_table);
4133 int value;
4134
4135 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4136 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
4137 100 /
4138 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4139
4140 return value;
4141}
4142
4143static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4144{
4145 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4146 struct smu7_single_dpm_table *golden_sclk_table =
4147 &(data->golden_dpm_table.sclk_table);
4148 struct pp_power_state *ps;
4149 struct smu7_power_state *smu7_ps;
4150
4151 if (value > 20)
4152 value = 20;
4153
4154 ps = hwmgr->request_ps;
4155
4156 if (ps == NULL)
4157 return -EINVAL;
4158
4159 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4160
4161 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4162 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4163 value / 100 +
4164 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4165
4166 return 0;
4167}
4168
4169static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4170{
4171 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4172 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4173 struct smu7_single_dpm_table *golden_mclk_table =
4174 &(data->golden_dpm_table.mclk_table);
4175 int value;
4176
4177 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
4178 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
4179 100 /
4180 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4181
4182 return value;
4183}
4184
4185static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4186{
4187 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4188 struct smu7_single_dpm_table *golden_mclk_table =
4189 &(data->golden_dpm_table.mclk_table);
4190 struct pp_power_state *ps;
4191 struct smu7_power_state *smu7_ps;
4192
4193 if (value > 20)
4194 value = 20;
4195
4196 ps = hwmgr->request_ps;
4197
4198 if (ps == NULL)
4199 return -EINVAL;
4200
4201 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4202
4203 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4204 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4205 value / 100 +
4206 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4207
4208 return 0;
4209}
4210
4211
4212static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4213{
4214 struct phm_ppt_v1_information *table_info =
4215 (struct phm_ppt_v1_information *)hwmgr->pptable;
4216 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
4217 int i;
4218
4219 if (table_info == NULL)
4220 return -EINVAL;
4221
4222 dep_sclk_table = table_info->vdd_dep_on_sclk;
4223
4224 for (i = 0; i < dep_sclk_table->count; i++) {
4225 clocks->clock[i] = dep_sclk_table->entries[i].clk;
4226 clocks->count++;
4227 }
4228 return 0;
4229}
4230
4231static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4232{
4233 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4234
4235 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4236 return data->mem_latency_high;
4237 else if (clk >= MEM_FREQ_HIGH_LATENCY)
4238 return data->mem_latency_low;
4239 else
4240 return MEM_LATENCY_ERR;
4241}
4242
4243static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4244{
4245 struct phm_ppt_v1_information *table_info =
4246 (struct phm_ppt_v1_information *)hwmgr->pptable;
4247 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4248 int i;
4249
4250 if (table_info == NULL)
4251 return -EINVAL;
4252
4253 dep_mclk_table = table_info->vdd_dep_on_mclk;
4254
4255 for (i = 0; i < dep_mclk_table->count; i++) {
4256 clocks->clock[i] = dep_mclk_table->entries[i].clk;
4257 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4258 dep_mclk_table->entries[i].clk);
4259 clocks->count++;
4260 }
4261 return 0;
4262}
4263
4264static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4265 struct amd_pp_clocks *clocks)
4266{
4267 switch (type) {
4268 case amd_pp_sys_clock:
4269 smu7_get_sclks(hwmgr, clocks);
4270 break;
4271 case amd_pp_mem_clock:
4272 smu7_get_mclks(hwmgr, clocks);
4273 break;
4274 default:
4275 return -EINVAL;
4276 }
4277
4278 return 0;
4279}
4280
4281static struct pp_hwmgr_func smu7_hwmgr_funcs = {
4282 .backend_init = &smu7_hwmgr_backend_init,
4283 .backend_fini = &phm_hwmgr_backend_fini,
4284 .asic_setup = &smu7_setup_asic_task,
4285 .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
4286 .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
4287 .force_dpm_level = &smu7_force_dpm_level,
4288 .power_state_set = smu7_set_power_state_tasks,
4289 .get_power_state_size = smu7_get_power_state_size,
4290 .get_mclk = smu7_dpm_get_mclk,
4291 .get_sclk = smu7_dpm_get_sclk,
4292 .patch_boot_state = smu7_dpm_patch_boot_state,
4293 .get_pp_table_entry = smu7_get_pp_table_entry,
4294 .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
4295 .powerdown_uvd = smu7_powerdown_uvd,
4296 .powergate_uvd = smu7_powergate_uvd,
4297 .powergate_vce = smu7_powergate_vce,
4298 .disable_clock_power_gating = smu7_disable_clock_power_gating,
4299 .update_clock_gatings = smu7_update_clock_gatings,
4300 .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
4301 .display_config_changed = smu7_display_configuration_changed_task,
4302 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
4303 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
4304 .get_temperature = smu7_thermal_get_temperature,
4305 .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
4306 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
4307 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
4308 .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
4309 .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
4310 .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
4311 .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
4312 .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
4313 .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
4314 .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
4315 .check_states_equal = smu7_check_states_equal,
4316 .set_fan_control_mode = smu7_set_fan_control_mode,
4317 .get_fan_control_mode = smu7_get_fan_control_mode,
4318 .force_clock_level = smu7_force_clock_level,
4319 .print_clock_levels = smu7_print_clock_levels,
4320 .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
4321 .get_sclk_od = smu7_get_sclk_od,
4322 .set_sclk_od = smu7_set_sclk_od,
4323 .get_mclk_od = smu7_get_mclk_od,
4324 .set_mclk_od = smu7_set_mclk_od,
4325 .get_clock_by_type = smu7_get_clock_by_type,
4326 .read_sensor = smu7_read_sensor,
4327};
4328
4329uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
4330 uint32_t clock_insr)
4331{
4332 uint8_t i;
4333 uint32_t temp;
4334 uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
4335
4336 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
4337 for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
4338 temp = clock >> i;
4339
4340 if (temp >= min || i == 0)
4341 break;
4342 }
4343 return i;
4344}
4345
4346int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
4347{
4348 int ret = 0;
4349
4350 hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
4351 if (hwmgr->pp_table_version == PP_TABLE_V0)
4352 hwmgr->pptable_func = &pptable_funcs;
4353 else if (hwmgr->pp_table_version == PP_TABLE_V1)
4354 hwmgr->pptable_func = &pptable_v1_0_funcs;
4355
4356 pp_smu7_thermal_initialize(hwmgr);
4357 return ret;
4358}
4359
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index 378ab342c257..27e7f76ad8a6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -21,82 +21,100 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef POLARIS10_HWMGR_H 24#ifndef _SMU7_HWMGR_H
25#define POLARIS10_HWMGR_H 25#define _SMU7_HWMGR_H
26 26
27#include "hwmgr.h" 27#include "hwmgr.h"
28#include "smu74.h"
29#include "smu74_discrete.h"
30#include "ppatomctrl.h" 28#include "ppatomctrl.h"
31#include "polaris10_ppsmc.h"
32#include "polaris10_powertune.h"
33#include "polaris10_smumgr.h"
34 29
35#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2 30#define SMU7_MAX_HARDWARE_POWERLEVELS 2
36 31
37#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0 32#define SMU7_VOLTAGE_CONTROL_NONE 0x0
38#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1 33#define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1
39#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2 34#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
40#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3 35#define SMU7_VOLTAGE_CONTROL_MERGED 0x3
41 36
42#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 37#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
43#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 38#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
44#define DPMTABLE_UPDATE_SCLK 0x00000004 39#define DPMTABLE_UPDATE_SCLK 0x00000004
45#define DPMTABLE_UPDATE_MCLK 0x00000008 40#define DPMTABLE_UPDATE_MCLK 0x00000008
46 41
47struct polaris10_performance_level { 42enum gpu_pt_config_reg_type {
43 GPU_CONFIGREG_MMR = 0,
44 GPU_CONFIGREG_SMC_IND,
45 GPU_CONFIGREG_DIDT_IND,
46 GPU_CONFIGREG_GC_CAC_IND,
47 GPU_CONFIGREG_CACHE,
48 GPU_CONFIGREG_MAX
49};
50
51struct gpu_pt_config_reg {
52 uint32_t offset;
53 uint32_t mask;
54 uint32_t shift;
55 uint32_t value;
56 enum gpu_pt_config_reg_type type;
57};
58
59struct smu7_performance_level {
48 uint32_t memory_clock; 60 uint32_t memory_clock;
49 uint32_t engine_clock; 61 uint32_t engine_clock;
50 uint16_t pcie_gen; 62 uint16_t pcie_gen;
51 uint16_t pcie_lane; 63 uint16_t pcie_lane;
52}; 64};
53 65
54struct polaris10_uvd_clocks { 66struct smu7_thermal_temperature_setting {
67 long temperature_low;
68 long temperature_high;
69 long temperature_shutdown;
70};
71
72struct smu7_uvd_clocks {
55 uint32_t vclk; 73 uint32_t vclk;
56 uint32_t dclk; 74 uint32_t dclk;
57}; 75};
58 76
59struct polaris10_vce_clocks { 77struct smu7_vce_clocks {
60 uint32_t evclk; 78 uint32_t evclk;
61 uint32_t ecclk; 79 uint32_t ecclk;
62}; 80};
63 81
64struct polaris10_power_state { 82struct smu7_power_state {
65 uint32_t magic; 83 uint32_t magic;
66 struct polaris10_uvd_clocks uvd_clks; 84 struct smu7_uvd_clocks uvd_clks;
67 struct polaris10_vce_clocks vce_clks; 85 struct smu7_vce_clocks vce_clks;
68 uint32_t sam_clk; 86 uint32_t sam_clk;
69 uint16_t performance_level_count; 87 uint16_t performance_level_count;
70 bool dc_compatible; 88 bool dc_compatible;
71 uint32_t sclk_threshold; 89 uint32_t sclk_threshold;
72 struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS]; 90 struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
73}; 91};
74 92
75struct polaris10_dpm_level { 93struct smu7_dpm_level {
76 bool enabled; 94 bool enabled;
77 uint32_t value; 95 uint32_t value;
78 uint32_t param1; 96 uint32_t param1;
79}; 97};
80 98
81#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5 99#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
82#define MAX_REGULAR_DPM_NUMBER 8 100#define MAX_REGULAR_DPM_NUMBER 8
83#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500 101#define SMU7_MINIMUM_ENGINE_CLOCK 2500
84 102
85struct polaris10_single_dpm_table { 103struct smu7_single_dpm_table {
86 uint32_t count; 104 uint32_t count;
87 struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; 105 struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
88}; 106};
89 107
90struct polaris10_dpm_table { 108struct smu7_dpm_table {
91 struct polaris10_single_dpm_table sclk_table; 109 struct smu7_single_dpm_table sclk_table;
92 struct polaris10_single_dpm_table mclk_table; 110 struct smu7_single_dpm_table mclk_table;
93 struct polaris10_single_dpm_table pcie_speed_table; 111 struct smu7_single_dpm_table pcie_speed_table;
94 struct polaris10_single_dpm_table vddc_table; 112 struct smu7_single_dpm_table vddc_table;
95 struct polaris10_single_dpm_table vddci_table; 113 struct smu7_single_dpm_table vddci_table;
96 struct polaris10_single_dpm_table mvdd_table; 114 struct smu7_single_dpm_table mvdd_table;
97}; 115};
98 116
99struct polaris10_clock_registers { 117struct smu7_clock_registers {
100 uint32_t vCG_SPLL_FUNC_CNTL; 118 uint32_t vCG_SPLL_FUNC_CNTL;
101 uint32_t vCG_SPLL_FUNC_CNTL_2; 119 uint32_t vCG_SPLL_FUNC_CNTL_2;
102 uint32_t vCG_SPLL_FUNC_CNTL_3; 120 uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -117,42 +135,35 @@ struct polaris10_clock_registers {
117#define DISABLE_MC_LOADMICROCODE 1 135#define DISABLE_MC_LOADMICROCODE 1
118#define DISABLE_MC_CFGPROGRAMMING 2 136#define DISABLE_MC_CFGPROGRAMMING 2
119 137
120struct polaris10_voltage_smio_registers { 138struct smu7_voltage_smio_registers {
121 uint32_t vS0_VID_LOWER_SMIO_CNTL; 139 uint32_t vS0_VID_LOWER_SMIO_CNTL;
122}; 140};
123 141
124#define POLARIS10_MAX_LEAKAGE_COUNT 8 142#define SMU7_MAX_LEAKAGE_COUNT 8
125 143
126struct polaris10_leakage_voltage { 144struct smu7_leakage_voltage {
127 uint16_t count; 145 uint16_t count;
128 uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT]; 146 uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT];
129 uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT]; 147 uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
130}; 148};
131 149
132struct polaris10_vbios_boot_state { 150struct smu7_vbios_boot_state {
133 uint16_t mvdd_bootup_value; 151 uint16_t mvdd_bootup_value;
134 uint16_t vddc_bootup_value; 152 uint16_t vddc_bootup_value;
135 uint16_t vddci_bootup_value; 153 uint16_t vddci_bootup_value;
154 uint16_t vddgfx_bootup_value;
136 uint32_t sclk_bootup_value; 155 uint32_t sclk_bootup_value;
137 uint32_t mclk_bootup_value; 156 uint32_t mclk_bootup_value;
138 uint16_t pcie_gen_bootup_value; 157 uint16_t pcie_gen_bootup_value;
139 uint16_t pcie_lane_bootup_value; 158 uint16_t pcie_lane_bootup_value;
140}; 159};
141 160
142/* Ultra Low Voltage parameter structure */ 161struct smu7_display_timing {
143struct polaris10_ulv_parm {
144 bool ulv_supported;
145 uint32_t cg_ulv_parameter;
146 uint32_t ulv_volt_change_delay;
147 struct polaris10_performance_level ulv_power_level;
148};
149
150struct polaris10_display_timing {
151 uint32_t min_clock_in_sr; 162 uint32_t min_clock_in_sr;
152 uint32_t num_existing_displays; 163 uint32_t num_existing_displays;
153}; 164};
154 165
155struct polaris10_dpmlevel_enable_mask { 166struct smu7_dpmlevel_enable_mask {
156 uint32_t uvd_dpm_enable_mask; 167 uint32_t uvd_dpm_enable_mask;
157 uint32_t vce_dpm_enable_mask; 168 uint32_t vce_dpm_enable_mask;
158 uint32_t acp_dpm_enable_mask; 169 uint32_t acp_dpm_enable_mask;
@@ -162,18 +173,15 @@ struct polaris10_dpmlevel_enable_mask {
162 uint32_t pcie_dpm_enable_mask; 173 uint32_t pcie_dpm_enable_mask;
163}; 174};
164 175
165struct polaris10_pcie_perf_range { 176struct smu7_pcie_perf_range {
166 uint16_t max; 177 uint16_t max;
167 uint16_t min; 178 uint16_t min;
168}; 179};
169 180
170struct polaris10_hwmgr { 181struct smu7_hwmgr {
171 struct polaris10_dpm_table dpm_table; 182 struct smu7_dpm_table dpm_table;
172 struct polaris10_dpm_table golden_dpm_table; 183 struct smu7_dpm_table golden_dpm_table;
173 SMU74_Discrete_DpmTable smc_state_table;
174 struct SMU74_Discrete_Ulv ulv_setting;
175 184
176 struct polaris10_range_table range_table[NUM_SCLK_RANGE];
177 uint32_t voting_rights_clients0; 185 uint32_t voting_rights_clients0;
178 uint32_t voting_rights_clients1; 186 uint32_t voting_rights_clients1;
179 uint32_t voting_rights_clients2; 187 uint32_t voting_rights_clients2;
@@ -185,12 +193,11 @@ struct polaris10_hwmgr {
185 uint32_t static_screen_threshold_unit; 193 uint32_t static_screen_threshold_unit;
186 uint32_t static_screen_threshold; 194 uint32_t static_screen_threshold;
187 uint32_t voltage_control; 195 uint32_t voltage_control;
188 uint32_t vddc_vddci_delta; 196 uint32_t vdd_gfx_control;
189 197 uint32_t vddc_vddgfx_delta;
190 uint32_t active_auto_throttle_sources; 198 uint32_t active_auto_throttle_sources;
191 199
192 struct polaris10_clock_registers clock_registers; 200 struct smu7_clock_registers clock_registers;
193 struct polaris10_voltage_smio_registers voltage_smio_registers;
194 201
195 bool is_memory_gddr5; 202 bool is_memory_gddr5;
196 uint16_t acpi_vddc; 203 uint16_t acpi_vddc;
@@ -200,8 +207,9 @@ struct polaris10_hwmgr {
200 uint32_t pcie_gen_cap; 207 uint32_t pcie_gen_cap;
201 uint32_t pcie_lane_cap; 208 uint32_t pcie_lane_cap;
202 uint32_t pcie_spc_cap; 209 uint32_t pcie_spc_cap;
203 struct polaris10_leakage_voltage vddc_leakage; 210 struct smu7_leakage_voltage vddc_leakage;
204 struct polaris10_leakage_voltage Vddci_leakage; 211 struct smu7_leakage_voltage vddci_leakage;
212 struct smu7_leakage_voltage vddcgfx_leakage;
205 213
206 uint32_t mvdd_control; 214 uint32_t mvdd_control;
207 uint32_t vddc_mask_low; 215 uint32_t vddc_mask_low;
@@ -210,30 +218,23 @@ struct polaris10_hwmgr {
210 uint16_t min_vddc_in_pptable; 218 uint16_t min_vddc_in_pptable;
211 uint16_t max_vddci_in_pptable; 219 uint16_t max_vddci_in_pptable;
212 uint16_t min_vddci_in_pptable; 220 uint16_t min_vddci_in_pptable;
213 uint32_t mclk_strobe_mode_threshold;
214 uint32_t mclk_stutter_mode_threshold;
215 uint32_t mclk_edc_enable_threshold;
216 uint32_t mclk_edcwr_enable_threshold;
217 bool is_uvd_enabled; 221 bool is_uvd_enabled;
218 struct polaris10_vbios_boot_state vbios_boot_state; 222 struct smu7_vbios_boot_state vbios_boot_state;
219 223
220 bool pcie_performance_request; 224 bool pcie_performance_request;
221 bool battery_state; 225 bool battery_state;
222 bool is_tlu_enabled; 226 bool is_tlu_enabled;
227 bool disable_handshake;
228 bool smc_voltage_control_enabled;
229 bool vbi_time_out_support;
223 230
224 /* ---- SMC SRAM Address of firmware header tables ---- */ 231 uint32_t soft_regs_start;
225 uint32_t sram_end;
226 uint32_t dpm_table_start;
227 uint32_t soft_regs_start;
228 uint32_t mc_reg_table_start;
229 uint32_t fan_table_start;
230 uint32_t arb_table_start;
231
232 /* ---- Stuff originally coming from Evergreen ---- */ 232 /* ---- Stuff originally coming from Evergreen ---- */
233 uint32_t vddci_control; 233 uint32_t vddci_control;
234 struct pp_atomctrl_voltage_table vddc_voltage_table; 234 struct pp_atomctrl_voltage_table vddc_voltage_table;
235 struct pp_atomctrl_voltage_table vddci_voltage_table; 235 struct pp_atomctrl_voltage_table vddci_voltage_table;
236 struct pp_atomctrl_voltage_table mvdd_voltage_table; 236 struct pp_atomctrl_voltage_table mvdd_voltage_table;
237 struct pp_atomctrl_voltage_table vddgfx_voltage_table;
237 238
238 uint32_t mgcg_cgtt_local2; 239 uint32_t mgcg_cgtt_local2;
239 uint32_t mgcg_cgtt_local3; 240 uint32_t mgcg_cgtt_local3;
@@ -247,7 +248,7 @@ struct polaris10_hwmgr {
247 bool performance_request_registered; 248 bool performance_request_registered;
248 249
249 /* ---- Low Power Features ---- */ 250 /* ---- Low Power Features ---- */
250 struct polaris10_ulv_parm ulv; 251 bool ulv_supported;
251 252
252 /* ---- CAC Stuff ---- */ 253 /* ---- CAC Stuff ---- */
253 uint32_t cac_table_start; 254 uint32_t cac_table_start;
@@ -261,8 +262,8 @@ struct polaris10_hwmgr {
261 bool enable_tdc_limit_feature; 262 bool enable_tdc_limit_feature;
262 bool enable_pkg_pwr_tracking_feature; 263 bool enable_pkg_pwr_tracking_feature;
263 bool disable_uvd_power_tune_feature; 264 bool disable_uvd_power_tune_feature;
264 const struct polaris10_pt_defaults *power_tune_defaults; 265
265 struct SMU74_Discrete_PmFuses power_tune_table; 266
266 uint32_t dte_tj_offset; 267 uint32_t dte_tj_offset;
267 uint32_t fast_watermark_threshold; 268 uint32_t fast_watermark_threshold;
268 269
@@ -270,23 +271,22 @@ struct polaris10_hwmgr {
270 bool vddc_phase_shed_control; 271 bool vddc_phase_shed_control;
271 272
272 /* ---- DI/DT ---- */ 273 /* ---- DI/DT ---- */
273 struct polaris10_display_timing display_timing; 274 struct smu7_display_timing display_timing;
274 uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
275 275
276 /* ---- Thermal Temperature Setting ---- */ 276 /* ---- Thermal Temperature Setting ---- */
277 struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask; 277 struct smu7_thermal_temperature_setting thermal_temp_setting;
278 struct smu7_dpmlevel_enable_mask dpm_level_enable_mask;
278 uint32_t need_update_smu7_dpm_table; 279 uint32_t need_update_smu7_dpm_table;
279 uint32_t sclk_dpm_key_disabled; 280 uint32_t sclk_dpm_key_disabled;
280 uint32_t mclk_dpm_key_disabled; 281 uint32_t mclk_dpm_key_disabled;
281 uint32_t pcie_dpm_key_disabled; 282 uint32_t pcie_dpm_key_disabled;
282 uint32_t min_engine_clocks; 283 uint32_t min_engine_clocks;
283 struct polaris10_pcie_perf_range pcie_gen_performance; 284 struct smu7_pcie_perf_range pcie_gen_performance;
284 struct polaris10_pcie_perf_range pcie_lane_performance; 285 struct smu7_pcie_perf_range pcie_lane_performance;
285 struct polaris10_pcie_perf_range pcie_gen_power_saving; 286 struct smu7_pcie_perf_range pcie_gen_power_saving;
286 struct polaris10_pcie_perf_range pcie_lane_power_saving; 287 struct smu7_pcie_perf_range pcie_lane_power_saving;
287 bool use_pcie_performance_levels; 288 bool use_pcie_performance_levels;
288 bool use_pcie_power_saving_levels; 289 bool use_pcie_power_saving_levels;
289 uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
290 uint32_t mclk_activity_target; 290 uint32_t mclk_activity_target;
291 uint32_t mclk_dpm0_activity_target; 291 uint32_t mclk_dpm0_activity_target;
292 uint32_t low_sclk_interrupt_threshold; 292 uint32_t low_sclk_interrupt_threshold;
@@ -306,49 +306,48 @@ struct polaris10_hwmgr {
306 uint32_t up_hyst; 306 uint32_t up_hyst;
307 uint32_t disable_dpm_mask; 307 uint32_t disable_dpm_mask;
308 bool apply_optimized_settings; 308 bool apply_optimized_settings;
309
309 uint32_t avfs_vdroop_override_setting; 310 uint32_t avfs_vdroop_override_setting;
310 bool apply_avfs_cks_off_voltage; 311 bool apply_avfs_cks_off_voltage;
311 uint32_t frame_time_x2; 312 uint32_t frame_time_x2;
313 uint16_t mem_latency_high;
314 uint16_t mem_latency_low;
312}; 315};
313 316
314/* To convert to Q8.8 format for firmware */ 317/* To convert to Q8.8 format for firmware */
315#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256 318#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256
316 319
317enum Polaris10_I2CLineID { 320enum SMU7_I2CLineID {
318 Polaris10_I2CLineID_DDC1 = 0x90, 321 SMU7_I2CLineID_DDC1 = 0x90,
319 Polaris10_I2CLineID_DDC2 = 0x91, 322 SMU7_I2CLineID_DDC2 = 0x91,
320 Polaris10_I2CLineID_DDC3 = 0x92, 323 SMU7_I2CLineID_DDC3 = 0x92,
321 Polaris10_I2CLineID_DDC4 = 0x93, 324 SMU7_I2CLineID_DDC4 = 0x93,
322 Polaris10_I2CLineID_DDC5 = 0x94, 325 SMU7_I2CLineID_DDC5 = 0x94,
323 Polaris10_I2CLineID_DDC6 = 0x95, 326 SMU7_I2CLineID_DDC6 = 0x95,
324 Polaris10_I2CLineID_SCLSDA = 0x96, 327 SMU7_I2CLineID_SCLSDA = 0x96,
325 Polaris10_I2CLineID_DDCVGA = 0x97 328 SMU7_I2CLineID_DDCVGA = 0x97
326}; 329};
327 330
328#define POLARIS10_I2C_DDC1DATA 0 331#define SMU7_I2C_DDC1DATA 0
329#define POLARIS10_I2C_DDC1CLK 1 332#define SMU7_I2C_DDC1CLK 1
330#define POLARIS10_I2C_DDC2DATA 2 333#define SMU7_I2C_DDC2DATA 2
331#define POLARIS10_I2C_DDC2CLK 3 334#define SMU7_I2C_DDC2CLK 3
332#define POLARIS10_I2C_DDC3DATA 4 335#define SMU7_I2C_DDC3DATA 4
333#define POLARIS10_I2C_DDC3CLK 5 336#define SMU7_I2C_DDC3CLK 5
334#define POLARIS10_I2C_SDA 40 337#define SMU7_I2C_SDA 40
335#define POLARIS10_I2C_SCL 41 338#define SMU7_I2C_SCL 41
336#define POLARIS10_I2C_DDC4DATA 65 339#define SMU7_I2C_DDC4DATA 65
337#define POLARIS10_I2C_DDC4CLK 66 340#define SMU7_I2C_DDC4CLK 66
338#define POLARIS10_I2C_DDC5DATA 0x48 341#define SMU7_I2C_DDC5DATA 0x48
339#define POLARIS10_I2C_DDC5CLK 0x49 342#define SMU7_I2C_DDC5CLK 0x49
340#define POLARIS10_I2C_DDC6DATA 0x4a 343#define SMU7_I2C_DDC6DATA 0x4a
341#define POLARIS10_I2C_DDC6CLK 0x4b 344#define SMU7_I2C_DDC6CLK 0x4b
342#define POLARIS10_I2C_DDCVGADATA 0x4c 345#define SMU7_I2C_DDCVGADATA 0x4c
343#define POLARIS10_I2C_DDCVGACLK 0x4d 346#define SMU7_I2C_DDCVGACLK 0x4d
344 347
345#define POLARIS10_UNUSED_GPIO_PIN 0x7F 348#define SMU7_UNUSED_GPIO_PIN 0x7F
346 349uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
347int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); 350uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
348 351 uint32_t clock_insr);
349int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
350int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
351int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
352int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
353#endif 352#endif
354 353
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index b9cb240a135d..260fce050175 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -20,546 +20,364 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23
24#include "hwmgr.h" 23#include "hwmgr.h"
25#include "smumgr.h" 24#include "smumgr.h"
26#include "polaris10_hwmgr.h" 25#include "smu7_hwmgr.h"
27#include "polaris10_powertune.h" 26#include "smu7_powertune.h"
28#include "polaris10_smumgr.h"
29#include "smu74_discrete.h"
30#include "pp_debug.h" 27#include "pp_debug.h"
31#include "gca/gfx_8_0_d.h" 28#include "smu7_common.h"
32#include "gca/gfx_8_0_sh_mask.h"
33#include "oss/oss_3_0_sh_mask.h"
34 29
35#define VOLTAGE_SCALE 4 30#define VOLTAGE_SCALE 4
36#define POWERTUNE_DEFAULT_SET_MAX 1
37 31
38uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; 32static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
39 33
40struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { 34static struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
41/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 35/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
42 * Offset Mask Shift Value Type 36 * Offset Mask Shift Value Type
43 * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 37 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
44 */ 38 */
45 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, 39 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
46 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, 40 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
47 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, 41 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
48 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, 42 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
49 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, 43 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
50 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, 44 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
51 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, 45 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
52 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, 46 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
53 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND }, 47 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
54 48
55 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, 49 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
56 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, 50 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
57 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, 51 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
58 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, 52 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
59 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, 53 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
60 54
61 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, 55 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
62 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, 56 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
63 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, 57 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
64 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, 58 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
65 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, 59 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
66 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, 60 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
67 61
68 { 0xFFFFFFFF } 62 { 0xFFFFFFFF }
69}; 63};
70 64
71struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { 65static struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
72/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 66/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
73 * Offset Mask Shift Value Type 67 * Offset Mask Shift Value Type
74 * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 68 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
75 */ 69 */
76 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND }, 70 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, GPU_CONFIGREG_GC_CAC_IND },
77 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, 71 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, GPU_CONFIGREG_GC_CAC_IND },
78 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, 72 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, GPU_CONFIGREG_GC_CAC_IND },
79 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, 73 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, GPU_CONFIGREG_GC_CAC_IND },
80 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, 74 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, GPU_CONFIGREG_GC_CAC_IND },
81 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, 75 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, GPU_CONFIGREG_GC_CAC_IND },
82 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, 76 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, GPU_CONFIGREG_GC_CAC_IND },
83 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, 77 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, GPU_CONFIGREG_GC_CAC_IND },
84 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, 78 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, GPU_CONFIGREG_GC_CAC_IND },
85 79
86 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, 80 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, GPU_CONFIGREG_GC_CAC_IND },
87 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, 81 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, GPU_CONFIGREG_GC_CAC_IND },
88 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, 82 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, GPU_CONFIGREG_GC_CAC_IND },
89 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, 83 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, GPU_CONFIGREG_GC_CAC_IND },
90 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, 84 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, GPU_CONFIGREG_GC_CAC_IND },
91 85
92 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, 86 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, GPU_CONFIGREG_GC_CAC_IND },
93 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, 87 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, GPU_CONFIGREG_GC_CAC_IND },
94 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, 88 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, GPU_CONFIGREG_GC_CAC_IND },
95 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, 89 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, GPU_CONFIGREG_GC_CAC_IND },
96 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, 90 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, GPU_CONFIGREG_GC_CAC_IND },
97 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND }, 91 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, GPU_CONFIGREG_GC_CAC_IND },
98 92
99 { 0xFFFFFFFF } 93 { 0xFFFFFFFF }
100}; 94};
101 95
102struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { 96static struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
103/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 97/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
104 * Offset Mask Shift Value Type 98 * Offset Mask Shift Value Type
105 * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 99 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
106 */ 100 */
107 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, 101 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
108 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, 102 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
109 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, 103 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
110 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, 104 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
111 105
112 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, 106 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
113 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, 107 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
114 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, 108 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
115 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, 109 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
116 110
117 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, 111 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
118 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 112 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
119 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 113 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
120 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 114 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
121 115
122 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 116 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
123 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 117 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
124 118
125 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 119 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
126 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 120 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
127 121
128 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, 122 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
129 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 123 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
130 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, 124 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
131 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 125 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
132 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 126 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
133 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 127 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
134 128
135 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 129 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
136 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 130 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
137 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 131 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
138 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, 132 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
139 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 133 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
140 134
141 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 135 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
142 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, 136 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
143 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, 137 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
144 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 138 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
145 139
146 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 140 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
147 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 141 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
148 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 142 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
149 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 143 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
150 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 144 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
151 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 145 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
152 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 146 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
153 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 147 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
154 148
155 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, 149 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
156 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 150 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
157 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, 151 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
158 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, 152 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
159 153
160 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, 154 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
161 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, 155 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
162 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 156 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
163 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 157 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
164 158
165 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 159 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
166 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 160 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
167 161
168 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 162 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
169 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, 163 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
170 164
171 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, 165 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
172 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 166 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
173 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, 167 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
174 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 168 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
175 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 169 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
176 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 170 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
177 171
178 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 172 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
179 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 173 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
180 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 174 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
181 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, 175 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
182 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 176 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
183 177
184 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 178 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
185 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, 179 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
186 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, 180 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
187 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 181 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
188 182
189 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 183 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
190 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 184 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
191 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 185 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
192 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 186 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
193 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 187 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
194 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, 188 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
195 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, 189 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
196 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 190 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
197 191
198 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, 192 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
199 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, 193 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
200 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 194 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
201 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, 195 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
202 196
203 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, 197 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
204 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 198 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
205 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 199 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
206 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 200 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
207 201
208 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 202 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
209 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 203 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
210 204
211 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 205 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
212 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 206 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
213 207
214 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 208 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
215 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 209 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
216 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, 210 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
217 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 211 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
218 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 212 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
219 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 213 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
220 214
221 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 215 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
222 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 216 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
223 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 217 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
224 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, 218 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
225 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 219 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
226 220
227 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 221 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
228 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 222 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
229 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 223 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
230 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 224 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
231 225
232 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 226 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
233 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 227 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
234 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 228 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
235 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 229 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
236 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 230 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
237 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 231 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
238 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 232 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
239 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 233 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
240 234
241 { 0xFFFFFFFF } 235 { 0xFFFFFFFF }
242}; 236};
243 237
244struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { 238static struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
245/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 239/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
246 * Offset Mask Shift Value Type 240 * Offset Mask Shift Value Type
247 * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 241 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
248 */ 242 */
249 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, 243 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
250 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, 244 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
251 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, 245 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
252 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, 246 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
253 247
254 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, 248 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
255 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, 249 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
256 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, 250 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
257 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, 251 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
258 252
259 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, 253 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
260 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 254 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
261 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 255 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
262 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 256 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
263 257
264 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 258 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
265 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 259 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
266 260
267 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 261 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
268 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 262 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
269 263
270 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, 264 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
271 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 265 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
272 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, 266 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
273 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 267 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
274 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 268 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
275 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 269 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
276 270
277 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 271 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
278 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 272 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
279 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 273 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
280 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, 274 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
281 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 275 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
282 276
283 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 277 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
284 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, 278 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
285 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, 279 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
286 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 280 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
287 281
288 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 282 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
289 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 283 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
290 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 284 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
291 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 285 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
292 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 286 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
293 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 287 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
294 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 288 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
295 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 289 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
296 290
297 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, 291 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
298 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 292 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
299 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, 293 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
300 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, 294 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
301 295
302 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, 296 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
303 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, 297 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
304 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 298 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
305 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 299 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
306 300
307 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 301 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
308 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 302 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
309 303
310 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 304 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
311 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, 305 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
312 306
313 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, 307 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
314 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 308 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
315 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, 309 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
316 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 310 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
317 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 311 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
318 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 312 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
319 313
320 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 314 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
321 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 315 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
322 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 316 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
323 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, 317 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
324 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 318 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
325 319
326 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 320 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
327 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, 321 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
328 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, 322 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
329 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 323 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
330 324
331 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 325 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
332 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 326 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
333 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 327 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
334 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 328 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
335 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 329 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
336 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, 330 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
337 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, 331 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
338 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 332 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
339 333
340 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, 334 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
341 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, 335 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
342 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 336 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
343 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, 337 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
344 338
345 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, 339 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
346 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 340 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
347 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 341 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
348 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 342 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
349 343
350 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 344 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
351 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 345 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
352 346
353 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 347 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
354 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, 348 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
355 349
356 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 350 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
357 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 351 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
358 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, 352 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
359 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 353 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
360 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 354 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
361 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 355 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
362 356
363 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 357 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
364 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 358 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
365 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 359 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
366 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, 360 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
367 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 361 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
368 362
369 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 363 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
370 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 364 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
371 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, 365 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
372 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 366 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
373 367
374 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, 368 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
375 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 369 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
376 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 370 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
377 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 371 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
378 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 372 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
379 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 373 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
380 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, 374 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
381 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, 375 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
382 { 0xFFFFFFFF } 376 { 0xFFFFFFFF }
383}; 377};
384 378
385static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
386 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
387 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
388 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
389 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
390 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
391};
392
393void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
394{
395 struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
396 struct phm_ppt_v1_information *table_info =
397 (struct phm_ppt_v1_information *)(hwmgr->pptable);
398
399 if (table_info &&
400 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
401 table_info->cac_dtp_table->usPowerTuneDataSetID)
402 polaris10_hwmgr->power_tune_defaults =
403 &polaris10_power_tune_data_set_array
404 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
405 else
406 polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
407
408}
409
410static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
411{
412 uint32_t tmp;
413 tmp = raw_setting * 4096 / 100;
414 return (uint16_t)tmp;
415}
416
417int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
418{
419 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
420 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
421 SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
422 struct phm_ppt_v1_information *table_info =
423 (struct phm_ppt_v1_information *)(hwmgr->pptable);
424 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
425 struct pp_advance_fan_control_parameters *fan_table=
426 &hwmgr->thermal_controller.advanceFanControlParameters;
427 int i, j, k;
428 const uint16_t *pdef1;
429 const uint16_t *pdef2;
430
431 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
432 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
433
434 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
435 "Target Operating Temp is out of Range!",
436 );
437
438 dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
439 cac_dtp_table->usTargetOperatingTemp * 256);
440 dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
441 cac_dtp_table->usTemperatureLimitHotspot * 256);
442 dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
443 scale_fan_gain_settings(fan_table->usFanGainEdge));
444 dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
445 scale_fan_gain_settings(fan_table->usFanGainHotspot));
446
447 pdef1 = defaults->BAPMTI_R;
448 pdef2 = defaults->BAPMTI_RC;
449
450 for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
451 for (j = 0; j < SMU74_DTE_SOURCES; j++) {
452 for (k = 0; k < SMU74_DTE_SINKS; k++) {
453 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
454 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
455 pdef1++;
456 pdef2++;
457 }
458 }
459 }
460
461 return 0;
462}
463
464static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
465{
466 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
467 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
468
469 data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
470 data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
471 data->power_tune_table.SviLoadLineTrimVddC = 3;
472 data->power_tune_table.SviLoadLineOffsetVddC = 0;
473
474 return 0;
475}
476 379
477static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) 380static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
478{
479 uint16_t tdc_limit;
480 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
481 struct phm_ppt_v1_information *table_info =
482 (struct phm_ppt_v1_information *)(hwmgr->pptable);
483 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
484
485 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
486 data->power_tune_table.TDC_VDDC_PkgLimit =
487 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
488 data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
489 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
490 data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
491
492 return 0;
493}
494
495static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
496{
497 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
498 const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
499 uint32_t temp;
500
501 if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
502 fuse_table_offset +
503 offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
504 (uint32_t *)&temp, data->sram_end))
505 PP_ASSERT_WITH_CODE(false,
506 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
507 return -EINVAL);
508 else {
509 data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
510 data->power_tune_table.LPMLTemperatureMin =
511 (uint8_t)((temp >> 16) & 0xff);
512 data->power_tune_table.LPMLTemperatureMax =
513 (uint8_t)((temp >> 8) & 0xff);
514 data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
515 }
516 return 0;
517}
518
519static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
520{
521 int i;
522 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
523
524 /* Currently not used. Set all to zero. */
525 for (i = 0; i < 16; i++)
526 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
527
528 return 0;
529}
530
531static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
532{
533 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
534
535 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
536 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
537 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
538 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
539
540 data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
541 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
542 return 0;
543}
544
545static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
546{
547 int i;
548 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
549
550 /* Currently not used. Set all to zero. */
551 for (i = 0; i < 16; i++)
552 data->power_tune_table.GnbLPML[i] = 0;
553
554 return 0;
555}
556
557static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
558{
559 return 0;
560}
561
562static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
563{ 381{
564 382
565 uint32_t en = enable ? 1 : 0; 383 uint32_t en = enable ? 1 : 0;
@@ -608,29 +426,29 @@ static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
608 return result; 426 return result;
609} 427}
610 428
611static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr, 429static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
612 struct polaris10_pt_config_reg *cac_config_regs) 430 struct gpu_pt_config_reg *cac_config_regs)
613{ 431{
614 struct polaris10_pt_config_reg *config_regs = cac_config_regs; 432 struct gpu_pt_config_reg *config_regs = cac_config_regs;
615 uint32_t cache = 0; 433 uint32_t cache = 0;
616 uint32_t data = 0; 434 uint32_t data = 0;
617 435
618 PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); 436 PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
619 437
620 while (config_regs->offset != 0xFFFFFFFF) { 438 while (config_regs->offset != 0xFFFFFFFF) {
621 if (config_regs->type == POLARIS10_CONFIGREG_CACHE) 439 if (config_regs->type == GPU_CONFIGREG_CACHE)
622 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 440 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
623 else { 441 else {
624 switch (config_regs->type) { 442 switch (config_regs->type) {
625 case POLARIS10_CONFIGREG_SMC_IND: 443 case GPU_CONFIGREG_SMC_IND:
626 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); 444 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
627 break; 445 break;
628 446
629 case POLARIS10_CONFIGREG_DIDT_IND: 447 case GPU_CONFIGREG_DIDT_IND:
630 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); 448 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
631 break; 449 break;
632 450
633 case POLARIS10_CONFIGREG_GC_CAC_IND: 451 case GPU_CONFIGREG_GC_CAC_IND:
634 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); 452 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
635 break; 453 break;
636 454
@@ -644,15 +462,15 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
644 data |= cache; 462 data |= cache;
645 463
646 switch (config_regs->type) { 464 switch (config_regs->type) {
647 case POLARIS10_CONFIGREG_SMC_IND: 465 case GPU_CONFIGREG_SMC_IND:
648 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); 466 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
649 break; 467 break;
650 468
651 case POLARIS10_CONFIGREG_DIDT_IND: 469 case GPU_CONFIGREG_DIDT_IND:
652 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); 470 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
653 break; 471 break;
654 472
655 case POLARIS10_CONFIGREG_GC_CAC_IND: 473 case GPU_CONFIGREG_GC_CAC_IND:
656 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); 474 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
657 break; 475 break;
658 476
@@ -669,7 +487,7 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
669 return 0; 487 return 0;
670} 488}
671 489
672int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) 490int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
673{ 491{
674 int result; 492 int result;
675 uint32_t num_se = 0; 493 uint32_t num_se = 0;
@@ -699,20 +517,20 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
699 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); 517 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
700 518
701 if (hwmgr->chip_id == CHIP_POLARIS10) { 519 if (hwmgr->chip_id == CHIP_POLARIS10) {
702 result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); 520 result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
703 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 521 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
704 result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); 522 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
705 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 523 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
706 } else if (hwmgr->chip_id == CHIP_POLARIS11) { 524 } else if (hwmgr->chip_id == CHIP_POLARIS11) {
707 result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); 525 result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
708 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 526 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
709 result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); 527 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
710 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 528 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
711 } 529 }
712 } 530 }
713 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); 531 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
714 532
715 result = polaris10_enable_didt(hwmgr, true); 533 result = smu7_enable_didt(hwmgr, true);
716 PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); 534 PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
717 535
718 /* TO DO Post DIDT enable clock gating */ 536 /* TO DO Post DIDT enable clock gating */
@@ -721,7 +539,7 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
721 return 0; 539 return 0;
722} 540}
723 541
724int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) 542int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
725{ 543{
726 int result; 544 int result;
727 545
@@ -731,7 +549,7 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
731 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { 549 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
732 /* TO DO Pre DIDT disable clock gating */ 550 /* TO DO Pre DIDT disable clock gating */
733 551
734 result = polaris10_enable_didt(hwmgr, false); 552 result = smu7_enable_didt(hwmgr, false);
735 PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result); 553 PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
736 /* TO DO Post DIDT enable clock gating */ 554 /* TO DO Post DIDT enable clock gating */
737 } 555 }
@@ -739,95 +557,9 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
739 return 0; 557 return 0;
740} 558}
741 559
742 560int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
743static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
744{ 561{
745 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 562 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
746 struct phm_ppt_v1_information *table_info =
747 (struct phm_ppt_v1_information *)(hwmgr->pptable);
748 uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
749 uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
750 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
751
752 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
753 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
754
755 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
756 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
757 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
758 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
759
760 return 0;
761}
762
763int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
764{
765 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
766 uint32_t pm_fuse_table_offset;
767
768 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
769 PHM_PlatformCaps_PowerContainment)) {
770 if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
771 SMU7_FIRMWARE_HEADER_LOCATION +
772 offsetof(SMU74_Firmware_Header, PmFuseTable),
773 &pm_fuse_table_offset, data->sram_end))
774 PP_ASSERT_WITH_CODE(false,
775 "Attempt to get pm_fuse_table_offset Failed!",
776 return -EINVAL);
777
778 if (polaris10_populate_svi_load_line(hwmgr))
779 PP_ASSERT_WITH_CODE(false,
780 "Attempt to populate SviLoadLine Failed!",
781 return -EINVAL);
782
783 if (polaris10_populate_tdc_limit(hwmgr))
784 PP_ASSERT_WITH_CODE(false,
785 "Attempt to populate TDCLimit Failed!", return -EINVAL);
786
787 if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
788 PP_ASSERT_WITH_CODE(false,
789 "Attempt to populate TdcWaterfallCtl, "
790 "LPMLTemperature Min and Max Failed!",
791 return -EINVAL);
792
793 if (0 != polaris10_populate_temperature_scaler(hwmgr))
794 PP_ASSERT_WITH_CODE(false,
795 "Attempt to populate LPMLTemperatureScaler Failed!",
796 return -EINVAL);
797
798 if (polaris10_populate_fuzzy_fan(hwmgr))
799 PP_ASSERT_WITH_CODE(false,
800 "Attempt to populate Fuzzy Fan Control parameters Failed!",
801 return -EINVAL);
802
803 if (polaris10_populate_gnb_lpml(hwmgr))
804 PP_ASSERT_WITH_CODE(false,
805 "Attempt to populate GnbLPML Failed!",
806 return -EINVAL);
807
808 if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
809 PP_ASSERT_WITH_CODE(false,
810 "Attempt to populate GnbLPML Min and Max Vid Failed!",
811 return -EINVAL);
812
813 if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
814 PP_ASSERT_WITH_CODE(false,
815 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
816 "Sidd Failed!", return -EINVAL);
817
818 if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
819 (uint8_t *)&data->power_tune_table,
820 (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
821 PP_ASSERT_WITH_CODE(false,
822 "Attempt to download PmFuseTable Failed!",
823 return -EINVAL);
824 }
825 return 0;
826}
827
828int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
829{
830 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
831 int result = 0; 563 int result = 0;
832 564
833 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 565 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -843,9 +575,9 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
843 return result; 575 return result;
844} 576}
845 577
846int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) 578int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
847{ 579{
848 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 580 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
849 int result = 0; 581 int result = 0;
850 582
851 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 583 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -860,9 +592,9 @@ int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
860 return result; 592 return result;
861} 593}
862 594
863int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) 595int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
864{ 596{
865 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 597 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
866 598
867 if (data->power_containment_features & 599 if (data->power_containment_features &
868 POWERCONTAINMENT_FEATURE_PkgPwrLimit) 600 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
@@ -871,21 +603,27 @@ int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
871 return 0; 603 return 0;
872} 604}
873 605
874static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) 606static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
875{ 607{
876 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, 608 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
877 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 609 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
878} 610}
879 611
880int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) 612int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
881{ 613{
882 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 614 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
883 struct phm_ppt_v1_information *table_info = 615 struct phm_ppt_v1_information *table_info =
884 (struct phm_ppt_v1_information *)(hwmgr->pptable); 616 (struct phm_ppt_v1_information *)(hwmgr->pptable);
885 int smc_result; 617 int smc_result;
886 int result = 0; 618 int result = 0;
619 struct phm_cac_tdp_table *cac_table;
887 620
888 data->power_containment_features = 0; 621 data->power_containment_features = 0;
622 if (hwmgr->pp_table_version == PP_TABLE_V1)
623 cac_table = table_info->cac_dtp_table;
624 else
625 cac_table = hwmgr->dyn_state.cac_dtp_table;
626
889 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 627 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
890 PHM_PlatformCaps_PowerContainment)) { 628 PHM_PlatformCaps_PowerContainment)) {
891 629
@@ -905,15 +643,13 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
905 PP_ASSERT_WITH_CODE((0 == smc_result), 643 PP_ASSERT_WITH_CODE((0 == smc_result),
906 "Failed to enable PkgPwrTracking in SMC.", result = -1;); 644 "Failed to enable PkgPwrTracking in SMC.", result = -1;);
907 if (0 == smc_result) { 645 if (0 == smc_result) {
908 struct phm_cac_tdp_table *cac_table =
909 table_info->cac_dtp_table;
910 uint32_t default_limit = 646 uint32_t default_limit =
911 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); 647 (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
912 648
913 data->power_containment_features |= 649 data->power_containment_features |=
914 POWERCONTAINMENT_FEATURE_PkgPwrLimit; 650 POWERCONTAINMENT_FEATURE_PkgPwrLimit;
915 651
916 if (polaris10_set_power_limit(hwmgr, default_limit)) 652 if (smu7_set_power_limit(hwmgr, default_limit))
917 printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); 653 printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
918 } 654 }
919 } 655 }
@@ -921,9 +657,9 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
921 return result; 657 return result;
922} 658}
923 659
924int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) 660int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
925{ 661{
926 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 662 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
927 int result = 0; 663 int result = 0;
928 664
929 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 665 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -963,14 +699,19 @@ int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
963 return result; 699 return result;
964} 700}
965 701
966int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) 702int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
967{ 703{
968 struct phm_ppt_v1_information *table_info = 704 struct phm_ppt_v1_information *table_info =
969 (struct phm_ppt_v1_information *)(hwmgr->pptable); 705 (struct phm_ppt_v1_information *)(hwmgr->pptable);
970 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; 706 struct phm_cac_tdp_table *cac_table;
707
971 int adjust_percent, target_tdp; 708 int adjust_percent, target_tdp;
972 int result = 0; 709 int result = 0;
973 710
711 if (hwmgr->pp_table_version == PP_TABLE_V1)
712 cac_table = table_info->cac_dtp_table;
713 else
714 cac_table = hwmgr->dyn_state.cac_dtp_table;
974 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 715 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
975 PHM_PlatformCaps_PowerContainment)) { 716 PHM_PlatformCaps_PowerContainment)) {
976 /* adjustment percentage has already been validated */ 717 /* adjustment percentage has already been validated */
@@ -981,7 +722,7 @@ int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
981 * but message to be 8 bit fraction for messages 722 * but message to be 8 bit fraction for messages
982 */ 723 */
983 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; 724 target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
984 result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); 725 result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
985 } 726 }
986 727
987 return result; 728 return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
index 329119d6cc71..22f86b6bf1be 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
@@ -20,17 +20,8 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef POLARIS10_POWERTUNE_H 23#ifndef _SMU7_POWERTUNE_H
24#define POLARIS10_POWERTUNE_H 24#define _SMU7_POWERTUNE_H
25
26enum polaris10_pt_config_reg_type {
27 POLARIS10_CONFIGREG_MMR = 0,
28 POLARIS10_CONFIGREG_SMC_IND,
29 POLARIS10_CONFIGREG_DIDT_IND,
30 POLARIS10_CONFIGREG_GC_CAC_IND,
31 POLARIS10_CONFIGREG_CACHE,
32 POLARIS10_CONFIGREG_MAX
33};
34 25
35#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 26#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
36#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 27#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
@@ -52,30 +43,20 @@ enum polaris10_pt_config_reg_type {
52 43
53#define ixGC_CAC_CNTL 0x0000 44#define ixGC_CAC_CNTL 0x0000
54#define ixDIDT_SQ_STALL_CTRL 0x0004 45#define ixDIDT_SQ_STALL_CTRL 0x0004
55#define ixDIDT_SQ_TUNING_CTRL 0x0005 46#define ixDIDT_SQ_TUNING_CTRL 0x0005
56#define ixDIDT_TD_STALL_CTRL 0x0044 47#define ixDIDT_TD_STALL_CTRL 0x0044
57#define ixDIDT_TD_TUNING_CTRL 0x0045 48#define ixDIDT_TD_TUNING_CTRL 0x0045
58#define ixDIDT_TCP_STALL_CTRL 0x0064 49#define ixDIDT_TCP_STALL_CTRL 0x0064
59#define ixDIDT_TCP_TUNING_CTRL 0x0065 50#define ixDIDT_TCP_TUNING_CTRL 0x0065
60 51
61struct polaris10_pt_config_reg {
62 uint32_t offset;
63 uint32_t mask;
64 uint32_t shift;
65 uint32_t value;
66 enum polaris10_pt_config_reg_type type;
67};
68
69 52
70void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); 53int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
71int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); 54int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
72int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); 55int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
73int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); 56int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
74int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); 57int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
75int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); 58int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
76int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); 59int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
77int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); 60int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
78int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); 61#endif /* DGPU_POWERTUNE_H */
79int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
80#endif /* POLARIS10_POWERTUNE_H */
81 62
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 7f431e762262..fb6c6f6106d5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,18 +20,15 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23
23#include <asm/div64.h> 24#include <asm/div64.h>
24#include "fiji_thermal.h" 25#include "smu7_thermal.h"
25#include "fiji_hwmgr.h" 26#include "smu7_hwmgr.h"
26#include "fiji_smumgr.h" 27#include "smu7_common.h"
27#include "fiji_ppsmc.h" 28
28#include "smu/smu_7_1_3_d.h" 29int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
29#include "smu/smu_7_1_3_sh_mask.h"
30
31int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
32 struct phm_fan_speed_info *fan_speed_info) 30 struct phm_fan_speed_info *fan_speed_info)
33{ 31{
34
35 if (hwmgr->thermal_controller.fanInfo.bNoFan) 32 if (hwmgr->thermal_controller.fanInfo.bNoFan)
36 return 0; 33 return 0;
37 34
@@ -55,7 +52,7 @@ int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
55 return 0; 52 return 0;
56} 53}
57 54
58int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, 55int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
59 uint32_t *speed) 56 uint32_t *speed)
60{ 57{
61 uint32_t duty100; 58 uint32_t duty100;
@@ -84,7 +81,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
84 return 0; 81 return 0;
85} 82}
86 83
87int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) 84int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
88{ 85{
89 uint32_t tach_period; 86 uint32_t tach_period;
90 uint32_t crystal_clock_freq; 87 uint32_t crystal_clock_freq;
@@ -100,9 +97,9 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
100 if (tach_period == 0) 97 if (tach_period == 0)
101 return -EINVAL; 98 return -EINVAL;
102 99
103 crystal_clock_freq = tonga_get_xclk(hwmgr); 100 crystal_clock_freq = smu7_get_xclk(hwmgr);
104 101
105 *speed = 60 * crystal_clock_freq * 10000/ tach_period; 102 *speed = 60 * crystal_clock_freq * 10000 / tach_period;
106 103
107 return 0; 104 return 0;
108} 105}
@@ -113,7 +110,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
113* mode the fan control mode, 0 default, 1 by percent, 5, by RPM 110* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
114* @exception Should always succeed. 111* @exception Should always succeed.
115*/ 112*/
116int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 113int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
117{ 114{
118 115
119 if (hwmgr->fan_ctrl_is_in_default_mode) { 116 if (hwmgr->fan_ctrl_is_in_default_mode) {
@@ -139,7 +136,7 @@ int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
139* @param hwmgr the address of the powerplay hardware manager. 136* @param hwmgr the address of the powerplay hardware manager.
140* @exception Should always succeed. 137* @exception Should always succeed.
141*/ 138*/
142int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) 139int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
143{ 140{
144 if (!hwmgr->fan_ctrl_is_in_default_mode) { 141 if (!hwmgr->fan_ctrl_is_in_default_mode) {
145 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 142 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,7 +149,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
152 return 0; 149 return 0;
153} 150}
154 151
155static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) 152static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
156{ 153{
157 int result; 154 int result;
158 155
@@ -187,7 +184,7 @@ static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
187} 184}
188 185
189 186
190int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) 187int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
191{ 188{
192 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); 189 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
193} 190}
@@ -198,7 +195,7 @@ int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
198* @param speed is the percentage value (0% - 100%) to be set. 195* @param speed is the percentage value (0% - 100%) to be set.
199* @exception Fails is the 100% setting appears to be 0. 196* @exception Fails is the 100% setting appears to be 0.
200*/ 197*/
201int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, 198int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
202 uint32_t speed) 199 uint32_t speed)
203{ 200{
204 uint32_t duty100; 201 uint32_t duty100;
@@ -213,7 +210,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
213 210
214 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 211 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl)) 212 PHM_PlatformCaps_MicrocodeFanControl))
216 fiji_fan_ctrl_stop_smc_fan_control(hwmgr); 213 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
217 214
218 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 215 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
219 CG_FDO_CTRL1, FMAX_DUTY100); 216 CG_FDO_CTRL1, FMAX_DUTY100);
@@ -228,7 +225,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
228 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 225 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
229 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); 226 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
230 227
231 return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); 228 return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
232} 229}
233 230
234/** 231/**
@@ -236,7 +233,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
236* @param hwmgr the address of the powerplay hardware manager. 233* @param hwmgr the address of the powerplay hardware manager.
237* @exception Always succeeds. 234* @exception Always succeeds.
238*/ 235*/
239int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) 236int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
240{ 237{
241 int result; 238 int result;
242 239
@@ -245,11 +242,11 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
245 242
246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 243 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_MicrocodeFanControl)) { 244 PHM_PlatformCaps_MicrocodeFanControl)) {
248 result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); 245 result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
249 if (!result) 246 if (!result)
250 result = fiji_fan_ctrl_start_smc_fan_control(hwmgr); 247 result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
251 } else 248 } else
252 result = fiji_fan_ctrl_set_default_mode(hwmgr); 249 result = smu7_fan_ctrl_set_default_mode(hwmgr);
253 250
254 return result; 251 return result;
255} 252}
@@ -260,7 +257,7 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
260* @param speed is the percentage value (min - max) to be set. 257* @param speed is the percentage value (min - max) to be set.
261* @exception Fails is the speed not lie between min and max. 258* @exception Fails is the speed not lie between min and max.
262*/ 259*/
263int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) 260int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
264{ 261{
265 uint32_t tach_period; 262 uint32_t tach_period;
266 uint32_t crystal_clock_freq; 263 uint32_t crystal_clock_freq;
@@ -272,14 +269,18 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 269 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
273 return 0; 270 return 0;
274 271
275 crystal_clock_freq = tonga_get_xclk(hwmgr); 272 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
273 PHM_PlatformCaps_MicrocodeFanControl))
274 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
275
276 crystal_clock_freq = smu7_get_xclk(hwmgr);
276 277
277 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 278 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
278 279
279 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 280 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
280 CG_TACH_STATUS, TACH_PERIOD, tach_period); 281 CG_TACH_STATUS, TACH_PERIOD, tach_period);
281 282
282 return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); 283 return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
283} 284}
284 285
285/** 286/**
@@ -287,7 +288,7 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
287* 288*
288* @param hwmgr The address of the hardware manager. 289* @param hwmgr The address of the hardware manager.
289*/ 290*/
290int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) 291int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
291{ 292{
292 int temp; 293 int temp;
293 294
@@ -296,7 +297,7 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
296 297
297 /* Bit 9 means the reading is lower than the lowest usable value. */ 298 /* Bit 9 means the reading is lower than the lowest usable value. */
298 if (temp & 0x200) 299 if (temp & 0x200)
299 temp = FIJI_THERMAL_MAXIMUM_TEMP_READING; 300 temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
300 else 301 else
301 temp = temp & 0x1ff; 302 temp = temp & 0x1ff;
302 303
@@ -312,12 +313,12 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
312* @param range Temperature range to be programmed for high and low alert signals 313* @param range Temperature range to be programmed for high and low alert signals
313* @exception PP_Result_BadInput if the input data is not valid. 314* @exception PP_Result_BadInput if the input data is not valid.
314*/ 315*/
315static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 316static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
316 uint32_t low_temp, uint32_t high_temp) 317 uint32_t low_temp, uint32_t high_temp)
317{ 318{
318 uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP * 319 uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
319 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 320 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
320 uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP * 321 uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
321 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 322 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
322 323
323 if (low < low_temp) 324 if (low < low_temp)
@@ -346,7 +347,7 @@ static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
346* 347*
347* @param hwmgr The address of the hardware manager. 348* @param hwmgr The address of the hardware manager.
348*/ 349*/
349static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) 350static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
350{ 351{
351 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) 352 if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
352 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 353 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -365,13 +366,13 @@ static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
365* 366*
366* @param hwmgr The address of the hardware manager. 367* @param hwmgr The address of the hardware manager.
367*/ 368*/
368static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) 369int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
369{ 370{
370 uint32_t alert; 371 uint32_t alert;
371 372
372 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 373 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
373 CG_THERMAL_INT, THERM_INT_MASK); 374 CG_THERMAL_INT, THERM_INT_MASK);
374 alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); 375 alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
375 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 376 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
376 CG_THERMAL_INT, THERM_INT_MASK, alert); 377 CG_THERMAL_INT, THERM_INT_MASK, alert);
377 378
@@ -383,13 +384,13 @@ static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
383* Disable thermal alerts on the RV770 thermal controller. 384* Disable thermal alerts on the RV770 thermal controller.
384* @param hwmgr The address of the hardware manager. 385* @param hwmgr The address of the hardware manager.
385*/ 386*/
386static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) 387int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
387{ 388{
388 uint32_t alert; 389 uint32_t alert;
389 390
390 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 391 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
391 CG_THERMAL_INT, THERM_INT_MASK); 392 CG_THERMAL_INT, THERM_INT_MASK);
392 alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); 393 alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
393 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 394 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
394 CG_THERMAL_INT, THERM_INT_MASK, alert); 395 CG_THERMAL_INT, THERM_INT_MASK, alert);
395 396
@@ -402,129 +403,17 @@ static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
402* Currently just disables alerts. 403* Currently just disables alerts.
403* @param hwmgr The address of the hardware manager. 404* @param hwmgr The address of the hardware manager.
404*/ 405*/
405int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) 406int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
406{ 407{
407 int result = fiji_thermal_disable_alert(hwmgr); 408 int result = smu7_thermal_disable_alert(hwmgr);
408 409
409 if (hwmgr->thermal_controller.fanInfo.bNoFan) 410 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
410 fiji_fan_ctrl_set_default_mode(hwmgr); 411 smu7_fan_ctrl_set_default_mode(hwmgr);
411 412
412 return result; 413 return result;
413} 414}
414 415
415/** 416/**
416* Set up the fan table to control the fan using the SMC.
417* @param hwmgr the address of the powerplay hardware manager.
418* @param pInput the pointer to input data
419* @param pOutput the pointer to output data
420* @param pStorage the pointer to temporary storage
421* @param Result the last failure code
422* @return result from set temperature range routine
423*/
424static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
425 void *input, void *output, void *storage, int result)
426{
427 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
428 SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
429 uint32_t duty100;
430 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
431 uint16_t fdo_min, slope1, slope2;
432 uint32_t reference_clock;
433 int res;
434 uint64_t tmp64;
435
436 if (data->fan_table_start == 0) {
437 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
438 PHM_PlatformCaps_MicrocodeFanControl);
439 return 0;
440 }
441
442 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
443 CG_FDO_CTRL1, FMAX_DUTY100);
444
445 if (duty100 == 0) {
446 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
447 PHM_PlatformCaps_MicrocodeFanControl);
448 return 0;
449 }
450
451 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
452 usPWMMin * duty100;
453 do_div(tmp64, 10000);
454 fdo_min = (uint16_t)tmp64;
455
456 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
457 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
458 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
459 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
460
461 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
462 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
463 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
464 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
465
466 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
467 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
468
469 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
470 thermal_controller.advanceFanControlParameters.usTMin) / 100);
471 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
472 thermal_controller.advanceFanControlParameters.usTMed) / 100);
473 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
474 thermal_controller.advanceFanControlParameters.usTMax) / 100);
475
476 fan_table.Slope1 = cpu_to_be16(slope1);
477 fan_table.Slope2 = cpu_to_be16(slope2);
478
479 fan_table.FdoMin = cpu_to_be16(fdo_min);
480
481 fan_table.HystDown = cpu_to_be16(hwmgr->
482 thermal_controller.advanceFanControlParameters.ucTHyst);
483
484 fan_table.HystUp = cpu_to_be16(1);
485
486 fan_table.HystSlope = cpu_to_be16(1);
487
488 fan_table.TempRespLim = cpu_to_be16(5);
489
490 reference_clock = tonga_get_xclk(hwmgr);
491
492 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
493 thermal_controller.advanceFanControlParameters.ulCycleDelay *
494 reference_clock) / 1600);
495
496 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
497
498 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
499 hwmgr->device, CGS_IND_REG__SMC,
500 CG_MULT_THERMAL_CTRL, TEMP_SEL);
501
502 res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
503 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
504 data->sram_end);
505
506 if (!res && hwmgr->thermal_controller.
507 advanceFanControlParameters.ucMinimumPWMLimit)
508 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
509 PPSMC_MSG_SetFanMinPwm,
510 hwmgr->thermal_controller.
511 advanceFanControlParameters.ucMinimumPWMLimit);
512
513 if (!res && hwmgr->thermal_controller.
514 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
515 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
516 PPSMC_MSG_SetFanSclkTarget,
517 hwmgr->thermal_controller.
518 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
519
520 if (res)
521 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
522 PHM_PlatformCaps_MicrocodeFanControl);
523
524 return 0;
525}
526
527/**
528* Start the fan control on the SMC. 417* Start the fan control on the SMC.
529* @param hwmgr the address of the powerplay hardware manager. 418* @param hwmgr the address of the powerplay hardware manager.
530* @param pInput the pointer to input data 419* @param pInput the pointer to input data
@@ -533,7 +422,7 @@ static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
533* @param Result the last failure code 422* @param Result the last failure code
534* @return result from set temperature range routine 423* @return result from set temperature range routine
535*/ 424*/
536static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, 425static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
537 void *input, void *output, void *storage, int result) 426 void *input, void *output, void *storage, int result)
538{ 427{
539/* If the fantable setup has failed we could have disabled 428/* If the fantable setup has failed we could have disabled
@@ -543,8 +432,8 @@ static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
543*/ 432*/
544 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 433 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
545 PHM_PlatformCaps_MicrocodeFanControl)) { 434 PHM_PlatformCaps_MicrocodeFanControl)) {
546 fiji_fan_ctrl_start_smc_fan_control(hwmgr); 435 smu7_fan_ctrl_start_smc_fan_control(hwmgr);
547 fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); 436 smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
548 } 437 }
549 438
550 return 0; 439 return 0;
@@ -559,7 +448,7 @@ static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
559* @param Result the last failure code 448* @param Result the last failure code
560* @return result from set temperature range routine 449* @return result from set temperature range routine
561*/ 450*/
562int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, 451static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
563 void *input, void *output, void *storage, int result) 452 void *input, void *output, void *storage, int result)
564{ 453{
565 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; 454 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
@@ -567,7 +456,7 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
567 if (range == NULL) 456 if (range == NULL)
568 return -EINVAL; 457 return -EINVAL;
569 458
570 return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max); 459 return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
571} 460}
572 461
573/** 462/**
@@ -579,10 +468,10 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
579* @param Result the last failure code 468* @param Result the last failure code
580* @return result from initialize thermal controller routine 469* @return result from initialize thermal controller routine
581*/ 470*/
582int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, 471static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
583 void *input, void *output, void *storage, int result) 472 void *input, void *output, void *storage, int result)
584{ 473{
585 return fiji_thermal_initialize(hwmgr); 474 return smu7_thermal_initialize(hwmgr);
586} 475}
587 476
588/** 477/**
@@ -594,10 +483,10 @@ int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
594* @param Result the last failure code 483* @param Result the last failure code
595* @return result from enable alert routine 484* @return result from enable alert routine
596*/ 485*/
597int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, 486static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
598 void *input, void *output, void *storage, int result) 487 void *input, void *output, void *storage, int result)
599{ 488{
600 return fiji_thermal_enable_alert(hwmgr); 489 return smu7_thermal_enable_alert(hwmgr);
601} 490}
602 491
603/** 492/**
@@ -609,53 +498,54 @@ int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
609* @param Result the last failure code 498* @param Result the last failure code
610* @return result from disable alert routine 499* @return result from disable alert routine
611*/ 500*/
612static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr, 501static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
613 void *input, void *output, void *storage, int result) 502 void *input, void *output, void *storage, int result)
614{ 503{
615 return fiji_thermal_disable_alert(hwmgr); 504 return smu7_thermal_disable_alert(hwmgr);
616} 505}
617 506
618static const struct phm_master_table_item 507static const struct phm_master_table_item
619fiji_thermal_start_thermal_controller_master_list[] = { 508phm_thermal_start_thermal_controller_master_list[] = {
620 {NULL, tf_fiji_thermal_initialize}, 509 {NULL, tf_smu7_thermal_initialize},
621 {NULL, tf_fiji_thermal_set_temperature_range}, 510 {NULL, tf_smu7_thermal_set_temperature_range},
622 {NULL, tf_fiji_thermal_enable_alert}, 511 {NULL, tf_smu7_thermal_enable_alert},
512 {NULL, smum_thermal_avfs_enable},
623/* We should restrict performance levels to low before we halt the SMC. 513/* We should restrict performance levels to low before we halt the SMC.
624 * On the other hand we are still in boot state when we do this 514 * On the other hand we are still in boot state when we do this
625 * so it would be pointless. 515 * so it would be pointless.
626 * If this assumption changes we have to revisit this table. 516 * If this assumption changes we have to revisit this table.
627 */ 517 */
628 {NULL, tf_fiji_thermal_setup_fan_table}, 518 {NULL, smum_thermal_setup_fan_table},
629 {NULL, tf_fiji_thermal_start_smc_fan_control}, 519 {NULL, tf_smu7_thermal_start_smc_fan_control},
630 {NULL, NULL} 520 {NULL, NULL}
631}; 521};
632 522
633static const struct phm_master_table_header 523static const struct phm_master_table_header
634fiji_thermal_start_thermal_controller_master = { 524phm_thermal_start_thermal_controller_master = {
635 0, 525 0,
636 PHM_MasterTableFlag_None, 526 PHM_MasterTableFlag_None,
637 fiji_thermal_start_thermal_controller_master_list 527 phm_thermal_start_thermal_controller_master_list
638}; 528};
639 529
640static const struct phm_master_table_item 530static const struct phm_master_table_item
641fiji_thermal_set_temperature_range_master_list[] = { 531phm_thermal_set_temperature_range_master_list[] = {
642 {NULL, tf_fiji_thermal_disable_alert}, 532 {NULL, tf_smu7_thermal_disable_alert},
643 {NULL, tf_fiji_thermal_set_temperature_range}, 533 {NULL, tf_smu7_thermal_set_temperature_range},
644 {NULL, tf_fiji_thermal_enable_alert}, 534 {NULL, tf_smu7_thermal_enable_alert},
645 {NULL, NULL} 535 {NULL, NULL}
646}; 536};
647 537
648static const struct phm_master_table_header 538static const struct phm_master_table_header
649fiji_thermal_set_temperature_range_master = { 539phm_thermal_set_temperature_range_master = {
650 0, 540 0,
651 PHM_MasterTableFlag_None, 541 PHM_MasterTableFlag_None,
652 fiji_thermal_set_temperature_range_master_list 542 phm_thermal_set_temperature_range_master_list
653}; 543};
654 544
655int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) 545int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
656{ 546{
657 if (!hwmgr->thermal_controller.fanInfo.bNoFan) 547 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
658 fiji_fan_ctrl_set_default_mode(hwmgr); 548 smu7_fan_ctrl_set_default_mode(hwmgr);
659 return 0; 549 return 0;
660} 550}
661 551
@@ -664,17 +554,17 @@ int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
664* @param hwmgr The address of the hardware manager. 554* @param hwmgr The address of the hardware manager.
665* @exception Any error code from the low-level communication. 555* @exception Any error code from the low-level communication.
666*/ 556*/
667int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr) 557int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
668{ 558{
669 int result; 559 int result;
670 560
671 result = phm_construct_table(hwmgr, 561 result = phm_construct_table(hwmgr,
672 &fiji_thermal_set_temperature_range_master, 562 &phm_thermal_set_temperature_range_master,
673 &(hwmgr->set_temperature_range)); 563 &(hwmgr->set_temperature_range));
674 564
675 if (!result) { 565 if (!result) {
676 result = phm_construct_table(hwmgr, 566 result = phm_construct_table(hwmgr,
677 &fiji_thermal_start_thermal_controller_master, 567 &phm_thermal_start_thermal_controller_master,
678 &(hwmgr->start_thermal_controller)); 568 &(hwmgr->start_thermal_controller));
679 if (result) 569 if (result)
680 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); 570 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
new file mode 100644
index 000000000000..6face973be43
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _SMU7_THERMAL_H_
25#define _SMU7_THERMAL_H_
26
27#include "hwmgr.h"
28
29#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1
30#define SMU7_THERMAL_LOW_ALERT_MASK 0x2
31
32#define SMU7_THERMAL_MINIMUM_TEMP_READING -256
33#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255
34
35#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0
36#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255
37
38#define FDO_PWM_MODE_STATIC 1
39#define FDO_PWM_MODE_STATIC_RPM 5
40
41extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
42extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
43extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
44extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
45extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
46extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
47extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
48extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
49extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
50extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
51extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
52extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
53extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
54extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
55extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
56
57#endif
58
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644
index e58d038a997b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "tonga_clockpowergating.h"
26#include "tonga_ppsmc.h"
27#include "tonga_hwmgr.h"
28
29int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
30{
31 if (phm_cf_want_uvd_power_gating(hwmgr))
32 return smum_send_msg_to_smc(hwmgr->smumgr,
33 PPSMC_MSG_UVDPowerOFF);
34 return 0;
35}
36
37int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
38{
39 if (phm_cf_want_uvd_power_gating(hwmgr)) {
40 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
41 PHM_PlatformCaps_UVDDynamicPowerGating)) {
42 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
43 PPSMC_MSG_UVDPowerON, 1);
44 } else {
45 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
46 PPSMC_MSG_UVDPowerON, 0);
47 }
48 }
49
50 return 0;
51}
52
53int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
54{
55 if (phm_cf_want_vce_power_gating(hwmgr))
56 return smum_send_msg_to_smc(hwmgr->smumgr,
57 PPSMC_MSG_VCEPowerOFF);
58 return 0;
59}
60
61int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
62{
63 if (phm_cf_want_vce_power_gating(hwmgr))
64 return smum_send_msg_to_smc(hwmgr->smumgr,
65 PPSMC_MSG_VCEPowerON);
66 return 0;
67}
68
69int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
70{
71 int ret = 0;
72
73 switch (block) {
74 case PHM_AsicBlock_UVD_MVC:
75 case PHM_AsicBlock_UVD:
76 case PHM_AsicBlock_UVD_HD:
77 case PHM_AsicBlock_UVD_SD:
78 if (gating == PHM_ClockGateSetting_StaticOff)
79 ret = tonga_phm_powerdown_uvd(hwmgr);
80 else
81 ret = tonga_phm_powerup_uvd(hwmgr);
82 break;
83 case PHM_AsicBlock_GFX:
84 default:
85 break;
86 }
87
88 return ret;
89}
90
91int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
92{
93 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
94
95 data->uvd_power_gated = false;
96 data->vce_power_gated = false;
97
98 tonga_phm_powerup_uvd(hwmgr);
99 tonga_phm_powerup_vce(hwmgr);
100
101 return 0;
102}
103
104int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
105{
106 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
107
108 if (data->uvd_power_gated == bgate)
109 return 0;
110
111 data->uvd_power_gated = bgate;
112
113 if (bgate) {
114 cgs_set_clockgating_state(hwmgr->device,
115 AMD_IP_BLOCK_TYPE_UVD,
116 AMD_CG_STATE_UNGATE);
117 cgs_set_powergating_state(hwmgr->device,
118 AMD_IP_BLOCK_TYPE_UVD,
119 AMD_PG_STATE_GATE);
120 tonga_update_uvd_dpm(hwmgr, true);
121 tonga_phm_powerdown_uvd(hwmgr);
122 } else {
123 tonga_phm_powerup_uvd(hwmgr);
124 cgs_set_powergating_state(hwmgr->device,
125 AMD_IP_BLOCK_TYPE_UVD,
126 AMD_PG_STATE_UNGATE);
127 cgs_set_clockgating_state(hwmgr->device,
128 AMD_IP_BLOCK_TYPE_UVD,
129 AMD_PG_STATE_GATE);
130
131 tonga_update_uvd_dpm(hwmgr, false);
132 }
133
134 return 0;
135}
136
137int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
138{
139 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
140 struct phm_set_power_state_input states;
141 const struct pp_power_state *pcurrent;
142 struct pp_power_state *requested;
143
144 pcurrent = hwmgr->current_ps;
145 requested = hwmgr->request_ps;
146
147 states.pcurrent_state = &(pcurrent->hardware);
148 states.pnew_state = &(requested->hardware);
149
150 if (phm_cf_want_vce_power_gating(hwmgr)) {
151 if (data->vce_power_gated != bgate) {
152 if (bgate) {
153 cgs_set_clockgating_state(
154 hwmgr->device,
155 AMD_IP_BLOCK_TYPE_VCE,
156 AMD_CG_STATE_UNGATE);
157 cgs_set_powergating_state(
158 hwmgr->device,
159 AMD_IP_BLOCK_TYPE_VCE,
160 AMD_PG_STATE_GATE);
161 tonga_enable_disable_vce_dpm(hwmgr, false);
162 data->vce_power_gated = true;
163 } else {
164 tonga_phm_powerup_vce(hwmgr);
165 data->vce_power_gated = false;
166 cgs_set_powergating_state(
167 hwmgr->device,
168 AMD_IP_BLOCK_TYPE_VCE,
169 AMD_PG_STATE_UNGATE);
170 cgs_set_clockgating_state(
171 hwmgr->device,
172 AMD_IP_BLOCK_TYPE_VCE,
173 AMD_PG_STATE_GATE);
174
175 tonga_update_vce_dpm(hwmgr, &states);
176 tonga_enable_disable_vce_dpm(hwmgr, true);
177 return 0;
178 }
179 }
180 } else {
181 tonga_update_vce_dpm(hwmgr, &states);
182 tonga_enable_disable_vce_dpm(hwmgr, true);
183 return 0;
184 }
185
186 if (!data->vce_power_gated)
187 tonga_update_vce_dpm(hwmgr, &states);
188
189 return 0;
190}
191
192int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
193 const uint32_t *msg_id)
194{
195 PPSMC_Msg msg;
196 uint32_t value;
197
198 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
199 case PP_GROUP_GFX:
200 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
201 case PP_BLOCK_GFX_CG:
202 if (PP_STATE_SUPPORT_CG & *msg_id) {
203 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
204 ? PPSMC_MSG_EnableClockGatingFeature
205 : PPSMC_MSG_DisableClockGatingFeature;
206 value = CG_GFX_CGCG_MASK;
207
208 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
209 return -1;
210 }
211 if (PP_STATE_SUPPORT_LS & *msg_id) {
212 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
213 ? PPSMC_MSG_EnableClockGatingFeature
214 : PPSMC_MSG_DisableClockGatingFeature;
215 value = CG_GFX_CGLS_MASK;
216
217 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
218 return -1;
219 }
220 break;
221
222 case PP_BLOCK_GFX_MG:
223 /* For GFX MGCG, there are three different ones;
224 * CPF, RLC, and all others. CPF MGCG will not be used for Tonga.
225 * For GFX MGLS, Tonga will not support it.
226 * */
227 if (PP_STATE_SUPPORT_CG & *msg_id) {
228 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
229 ? PPSMC_MSG_EnableClockGatingFeature
230 : PPSMC_MSG_DisableClockGatingFeature;
231 value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
232
233 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
234 return -1;
235 }
236 break;
237
238 default:
239 return -1;
240 }
241 break;
242
243 case PP_GROUP_SYS:
244 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
245 case PP_BLOCK_SYS_BIF:
246 if (PP_STATE_SUPPORT_LS & *msg_id) {
247 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
248 ? PPSMC_MSG_EnableClockGatingFeature
249 : PPSMC_MSG_DisableClockGatingFeature;
250 value = CG_SYS_BIF_MGLS_MASK;
251
252 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
253 return -1;
254 }
255 break;
256
257 case PP_BLOCK_SYS_MC:
258 if (PP_STATE_SUPPORT_CG & *msg_id) {
259 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
260 ? PPSMC_MSG_EnableClockGatingFeature
261 : PPSMC_MSG_DisableClockGatingFeature;
262 value = CG_SYS_MC_MGCG_MASK;
263
264 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
265 return -1;
266 }
267
268 if (PP_STATE_SUPPORT_LS & *msg_id) {
269 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
270 ? PPSMC_MSG_EnableClockGatingFeature
271 : PPSMC_MSG_DisableClockGatingFeature;
272 value = CG_SYS_MC_MGLS_MASK;
273
274 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
275 return -1;
276
277 }
278 break;
279
280 case PP_BLOCK_SYS_HDP:
281 if (PP_STATE_SUPPORT_CG & *msg_id) {
282 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
283 ? PPSMC_MSG_EnableClockGatingFeature
284 : PPSMC_MSG_DisableClockGatingFeature;
285 value = CG_SYS_HDP_MGCG_MASK;
286
287 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
288 return -1;
289 }
290
291 if (PP_STATE_SUPPORT_LS & *msg_id) {
292 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
293 ? PPSMC_MSG_EnableClockGatingFeature
294 : PPSMC_MSG_DisableClockGatingFeature;
295
296 value = CG_SYS_HDP_MGLS_MASK;
297
298 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
299 return -1;
300 }
301 break;
302
303 case PP_BLOCK_SYS_SDMA:
304 if (PP_STATE_SUPPORT_CG & *msg_id) {
305 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
306 ? PPSMC_MSG_EnableClockGatingFeature
307 : PPSMC_MSG_DisableClockGatingFeature;
308 value = CG_SYS_SDMA_MGCG_MASK;
309
310 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
311 return -1;
312 }
313
314 if (PP_STATE_SUPPORT_LS & *msg_id) {
315 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
316 ? PPSMC_MSG_EnableClockGatingFeature
317 : PPSMC_MSG_DisableClockGatingFeature;
318
319 value = CG_SYS_SDMA_MGLS_MASK;
320
321 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
322 return -1;
323 }
324 break;
325
326 case PP_BLOCK_SYS_ROM:
327 if (PP_STATE_SUPPORT_CG & *msg_id) {
328 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
329 ? PPSMC_MSG_EnableClockGatingFeature
330 : PPSMC_MSG_DisableClockGatingFeature;
331 value = CG_SYS_ROM_MASK;
332
333 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
334 return -1;
335 }
336 break;
337
338 default:
339 return -1;
340
341 }
342 break;
343
344 default:
345 return -1;
346
347 }
348
349 return 0;
350}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644
index 080d69d77f04..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_DYN_DEFAULTS_H
24#define TONGA_DYN_DEFAULTS_H
25
26
27/** \file
28 * Volcanic Islands Dynamic default parameters.
29 */
30
31enum TONGAdpm_TrendDetection {
32 TONGAdpm_TrendDetection_AUTO,
33 TONGAdpm_TrendDetection_UP,
34 TONGAdpm_TrendDetection_DOWN
35};
36typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
37
38/* Bit vector representing same fields as hardware register. */
39#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
40/* HDP_busy */
41/* IH_busy */
42/* DRM_busy */
43/* DRMDMA_busy */
44/* UVD_busy */
45/* VCE_busy */
46/* ACP_busy */
47/* SAMU_busy */
48/* AVP_busy */
49/* SDMA enabled */
50#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
51/* SH_Gfx_busy */
52/* RB_Gfx_busy */
53/* VCE_busy */
54
55#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
56/* FE_Gfx_busy */
57/* RB_Gfx_busy */
58/* ACP_busy */
59
60#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
61/* FE_Gfx_busy */
62/* SH_Gfx_busy */
63/* UVD_busy */
64
65#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
66/* VCE_busy */
67/* ACP_busy */
68/* SAMU_busy */
69
70#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
71#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
72#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
73
74
75/* thermal protection counter (units).*/
76#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
77
78/* static screen threshold unit */
79#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
80
81/* static screen threshold */
82#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
83
84/* gfx idle clock stop threshold */
85#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
86
87/* Fixed reference divider to use when building baby stepping tables. */
88#define PPTONGA_REFERENCEDIVIDER_DFLT 4
89
90/*
91 * ULV voltage change delay time
92 * Used to be delay_vreg in N.I. split for S.I.
93 * Using N.I. delay_vreg value as default
94 * ReferenceClock = 2700
95 * VoltageResponseTime = 1000
96 * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
97 */
98
99#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
100
101#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
102#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
103#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
104#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
105
106#endif
107
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644
index 582d04aed346..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ /dev/null
@@ -1,6370 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/module.h>
24#include <linux/slab.h>
25#include "linux/delay.h"
26#include "pp_acpi.h"
27#include "hwmgr.h"
28#include <atombios.h>
29#include "tonga_hwmgr.h"
30#include "pptable.h"
31#include "processpptables.h"
32#include "process_pptables_v1_0.h"
33#include "pptable_v1_0.h"
34#include "pp_debug.h"
35#include "tonga_ppsmc.h"
36#include "cgs_common.h"
37#include "pppcielanes.h"
38#include "tonga_dyn_defaults.h"
39#include "smumgr.h"
40#include "tonga_smumgr.h"
41#include "tonga_clockpowergating.h"
42#include "tonga_thermal.h"
43
44#include "smu/smu_7_1_2_d.h"
45#include "smu/smu_7_1_2_sh_mask.h"
46
47#include "gmc/gmc_8_1_d.h"
48#include "gmc/gmc_8_1_sh_mask.h"
49
50#include "bif/bif_5_0_d.h"
51#include "bif/bif_5_0_sh_mask.h"
52
53#include "dce/dce_10_0_d.h"
54#include "dce/dce_10_0_sh_mask.h"
55
56#include "cgs_linux.h"
57#include "eventmgr.h"
58#include "amd_pcie_helpers.h"
59
60#define MC_CG_ARB_FREQ_F0 0x0a
61#define MC_CG_ARB_FREQ_F1 0x0b
62#define MC_CG_ARB_FREQ_F2 0x0c
63#define MC_CG_ARB_FREQ_F3 0x0d
64
65#define MC_CG_SEQ_DRAMCONF_S0 0x05
66#define MC_CG_SEQ_DRAMCONF_S1 0x06
67#define MC_CG_SEQ_YCLK_SUSPEND 0x04
68#define MC_CG_SEQ_YCLK_RESUME 0x0a
69
70#define PCIE_BUS_CLK 10000
71#define TCLK (PCIE_BUS_CLK / 10)
72
73#define SMC_RAM_END 0x40000
74#define SMC_CG_IND_START 0xc0030000
75#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
76
77#define VOLTAGE_SCALE 4
78#define VOLTAGE_VID_OFFSET_SCALE1 625
79#define VOLTAGE_VID_OFFSET_SCALE2 100
80
81#define VDDC_VDDCI_DELTA 200
82#define VDDC_VDDGFX_DELTA 300
83
84#define MC_SEQ_MISC0_GDDR5_SHIFT 28
85#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
86#define MC_SEQ_MISC0_GDDR5_VALUE 5
87
88typedef uint32_t PECI_RegistryValue;
89
90/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
91static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
92 {600, 1050, 3, 0},
93 {600, 1050, 6, 1} };
94
95/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
96static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
97 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
98 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
99
100/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
101static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
102 {0, 1, 3, 2, 4, 5},
103 {0, 2, 4, 5, 6, 5} };
104
105/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
106enum DPM_EVENT_SRC {
107 DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
108 DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
109 DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
110 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
111 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
112};
113typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
114
115static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
116
117struct tonga_power_state *cast_phw_tonga_power_state(
118 struct pp_hw_power_state *hw_ps)
119{
120 if (hw_ps == NULL)
121 return NULL;
122
123 PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
124 "Invalid Powerstate Type!",
125 return NULL);
126
127 return (struct tonga_power_state *)hw_ps;
128}
129
130const struct tonga_power_state *cast_const_phw_tonga_power_state(
131 const struct pp_hw_power_state *hw_ps)
132{
133 if (hw_ps == NULL)
134 return NULL;
135
136 PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
137 "Invalid Powerstate Type!",
138 return NULL);
139
140 return (const struct tonga_power_state *)hw_ps;
141}
142
143int tonga_add_voltage(struct pp_hwmgr *hwmgr,
144 phm_ppt_v1_voltage_lookup_table *look_up_table,
145 phm_ppt_v1_voltage_lookup_record *record)
146{
147 uint32_t i;
148 PP_ASSERT_WITH_CODE((NULL != look_up_table),
149 "Lookup Table empty.", return -1;);
150 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
151 "Lookup Table empty.", return -1;);
152 PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
153 "Lookup Table is full.", return -1;);
154
155 /* This is to avoid entering duplicate calculated records. */
156 for (i = 0; i < look_up_table->count; i++) {
157 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
158 if (look_up_table->entries[i].us_calculated == 1)
159 return 0;
160 else
161 break;
162 }
163 }
164
165 look_up_table->entries[i].us_calculated = 1;
166 look_up_table->entries[i].us_vdd = record->us_vdd;
167 look_up_table->entries[i].us_cac_low = record->us_cac_low;
168 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
169 look_up_table->entries[i].us_cac_high = record->us_cac_high;
170 /* Only increment the count when we're appending, not replacing duplicate entry. */
171 if (i == look_up_table->count)
172 look_up_table->count++;
173
174 return 0;
175}
176
177int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
178{
179 PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
180
181 return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
182}
183
184uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
185 uint32_t voltage)
186{
187 uint8_t count = (uint8_t) (voltage_table->count);
188 uint8_t i = 0;
189
190 PP_ASSERT_WITH_CODE((NULL != voltage_table),
191 "Voltage Table empty.", return 0;);
192 PP_ASSERT_WITH_CODE((0 != count),
193 "Voltage Table empty.", return 0;);
194
195 for (i = 0; i < count; i++) {
196 /* find first voltage bigger than requested */
197 if (voltage_table->entries[i].value >= voltage)
198 return i;
199 }
200
201 /* voltage is bigger than max voltage in the table */
202 return i - 1;
203}
204
205
206/**
207 * @brief PhwTonga_GetVoltageOrder
208 * Returns index of requested voltage record in lookup(table)
209 * @param hwmgr - pointer to hardware manager
210 * @param lookupTable - lookup list to search in
211 * @param voltage - voltage to look for
212 * @return 0 on success
213 */
214uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
215 uint16_t voltage)
216{
217 uint8_t count = (uint8_t) (look_up_table->count);
218 uint8_t i;
219
220 PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
221 PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
222
223 for (i = 0; i < count; i++) {
224 /* find first voltage equal or bigger than requested */
225 if (look_up_table->entries[i].us_vdd >= voltage)
226 return i;
227 }
228
229 /* voltage is bigger than max voltage in the table */
230 return i-1;
231}
232
233static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
234{
235 /*
236 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
237 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
238 * whereas voltage control is a fundemental change that will not be disabled
239 */
240
241 return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
242 FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
243}
244
245/**
246 * Re-generate the DPM level mask value
247 * @param hwmgr the address of the hardware manager
248 */
249static uint32_t tonga_get_dpm_level_enable_mask_value(
250 struct tonga_single_dpm_table * dpm_table)
251{
252 uint32_t i;
253 uint32_t mask_value = 0;
254
255 for (i = dpm_table->count; i > 0; i--) {
256 mask_value = mask_value << 1;
257
258 if (dpm_table->dpm_levels[i-1].enabled)
259 mask_value |= 0x1;
260 else
261 mask_value &= 0xFFFFFFFE;
262 }
263 return mask_value;
264}
265
266/**
267 * Retrieve DPM default values from registry (if available)
268 *
269 * @param hwmgr the address of the powerplay hardware manager.
270 */
271void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
272{
273 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
274 phw_tonga_ulv_parm *ulv = &(data->ulv);
275 uint32_t tmp;
276
277 ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
278 data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
279 data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
280 data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
281 data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
282 data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
283 data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
284 data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
285 data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
286
287 data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
288 data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
289
290 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_ABM);
292 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
293 PHM_PlatformCaps_NonABMSupportInPPLib);
294
295 tmp = 0;
296 if (tmp == 0)
297 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
298 PHM_PlatformCaps_DynamicACTiming);
299
300 tmp = 0;
301 if (0 != tmp)
302 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
303 PHM_PlatformCaps_DisableMemoryTransition);
304
305 tonga_initialize_power_tune_defaults(hwmgr);
306
307 data->mclk_strobe_mode_threshold = 40000;
308 data->mclk_stutter_mode_threshold = 30000;
309 data->mclk_edc_enable_threshold = 40000;
310 data->mclk_edc_wr_enable_threshold = 40000;
311
312 tmp = 0;
313 if (tmp != 0)
314 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
315 PHM_PlatformCaps_DisableMCLS);
316
317 data->pcie_gen_performance.max = PP_PCIEGen1;
318 data->pcie_gen_performance.min = PP_PCIEGen3;
319 data->pcie_gen_power_saving.max = PP_PCIEGen1;
320 data->pcie_gen_power_saving.min = PP_PCIEGen3;
321
322 data->pcie_lane_performance.max = 0;
323 data->pcie_lane_performance.min = 16;
324 data->pcie_lane_power_saving.max = 0;
325 data->pcie_lane_power_saving.min = 16;
326
327 tmp = 0;
328
329 if (tmp)
330 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
331 PHM_PlatformCaps_SclkThrottleLowNotification);
332
333 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
334 PHM_PlatformCaps_DynamicUVDState);
335
336}
337
338static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
339{
340 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
341
342 int result = 0;
343 uint32_t low_sclk_interrupt_threshold = 0;
344
345 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
346 PHM_PlatformCaps_SclkThrottleLowNotification)
347 && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
348 data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
349 low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
350
351 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
352
353 result = tonga_copy_bytes_to_smc(
354 hwmgr->smumgr,
355 data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
356 LowSclkInterruptThreshold),
357 (uint8_t *)&low_sclk_interrupt_threshold,
358 sizeof(uint32_t),
359 data->sram_end
360 );
361 }
362
363 return result;
364}
365
366/**
367 * Find SCLK value that is associated with specified virtual_voltage_Id.
368 *
369 * @param hwmgr the address of the powerplay hardware manager.
370 * @param virtual_voltage_Id voltageId to look for.
371 * @param sclk output value .
372 * @return always 0 if success and 2 if association not found
373 */
374static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
375 phm_ppt_v1_voltage_lookup_table *lookup_table,
376 uint16_t virtual_voltage_id, uint32_t *sclk)
377{
378 uint8_t entryId;
379 uint8_t voltageId;
380 struct phm_ppt_v1_information *pptable_info =
381 (struct phm_ppt_v1_information *)(hwmgr->pptable);
382
383 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
384
385 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
386 for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
387 voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
388 if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
389 break;
390 }
391
392 PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
393 "Can't find requested voltage id in vdd_dep_on_sclk table!",
394 return -1;
395 );
396
397 *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
398
399 return 0;
400}
401
402/**
403 * Get Leakage VDDC based on leakage ID.
404 *
405 * @param hwmgr the address of the powerplay hardware manager.
406 * @return 2 if vddgfx returned is greater than 2V or if BIOS
407 */
408int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
409{
410 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
411 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
412 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
413 uint16_t virtual_voltage_id;
414 uint16_t vddc = 0;
415 uint16_t vddgfx = 0;
416 uint16_t i, j;
417 uint32_t sclk = 0;
418
419 /* retrieve voltage for leakage ID (0xff01 + i) */
420 for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
421 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
422
423 /* in split mode we should have only vddgfx EVV leakages */
424 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
425 if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
426 pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
427 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
428 PHM_PlatformCaps_ClockStretcher)) {
429 for (j = 1; j < sclk_table->count; j++) {
430 if (sclk_table->entries[j].clk == sclk &&
431 sclk_table->entries[j].cks_enable == 0) {
432 sclk += 5000;
433 break;
434 }
435 }
436 }
437 if (0 == atomctrl_get_voltage_evv_on_sclk
438 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
439 virtual_voltage_id, &vddgfx)) {
440 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
441 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
442
443 /* the voltage should not be zero nor equal to leakage ID */
444 if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
445 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
446 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
447 data->vddcgfx_leakage.count++;
448 }
449 } else {
450 printk("Error retrieving EVV voltage value!\n");
451 }
452 }
453 } else {
454 /* in merged mode we have only vddc EVV leakages */
455 if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
456 pptable_info->vddc_lookup_table,
457 virtual_voltage_id, &sclk)) {
458 if (0 == atomctrl_get_voltage_evv_on_sclk
459 (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
460 virtual_voltage_id, &vddc)) {
461 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
462 PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
463
464 /* the voltage should not be zero nor equal to leakage ID */
465 if (vddc != 0 && vddc != virtual_voltage_id) {
466 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
467 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
468 data->vddc_leakage.count++;
469 }
470 } else {
471 printk("Error retrieving EVV voltage value!\n");
472 }
473 }
474 }
475 }
476
477 return 0;
478}
479
/**
 * Enable SCLK and MCLK DPM in the SMC.
 *
 * When MCLK DPM is enabled this also turns on the memory-controller
 * CAC_EN bit and programs the LCAC control registers (read phase,
 * short delay, then write phase — per the inline comments).
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -1 if an SMC enable message fails.
 */
int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm (unless key-disabled) */
	if (0 == data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_DPM_Enable)),
			"Failed to enable SCLK DPM during DPM Start Function!",
			return -1);
	}

	/* enable MCLK dpm (unless key-disabled) */
	if (0 == data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_Enable)),
			"Failed to enable MCLK DPM during DPM Start Function!",
			return -1);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x100005);/*Read */

		/* let the read-phase programming settle before switching to write */
		udelay(10);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixLCAC_CPL_CNTL, 0x500005);/* write */

	}

	return 0;
}
523
/**
 * Start dynamic power management.
 *
 * Enables global power management and sclk deep sleep, programs the
 * voltage-change timeout soft register, releases the PCIe link reset,
 * then enables voltage control, SCLK/MCLK DPM and (optionally) PCIe DPM.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -1 if any mandatory enable step fails.
 */
int tonga_start_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
	/* enable sclk deep sleep */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
			offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_Voltage_Cntl_Enable)),
		"Failed to enable voltage DPM during DPM Start Function!",
		return -1);

	if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_Enable)),
			"Failed to enable pcie DPM during DPM Start Function!",
			return -1
		);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/* best effort: the return value is deliberately ignored here */
		smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt);
	}

	return 0;
}
567
/**
 * Disable SCLK and MCLK DPM in the SMC.
 *
 * Each disable message is only sent after verifying that DPM is no
 * longer running; sending it while DPM runs can hang the SMC.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -1 if DPM is still running or a message fails.
 */
int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (0 == data->sclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
		PP_ASSERT_WITH_CODE(
			!tonga_is_dpm_running(hwmgr),
			"Trying to Disable SCLK DPM when DPM is disabled",
			return -1
		);

		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_DPM_Disable)),
			"Failed to disable SCLK DPM during DPM stop Function!",
			return -1);
	}

	/* disable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
		PP_ASSERT_WITH_CODE(
			!tonga_is_dpm_running(hwmgr),
			"Trying to Disable MCLK DPM when DPM is disabled",
			return -1
		);

		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_Disable)),
			"Failed to Disable MCLK DPM during DPM stop Function!",
			return -1);
	}

	return 0;
}
606
/**
 * Stop dynamic power management.
 *
 * Reverses tonga_start_dpm: clears the global power management and
 * sclk deep sleep bits, then disables PCIe, SCLK/MCLK and voltage DPM,
 * checking before each SMC message that DPM is not running.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -1 if DPM is still running or a message fails.
 */
int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
{
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep*/
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
		PP_ASSERT_WITH_CODE(
			!tonga_is_dpm_running(hwmgr),
			"Trying to Disable PCIE DPM when DPM is disabled",
			return -1
		);
		PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_Disable)),
			"Failed to disable pcie DPM during DPM stop Function!",
			return -1);
	}

	if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
		PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);

	/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
	PP_ASSERT_WITH_CODE(
		!tonga_is_dpm_running(hwmgr),
		"Trying to Disable Voltage CNTL when DPM is disabled",
		return -1
	);

	PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_Voltage_Cntl_Disable)),
		"Failed to disable voltage DPM during DPM stop Function!",
		return -1);

	return 0;
}
648
/**
 * Allow SCLK power management by clearing the SCLK_PWRMGT_OFF override bit.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);

	return 0;
}
655
656/**
657 * Send a message to the SMC and return a parameter
658 *
659 * @param hwmgr: the address of the powerplay hardware manager.
660 * @param msg: the message to send.
661 * @param parameter: pointer to the received parameter
662 * @return The response that came from the SMC.
663 */
664PPSMC_Result tonga_send_msg_to_smc_return_parameter(
665 struct pp_hwmgr *hwmgr,
666 PPSMC_Msg msg,
667 uint32_t *parameter)
668{
669 int result;
670
671 result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
672
673 if ((0 == result) && parameter) {
674 *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
675 }
676
677 return result;
678}
679
680/**
681 * force DPM power State
682 *
683 * @param hwmgr: the address of the powerplay hardware manager.
684 * @param n : DPM level
685 * @return The response that came from the SMC.
686 */
687int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
688{
689 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
690 uint32_t level_mask = 1 << n;
691
692 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
693 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
694 "Trying to force SCLK when DPM is disabled",
695 return -1;);
696 if (0 == data->sclk_dpm_key_disabled)
697 return (0 == smum_send_msg_to_smc_with_parameter(
698 hwmgr->smumgr,
699 (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
700 level_mask) ? 0 : 1);
701
702 return 0;
703}
704
705/**
706 * force DPM power State
707 *
708 * @param hwmgr: the address of the powerplay hardware manager.
709 * @param n : DPM level
710 * @return The response that came from the SMC.
711 */
712int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
713{
714 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
715 uint32_t level_mask = 1 << n;
716
717 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
718 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
719 "Trying to Force MCLK when DPM is disabled",
720 return -1;);
721 if (0 == data->mclk_dpm_key_disabled)
722 return (0 == smum_send_msg_to_smc_with_parameter(
723 hwmgr->smumgr,
724 (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
725 level_mask) ? 0 : 1);
726
727 return 0;
728}
729
730/**
731 * force DPM power State
732 *
733 * @param hwmgr: the address of the powerplay hardware manager.
734 * @param n : DPM level
735 * @return The response that came from the SMC.
736 */
737int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
738{
739 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
740
741 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
742 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
743 "Trying to Force PCIE level when DPM is disabled",
744 return -1;);
745 if (0 == data->pcie_dpm_key_disabled)
746 return (0 == smum_send_msg_to_smc_with_parameter(
747 hwmgr->smumgr,
748 (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
749 n) ? 0 : 1);
750
751 return 0;
752}
753
754/**
755 * Set the initial state by calling SMC to switch to this state directly
756 *
757 * @param hwmgr the address of the powerplay hardware manager.
758 * @return always 0
759 */
760int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
761{
762 /*
763 * SMC only stores one state that SW will ask to switch too,
764 * so we switch the the just uploaded one
765 */
766 return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
767}
768
769/**
770 * Get the location of various tables inside the FW image.
771 *
772 * @param hwmgr the address of the powerplay hardware manager.
773 * @return always 0
774 */
775static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
776{
777 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
778 struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
779
780 uint32_t tmp;
781 int result;
782 bool error = false;
783
784 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
785 SMU72_FIRMWARE_HEADER_LOCATION +
786 offsetof(SMU72_Firmware_Header, DpmTable),
787 &tmp, data->sram_end);
788
789 if (0 == result) {
790 data->dpm_table_start = tmp;
791 }
792
793 error |= (0 != result);
794
795 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
796 SMU72_FIRMWARE_HEADER_LOCATION +
797 offsetof(SMU72_Firmware_Header, SoftRegisters),
798 &tmp, data->sram_end);
799
800 if (0 == result) {
801 data->soft_regs_start = tmp;
802 tonga_smu->ulSoftRegsStart = tmp;
803 }
804
805 error |= (0 != result);
806
807
808 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
809 SMU72_FIRMWARE_HEADER_LOCATION +
810 offsetof(SMU72_Firmware_Header, mcRegisterTable),
811 &tmp, data->sram_end);
812
813 if (0 == result) {
814 data->mc_reg_table_start = tmp;
815 }
816
817 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
818 SMU72_FIRMWARE_HEADER_LOCATION +
819 offsetof(SMU72_Firmware_Header, FanTable),
820 &tmp, data->sram_end);
821
822 if (0 == result) {
823 data->fan_table_start = tmp;
824 }
825
826 error |= (0 != result);
827
828 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
829 SMU72_FIRMWARE_HEADER_LOCATION +
830 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
831 &tmp, data->sram_end);
832
833 if (0 == result) {
834 data->arb_table_start = tmp;
835 }
836
837 error |= (0 != result);
838
839
840 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
841 SMU72_FIRMWARE_HEADER_LOCATION +
842 offsetof(SMU72_Firmware_Header, Version),
843 &tmp, data->sram_end);
844
845 if (0 == result) {
846 hwmgr->microcode_version_info.SMC = tmp;
847 }
848
849 error |= (0 != result);
850
851 return error ? 1 : 0;
852}
853
854/**
855 * Read clock related registers.
856 *
857 * @param hwmgr the address of the powerplay hardware manager.
858 * @return always 0
859 */
860int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
861{
862 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
863
864 data->clock_registers.vCG_SPLL_FUNC_CNTL =
865 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
866 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
867 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
868 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
869 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
870 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
871 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
872 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
873 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
874 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
875 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
876 data->clock_registers.vDLL_CNTL =
877 cgs_read_register(hwmgr->device, mmDLL_CNTL);
878 data->clock_registers.vMCLK_PWRMGT_CNTL =
879 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
880 data->clock_registers.vMPLL_AD_FUNC_CNTL =
881 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
882 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
883 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
884 data->clock_registers.vMPLL_FUNC_CNTL =
885 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
886 data->clock_registers.vMPLL_FUNC_CNTL_1 =
887 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
888 data->clock_registers.vMPLL_FUNC_CNTL_2 =
889 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
890 data->clock_registers.vMPLL_SS1 =
891 cgs_read_register(hwmgr->device, mmMPLL_SS1);
892 data->clock_registers.vMPLL_SS2 =
893 cgs_read_register(hwmgr->device, mmMPLL_SS2);
894
895 return 0;
896}
897
898/**
899 * Find out if memory is GDDR5.
900 *
901 * @param hwmgr the address of the powerplay hardware manager.
902 * @return always 0
903 */
904int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
905{
906 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
907 uint32_t temp;
908
909 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
910
911 data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
912 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
913 MC_SEQ_MISC0_GDDR5_SHIFT));
914
915 return 0;
916}
917
918/**
919 * Enables Dynamic Power Management by SMC
920 *
921 * @param hwmgr the address of the powerplay hardware manager.
922 * @return always 0
923 */
924int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
925{
926 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
927
928 return 0;
929}
930
931/**
932 * Initialize PowerGating States for different engines
933 *
934 * @param hwmgr the address of the powerplay hardware manager.
935 * @return always 0
936 */
937int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
938{
939 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
940
941 data->uvd_power_gated = false;
942 data->vce_power_gated = false;
943 data->samu_power_gated = false;
944 data->acp_power_gated = false;
945 data->pg_acp_init = true;
946
947 return 0;
948}
949
950/**
951 * Checks if DPM is enabled
952 *
953 * @param hwmgr the address of the powerplay hardware manager.
954 * @return always 0
955 */
956int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
957{
958 /*
959 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
960 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
961 * whereas voltage control is a fundemental change that will not be disabled
962 */
963 return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
964}
965
966/**
967 * Checks if DPM is stopped
968 *
969 * @param hwmgr the address of the powerplay hardware manager.
970 * @return always 0
971 */
972int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
973{
974 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
975
976 if (tonga_is_dpm_running(hwmgr)) {
977 /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
978 if (!data->dpm_table_start) {
979 return 1;
980 }
981 }
982
983 return 0;
984}
985
986/**
987 * Remove repeated voltage values and create table with unique values.
988 *
989 * @param hwmgr the address of the powerplay hardware manager.
990 * @param voltage_table the pointer to changing voltage table
991 * @return 1 in success
992 */
993
994static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
995 pp_atomctrl_voltage_table *voltage_table)
996{
997 uint32_t table_size, i, j;
998 uint16_t vvalue;
999 bool bVoltageFound = false;
1000 pp_atomctrl_voltage_table *table;
1001
1002 PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
1003 table_size = sizeof(pp_atomctrl_voltage_table);
1004 table = kzalloc(table_size, GFP_KERNEL);
1005
1006 if (NULL == table)
1007 return -ENOMEM;
1008
1009 memset(table, 0x00, table_size);
1010 table->mask_low = voltage_table->mask_low;
1011 table->phase_delay = voltage_table->phase_delay;
1012
1013 for (i = 0; i < voltage_table->count; i++) {
1014 vvalue = voltage_table->entries[i].value;
1015 bVoltageFound = false;
1016
1017 for (j = 0; j < table->count; j++) {
1018 if (vvalue == table->entries[j].value) {
1019 bVoltageFound = true;
1020 break;
1021 }
1022 }
1023
1024 if (!bVoltageFound) {
1025 table->entries[table->count].value = vvalue;
1026 table->entries[table->count].smio_low =
1027 voltage_table->entries[i].smio_low;
1028 table->count++;
1029 }
1030 }
1031
1032 memcpy(table, voltage_table, sizeof(pp_atomctrl_voltage_table));
1033
1034 kfree(table);
1035
1036 return 0;
1037}
1038
/**
 * Build the SVI2 VDDCI voltage table from a clock/voltage dependency table.
 *
 * Copies every vddci value from the dependency table into the backend's
 * vddci_voltage_table (smio fields zeroed, as SVI2 needs no GPIO bits)
 * and then removes duplicate voltage entries.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param voltage_dependency_table the source dependency entries.
 * @return 0 on success, -1 if the dependency table is empty,
 *         otherwise the error from tonga_trim_voltage_table.
 */
static int tonga_get_svi2_vdd_ci_voltage_table(
		struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
{
	uint32_t i;
	int result;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);

	PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
			"Voltage Dependency Table empty.", return -1;);

	vddci_voltage_table->mask_low = 0;
	vddci_voltage_table->phase_delay = 0;
	vddci_voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		vddci_voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].vddci;
		vddci_voltage_table->entries[i].smio_low = 0;
	}

	result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result;);

	return 0;
}
1067
1068
1069
1070static int tonga_get_svi2_vdd_voltage_table(
1071 struct pp_hwmgr *hwmgr,
1072 phm_ppt_v1_voltage_lookup_table *look_up_table,
1073 pp_atomctrl_voltage_table *voltage_table)
1074{
1075 uint8_t i = 0;
1076
1077 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1078 "Voltage Lookup Table empty.", return -1;);
1079
1080 voltage_table->mask_low = 0;
1081 voltage_table->phase_delay = 0;
1082
1083 voltage_table->count = look_up_table->count;
1084
1085 for (i = 0; i < voltage_table->count; i++) {
1086 voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
1087 voltage_table->entries[i].smio_low = 0;
1088 }
1089
1090 return 0;
1091}
1092
1093/*
1094 * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
1095 * If the voltage table would be bigger than what will fit into the state table on the SMC keep only the higher entries.
1096 */
1097
1098static void tonga_trim_voltage_table_to_fit_state_table(
1099 struct pp_hwmgr *hwmgr,
1100 uint32_t max_voltage_steps,
1101 pp_atomctrl_voltage_table *voltage_table)
1102{
1103 unsigned int i, diff;
1104
1105 if (voltage_table->count <= max_voltage_steps) {
1106 return;
1107 }
1108
1109 diff = voltage_table->count - max_voltage_steps;
1110
1111 for (i = 0; i < max_voltage_steps; i++) {
1112 voltage_table->entries[i] = voltage_table->entries[i + diff];
1113 }
1114
1115 voltage_table->count = max_voltage_steps;
1116
1117 return;
1118}
1119
1120/**
1121 * Create Voltage Tables.
1122 *
1123 * @param hwmgr the address of the powerplay hardware manager.
1124 * @return always 0
1125 */
1126int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1127{
1128 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1129 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1130 int result;
1131
1132 /* MVDD has only GPIO voltage control */
1133 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1134 result = atomctrl_get_voltage_table_v3(hwmgr,
1135 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
1136 PP_ASSERT_WITH_CODE((0 == result),
1137 "Failed to retrieve MVDD table.", return result;);
1138 }
1139
1140 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1141 /* GPIO voltage */
1142 result = atomctrl_get_voltage_table_v3(hwmgr,
1143 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
1144 PP_ASSERT_WITH_CODE((0 == result),
1145 "Failed to retrieve VDDCI table.", return result;);
1146 } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1147 /* SVI2 voltage */
1148 result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
1149 pptable_info->vdd_dep_on_mclk);
1150 PP_ASSERT_WITH_CODE((0 == result),
1151 "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
1152 }
1153
1154 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1155 /* VDDGFX has only SVI2 voltage control */
1156 result = tonga_get_svi2_vdd_voltage_table(hwmgr,
1157 pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
1158 PP_ASSERT_WITH_CODE((0 == result),
1159 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
1160 }
1161
1162 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1163 /* VDDC has only SVI2 voltage control */
1164 result = tonga_get_svi2_vdd_voltage_table(hwmgr,
1165 pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
1166 PP_ASSERT_WITH_CODE((0 == result),
1167 "Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
1168 }
1169
1170 PP_ASSERT_WITH_CODE(
1171 (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
1172 "Too many voltage values for VDDC. Trimming to fit state table.",
1173 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1174 SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
1175 );
1176
1177 PP_ASSERT_WITH_CODE(
1178 (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
1179 "Too many voltage values for VDDGFX. Trimming to fit state table.",
1180 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1181 SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
1182 );
1183
1184 PP_ASSERT_WITH_CODE(
1185 (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
1186 "Too many voltage values for VDDCI. Trimming to fit state table.",
1187 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1188 SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
1189 );
1190
1191 PP_ASSERT_WITH_CODE(
1192 (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
1193 "Too many voltage values for MVDD. Trimming to fit state table.",
1194 tonga_trim_voltage_table_to_fit_state_table(hwmgr,
1195 SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
1196 );
1197
1198 return 0;
1199}
1200
1201/**
1202 * Vddc table preparation for SMC.
1203 *
1204 * @param hwmgr the address of the hardware manager
1205 * @param table the SMC DPM table structure to be populated
1206 * @return always 0
1207 */
1208static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
1209 SMU72_Discrete_DpmTable *table)
1210{
1211 unsigned int count;
1212 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1213
1214 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1215 table->VddcLevelCount = data->vddc_voltage_table.count;
1216 for (count = 0; count < table->VddcLevelCount; count++) {
1217 table->VddcTable[count] =
1218 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
1219 }
1220 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
1221 }
1222 return 0;
1223}
1224
1225/**
1226 * VddGfx table preparation for SMC.
1227 *
1228 * @param hwmgr the address of the hardware manager
1229 * @param table the SMC DPM table structure to be populated
1230 * @return always 0
1231 */
1232static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
1233 SMU72_Discrete_DpmTable *table)
1234{
1235 unsigned int count;
1236 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1237
1238 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1239 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
1240 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
1241 table->VddGfxTable[count] =
1242 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
1243 }
1244 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
1245 }
1246 return 0;
1247}
1248
1249/**
1250 * Vddci table preparation for SMC.
1251 *
1252 * @param *hwmgr The address of the hardware manager.
1253 * @param *table The SMC DPM table structure to be populated.
1254 * @return 0
1255 */
1256static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
1257 SMU72_Discrete_DpmTable *table)
1258{
1259 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1260 uint32_t count;
1261
1262 table->VddciLevelCount = data->vddci_voltage_table.count;
1263 for (count = 0; count < table->VddciLevelCount; count++) {
1264 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1265 table->VddciTable[count] =
1266 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1267 } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1268 table->SmioTable1.Pattern[count].Voltage =
1269 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1270 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
1271 table->SmioTable1.Pattern[count].Smio =
1272 (uint8_t) count;
1273 table->Smio[count] |=
1274 data->vddci_voltage_table.entries[count].smio_low;
1275 table->VddciTable[count] =
1276 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
1277 }
1278 }
1279
1280 table->SmioMask1 = data->vddci_voltage_table.mask_low;
1281 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
1282
1283 return 0;
1284}
1285
1286/**
1287 * Mvdd table preparation for SMC.
1288 *
1289 * @param *hwmgr The address of the hardware manager.
1290 * @param *table The SMC DPM table structure to be populated.
1291 * @return 0
1292 */
1293static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1294 SMU72_Discrete_DpmTable *table)
1295{
1296 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1297 uint32_t count;
1298
1299 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1300 table->MvddLevelCount = data->mvdd_voltage_table.count;
1301 for (count = 0; count < table->MvddLevelCount; count++) {
1302 table->SmioTable2.Pattern[count].Voltage =
1303 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
1304 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
1305 table->SmioTable2.Pattern[count].Smio =
1306 (uint8_t) count;
1307 table->Smio[count] |=
1308 data->mvdd_voltage_table.entries[count].smio_low;
1309 }
1310 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
1311
1312 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1313 }
1314
1315 return 0;
1316}
1317
1318/**
1319 * Preparation of vddc and vddgfx CAC tables for SMC.
1320 *
1321 * @param hwmgr the address of the hardware manager
1322 * @param table the SMC DPM table structure to be populated
1323 * @return always 0
1324 */
1325static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
1326 SMU72_Discrete_DpmTable *table)
1327{
1328 uint32_t count;
1329 uint8_t index;
1330 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1331 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1332 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
1333 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
1334
1335 /* pTables is already swapped, so in order to use the value from it, we need to swap it back. */
1336 uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
1337 uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
1338
1339 for (count = 0; count < vddcLevelCount; count++) {
1340 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
1341 index = tonga_get_voltage_index(vddc_lookup_table,
1342 data->vddc_voltage_table.entries[count].value);
1343 table->BapmVddcVidLoSidd[count] =
1344 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1345 table->BapmVddcVidHiSidd[count] =
1346 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1347 table->BapmVddcVidHiSidd2[count] =
1348 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1349 }
1350
1351 if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) {
1352 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
1353 for (count = 0; count < vddgfxLevelCount; count++) {
1354 index = tonga_get_voltage_index(vddgfx_lookup_table,
1355 data->vddgfx_voltage_table.entries[count].value);
1356 table->BapmVddGfxVidLoSidd[count] =
1357 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
1358 table->BapmVddGfxVidHiSidd[count] =
1359 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
1360 table->BapmVddGfxVidHiSidd2[count] =
1361 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
1362 }
1363 } else {
1364 for (count = 0; count < vddcLevelCount; count++) {
1365 index = tonga_get_voltage_index(vddc_lookup_table,
1366 data->vddc_voltage_table.entries[count].value);
1367 table->BapmVddGfxVidLoSidd[count] =
1368 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
1369 table->BapmVddGfxVidHiSidd[count] =
1370 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
1371 table->BapmVddGfxVidHiSidd2[count] =
1372 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
1373 }
1374 }
1375
1376 return 0;
1377}
1378
1379
1380/**
1381 * Preparation of voltage tables for SMC.
1382 *
1383 * @param hwmgr the address of the hardware manager
1384 * @param table the SMC DPM table structure to be populated
1385 * @return always 0
1386 */
1387
1388int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
1389 SMU72_Discrete_DpmTable *table)
1390{
1391 int result;
1392
1393 result = tonga_populate_smc_vddc_table(hwmgr, table);
1394 PP_ASSERT_WITH_CODE(0 == result,
1395 "can not populate VDDC voltage table to SMC", return -1);
1396
1397 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
1398 PP_ASSERT_WITH_CODE(0 == result,
1399 "can not populate VDDCI voltage table to SMC", return -1);
1400
1401 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
1402 PP_ASSERT_WITH_CODE(0 == result,
1403 "can not populate VDDGFX voltage table to SMC", return -1);
1404
1405 result = tonga_populate_smc_mvdd_table(hwmgr, table);
1406 PP_ASSERT_WITH_CODE(0 == result,
1407 "can not populate MVDD voltage table to SMC", return -1);
1408
1409 result = tonga_populate_cac_tables(hwmgr, table);
1410 PP_ASSERT_WITH_CODE(0 == result,
1411 "can not populate CAC voltage tables to SMC", return -1);
1412
1413 return 0;
1414}
1415
1416/**
1417 * Populates the SMC VRConfig field in DPM table.
1418 *
1419 * @param hwmgr the address of the hardware manager
1420 * @param table the SMC DPM table structure to be populated
1421 * @return always 0
1422 */
1423static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1424 SMU72_Discrete_DpmTable *table)
1425{
1426 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1427 uint16_t config;
1428
1429 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1430 /* Splitted mode */
1431 config = VR_SVI2_PLANE_1;
1432 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1433
1434 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1435 config = VR_SVI2_PLANE_2;
1436 table->VRConfig |= config;
1437 } else {
1438 printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n");
1439 }
1440 } else {
1441 /* Merged mode */
1442 config = VR_MERGED_WITH_VDDC;
1443 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1444
1445 /* Set Vddc Voltage Controller */
1446 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1447 config = VR_SVI2_PLANE_1;
1448 table->VRConfig |= config;
1449 } else {
1450 printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
1451 }
1452 }
1453
1454 /* Set Vddci Voltage Controller */
1455 if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
1456 config = VR_SVI2_PLANE_2; /* only in merged mode */
1457 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1458 } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
1459 config = VR_SMIO_PATTERN_1;
1460 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1461 }
1462
1463 /* Set Mvdd Voltage Controller */
1464 if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1465 config = VR_SMIO_PATTERN_2;
1466 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1467 }
1468
1469 return 0;
1470}
1471
1472static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
1473 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
1474 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
1475{
1476 uint32_t i = 0;
1477 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1478 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1479
1480 /* clock - voltage dependency table is empty table */
1481 if (allowed_clock_voltage_table->count == 0)
1482 return -1;
1483
1484 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1485 /* find first sclk bigger than request */
1486 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1487 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1488 allowed_clock_voltage_table->entries[i].vddgfx);
1489
1490 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1491 allowed_clock_voltage_table->entries[i].vddc);
1492
1493 if (allowed_clock_voltage_table->entries[i].vddci) {
1494 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1495 allowed_clock_voltage_table->entries[i].vddci);
1496 } else {
1497 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1498 allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
1499 }
1500
1501 if (allowed_clock_voltage_table->entries[i].mvdd) {
1502 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
1503 }
1504
1505 voltage->Phases = 1;
1506 return 0;
1507 }
1508 }
1509
1510 /* sclk is bigger than max sclk in the dependence table */
1511 voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1512 allowed_clock_voltage_table->entries[i-1].vddgfx);
1513 voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1514 allowed_clock_voltage_table->entries[i-1].vddc);
1515
1516 if (allowed_clock_voltage_table->entries[i-1].vddci) {
1517 voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
1518 allowed_clock_voltage_table->entries[i-1].vddci);
1519 }
1520 if (allowed_clock_voltage_table->entries[i-1].mvdd) {
1521 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
1522 }
1523
1524 return 0;
1525}
1526
1527/**
1528 * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1529 *
1530 * @param hwmgr the address of the powerplay hardware manager.
1531 * @return always 0
1532 */
1533int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
1534{
1535 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
1536}
1537
1538int tonga_populate_memory_timing_parameters(
1539 struct pp_hwmgr *hwmgr,
1540 uint32_t engine_clock,
1541 uint32_t memory_clock,
1542 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1543 )
1544{
1545 uint32_t dramTiming;
1546 uint32_t dramTiming2;
1547 uint32_t burstTime;
1548 int result;
1549
1550 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1551 engine_clock, memory_clock);
1552
1553 PP_ASSERT_WITH_CODE(result == 0,
1554 "Error calling VBIOS to set DRAM_TIMING.", return result);
1555
1556 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1557 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1558 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1559
1560 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1561 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1562 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1563
1564 return 0;
1565}
1566
1567/**
1568 * Setup parameters for the MC ARB.
1569 *
1570 * @param hwmgr the address of the powerplay hardware manager.
1571 * @return always 0
1572 * This function is to be called from the SetPowerState table.
1573 */
1574int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1575{
1576 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1577 int result = 0;
1578 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1579 uint32_t i, j;
1580
1581 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1582
1583 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1584 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1585 result = tonga_populate_memory_timing_parameters
1586 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1587 data->dpm_table.mclk_table.dpm_levels[j].value,
1588 &arb_regs.entries[i][j]);
1589
1590 if (0 != result) {
1591 break;
1592 }
1593 }
1594 }
1595
1596 if (0 == result) {
1597 result = tonga_copy_bytes_to_smc(
1598 hwmgr->smumgr,
1599 data->arb_table_start,
1600 (uint8_t *)&arb_regs,
1601 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1602 data->sram_end
1603 );
1604 }
1605
1606 return result;
1607}
1608
1609static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
1610{
1611 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1612 struct tonga_dpm_table *dpm_table = &data->dpm_table;
1613 uint32_t i;
1614
1615 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
1616 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1617 table->LinkLevel[i].PcieGenSpeed =
1618 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1619 table->LinkLevel[i].PcieLaneCount =
1620 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1621 table->LinkLevel[i].EnabledForActivity =
1622 1;
1623 table->LinkLevel[i].SPC =
1624 (uint8_t)(data->pcie_spc_cap & 0xff);
1625 table->LinkLevel[i].DownThreshold =
1626 PP_HOST_TO_SMC_UL(5);
1627 table->LinkLevel[i].UpThreshold =
1628 PP_HOST_TO_SMC_UL(30);
1629 }
1630
1631 data->smc_state_table.LinkLevelCount =
1632 (uint8_t)dpm_table->pcie_speed_table.count;
1633 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1634 tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1635
1636 return 0;
1637}
1638
1639static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1640 SMU72_Discrete_DpmTable *table)
1641{
1642 int result = 0;
1643
1644 uint8_t count;
1645 pp_atomctrl_clock_dividers_vi dividers;
1646 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1647 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1648 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1649
1650 table->UvdLevelCount = (uint8_t) (mm_table->count);
1651 table->UvdBootLevel = 0;
1652
1653 for (count = 0; count < table->UvdLevelCount; count++) {
1654 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1655 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1656 table->UvdLevel[count].MinVoltage.Vddc =
1657 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1658 mm_table->entries[count].vddc);
1659 table->UvdLevel[count].MinVoltage.VddGfx =
1660 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1661 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1662 mm_table->entries[count].vddgfx) : 0;
1663 table->UvdLevel[count].MinVoltage.Vddci =
1664 tonga_get_voltage_id(&data->vddci_voltage_table,
1665 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1666 table->UvdLevel[count].MinVoltage.Phases = 1;
1667
1668 /* retrieve divider value for VBIOS */
1669 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1670 table->UvdLevel[count].VclkFrequency, &dividers);
1671 PP_ASSERT_WITH_CODE((0 == result),
1672 "can not find divide id for Vclk clock", return result);
1673
1674 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1675
1676 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1677 table->UvdLevel[count].DclkFrequency, &dividers);
1678 PP_ASSERT_WITH_CODE((0 == result),
1679 "can not find divide id for Dclk clock", return result);
1680
1681 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1682
1683 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1684 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1685 //CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage);
1686 }
1687
1688 return result;
1689
1690}
1691
1692static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1693 SMU72_Discrete_DpmTable *table)
1694{
1695 int result = 0;
1696
1697 uint8_t count;
1698 pp_atomctrl_clock_dividers_vi dividers;
1699 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1700 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1701 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1702
1703 table->VceLevelCount = (uint8_t) (mm_table->count);
1704 table->VceBootLevel = 0;
1705
1706 for (count = 0; count < table->VceLevelCount; count++) {
1707 table->VceLevel[count].Frequency =
1708 mm_table->entries[count].eclk;
1709 table->VceLevel[count].MinVoltage.Vddc =
1710 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1711 mm_table->entries[count].vddc);
1712 table->VceLevel[count].MinVoltage.VddGfx =
1713 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1714 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1715 mm_table->entries[count].vddgfx) : 0;
1716 table->VceLevel[count].MinVoltage.Vddci =
1717 tonga_get_voltage_id(&data->vddci_voltage_table,
1718 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1719 table->VceLevel[count].MinVoltage.Phases = 1;
1720
1721 /* retrieve divider value for VBIOS */
1722 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1723 table->VceLevel[count].Frequency, &dividers);
1724 PP_ASSERT_WITH_CODE((0 == result),
1725 "can not find divide id for VCE engine clock", return result);
1726
1727 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1728
1729 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1730 }
1731
1732 return result;
1733}
1734
1735static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1736 SMU72_Discrete_DpmTable *table)
1737{
1738 int result = 0;
1739 uint8_t count;
1740 pp_atomctrl_clock_dividers_vi dividers;
1741 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1742 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1743 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1744
1745 table->AcpLevelCount = (uint8_t) (mm_table->count);
1746 table->AcpBootLevel = 0;
1747
1748 for (count = 0; count < table->AcpLevelCount; count++) {
1749 table->AcpLevel[count].Frequency =
1750 pptable_info->mm_dep_table->entries[count].aclk;
1751 table->AcpLevel[count].MinVoltage.Vddc =
1752 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1753 mm_table->entries[count].vddc);
1754 table->AcpLevel[count].MinVoltage.VddGfx =
1755 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1756 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1757 mm_table->entries[count].vddgfx) : 0;
1758 table->AcpLevel[count].MinVoltage.Vddci =
1759 tonga_get_voltage_id(&data->vddci_voltage_table,
1760 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1761 table->AcpLevel[count].MinVoltage.Phases = 1;
1762
1763 /* retrieve divider value for VBIOS */
1764 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1765 table->AcpLevel[count].Frequency, &dividers);
1766 PP_ASSERT_WITH_CODE((0 == result),
1767 "can not find divide id for engine clock", return result);
1768
1769 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1770
1771 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1772 }
1773
1774 return result;
1775}
1776
1777static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1778 SMU72_Discrete_DpmTable *table)
1779{
1780 int result = 0;
1781 uint8_t count;
1782 pp_atomctrl_clock_dividers_vi dividers;
1783 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1784 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1785 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1786
1787 table->SamuBootLevel = 0;
1788 table->SamuLevelCount = (uint8_t) (mm_table->count);
1789
1790 for (count = 0; count < table->SamuLevelCount; count++) {
1791 /* not sure whether we need evclk or not */
1792 table->SamuLevel[count].Frequency =
1793 pptable_info->mm_dep_table->entries[count].samclock;
1794 table->SamuLevel[count].MinVoltage.Vddc =
1795 tonga_get_voltage_index(pptable_info->vddc_lookup_table,
1796 mm_table->entries[count].vddc);
1797 table->SamuLevel[count].MinVoltage.VddGfx =
1798 (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
1799 tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
1800 mm_table->entries[count].vddgfx) : 0;
1801 table->SamuLevel[count].MinVoltage.Vddci =
1802 tonga_get_voltage_id(&data->vddci_voltage_table,
1803 mm_table->entries[count].vddc - data->vddc_vddci_delta);
1804 table->SamuLevel[count].MinVoltage.Phases = 1;
1805
1806 /* retrieve divider value for VBIOS */
1807 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1808 table->SamuLevel[count].Frequency, &dividers);
1809 PP_ASSERT_WITH_CODE((0 == result),
1810 "can not find divide id for samu clock", return result);
1811
1812 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1813
1814 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1815 }
1816
1817 return result;
1818}
1819
1820/**
1821 * Populates the SMC MCLK structure using the provided memory clock
1822 *
1823 * @param hwmgr the address of the hardware manager
1824 * @param memory_clock the memory clock to use to populate the structure
1825 * @param sclk the SMC SCLK structure to be populated
1826 */
1827static int tonga_calculate_mclk_params(
1828 struct pp_hwmgr *hwmgr,
1829 uint32_t memory_clock,
1830 SMU72_Discrete_MemoryLevel *mclk,
1831 bool strobe_mode,
1832 bool dllStateOn
1833 )
1834{
1835 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1836 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1837 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1838 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1839 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1840 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1841 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1842 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1843 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1844 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1845
1846 pp_atomctrl_memory_clock_param mpll_param;
1847 int result;
1848
1849 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1850 memory_clock, &mpll_param, strobe_mode);
1851 PP_ASSERT_WITH_CODE(0 == result,
1852 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1853
1854 /* MPLL_FUNC_CNTL setup*/
1855 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1856
1857 /* MPLL_FUNC_CNTL_1 setup*/
1858 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1859 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1860 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1861 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1862 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1863 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1864
1865 /* MPLL_AD_FUNC_CNTL setup*/
1866 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1867 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1868
1869 if (data->is_memory_GDDR5) {
1870 /* MPLL_DQ_FUNC_CNTL setup*/
1871 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1872 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1873 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1874 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1875 }
1876
1877 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1878 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1879 /*
1880 ************************************
1881 Fref = Reference Frequency
1882 NF = Feedback divider ratio
1883 NR = Reference divider ratio
1884 Fnom = Nominal VCO output frequency = Fref * NF / NR
1885 Fs = Spreading Rate
1886 D = Percentage down-spread / 2
1887 Fint = Reference input frequency to PFD = Fref / NR
1888 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1889 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1890 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1891 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1892 *************************************
1893 */
1894 pp_atomctrl_internal_ss_info ss_info;
1895 uint32_t freq_nom;
1896 uint32_t tmp;
1897 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1898
1899 /* for GDDR5 for all modes and DDR3 */
1900 if (1 == mpll_param.qdr)
1901 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1902 else
1903 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1904
1905 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1906 tmp = (freq_nom / reference_clock);
1907 tmp = tmp * tmp;
1908
1909 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1910 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
1911 /* ss.Info.speed_spectrum_rate -- in unit of khz */
1912 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1913 /* = reference_clock * 5 / speed_spectrum_rate */
1914 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1915
1916 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1917 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1918 uint32_t clkv =
1919 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1920 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1921
1922 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1923 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1924 }
1925 }
1926
1927 /* MCLK_PWRMGT_CNTL setup */
1928 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1929 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1930 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1931 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1932 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1933 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1934
1935
1936 /* Save the result data to outpupt memory level structure */
1937 mclk->MclkFrequency = memory_clock;
1938 mclk->MpllFuncCntl = mpll_func_cntl;
1939 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1940 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1941 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1942 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1943 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1944 mclk->DllCntl = dll_cntl;
1945 mclk->MpllSs1 = mpll_ss1;
1946 mclk->MpllSs2 = mpll_ss2;
1947
1948 return 0;
1949}
1950
/* Map a memory clock (10 kHz units) onto the 4-bit MC ratio index used by
 * the MC SEQ tables; the valid band depends on whether strobe mode is on. */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	if (strobe_mode) {
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (uint8_t)((memory_clock - 10000) / 2500);
	}

	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}
1976
/* Map a DDR3 memory clock (10 kHz units) onto the 4-bit MC ratio index. */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1991
/**
 * Populate a single SMC memory DPM level for the given memory clock.
 *
 * @param hwmgr the address of the hardware manager
 * @param memory_clock the memory clock (10 kHz units) this level represents
 * @param memory_level the SMC memory level structure to be populated
 * @return 0 on success, otherwise the dependency/MPLL lookup error
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t minMvdd = 0;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dllStateOn;
	struct cgs_display_info info = {0};


	/* Look up the minimum voltages (and MVDD) required by this clock. */
	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependecy_volt_by_clk(hwmgr,
			pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	/* Without MVDD control, fall back to the VBIOS bootup MVDD value. */
	if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	} else {
		memory_level->MinMvdd = minMvdd;
	}
	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* Stutter mode: only below the configured threshold, with UVD idle,
	 * the display pipe advertising stutter, and 1..2 active displays. */
	if ((data->mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0))
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= data->mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_GDDR5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((data->mclk_edc_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((data->mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			/* Pick the DLL state from the MC_SEQ fuse registers;
			 * which register applies depends on the strobe ratio. */
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dllStateOn = data->dll_defaule_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);

	/* On success, byte-swap all multi-byte fields for the SMC. */
	if (0 == result) {
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
2104
2105/**
2106 * Populates the SMC MVDD structure using the provided memory clock.
2107 *
2108 * @param hwmgr the address of the hardware manager
2109 * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
2110 * @param voltage the SMC VOLTAGE structure to be populated
2111 */
2112int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
2113{
2114 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2115 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2116 uint32_t i = 0;
2117
2118 if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
2119 /* find mvdd value which clock is more than request */
2120 for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
2121 if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
2122 /* Always round to higher voltage. */
2123 smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
2124 break;
2125 }
2126 }
2127
2128 PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
2129 "MVDD Voltage is outside the supported range.", return -1);
2130
2131 } else {
2132 return -1;
2133 }
2134
2135 return 0;
2136}
2137
2138
2139static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
2140 SMU72_Discrete_DpmTable *table)
2141{
2142 int result = 0;
2143 const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2144 pp_atomctrl_clock_dividers_vi dividers;
2145 SMIO_Pattern voltage_level;
2146 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
2147 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
2148 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
2149 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
2150
2151 /* The ACPI state should not do DPM on DC (or ever).*/
2152 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2153
2154 table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
2155
2156 /* assign zero for now*/
2157 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
2158
2159 /* get the engine clock dividers for this clock value*/
2160 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
2161 table->ACPILevel.SclkFrequency, &dividers);
2162
2163 PP_ASSERT_WITH_CODE(result == 0,
2164 "Error retrieving Engine Clock dividers from VBIOS.", return result);
2165
2166 /* divider ID for required SCLK*/
2167 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
2168 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2169 table->ACPILevel.DeepSleepDivId = 0;
2170
2171 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2172 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
2173 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
2174 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
2175 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
2176 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
2177
2178 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2179 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2180 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
2181 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
2182 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
2183 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
2184 table->ACPILevel.CcPwrDynRm = 0;
2185 table->ACPILevel.CcPwrDynRm1 = 0;
2186
2187
2188 /* For various features to be enabled/disabled while this level is active.*/
2189 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
2190 /* SCLK frequency in units of 10KHz*/
2191 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
2192 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
2193 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
2194 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
2195 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
2196 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
2197 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
2198 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
2199 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
2200
2201 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
2202 table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
2203
2204 /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
2205
2206 if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
2207 table->MemoryACPILevel.MinMvdd =
2208 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
2209 else
2210 table->MemoryACPILevel.MinMvdd = 0;
2211
2212 /* Force reset on DLL*/
2213 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2214 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
2215 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2216 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
2217
2218 /* Disable DLL in ACPIState*/
2219 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2220 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
2221 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
2222 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
2223
2224 /* Enable DLL bypass signal*/
2225 dll_cntl = PHM_SET_FIELD(dll_cntl,
2226 DLL_CNTL, MRDCK0_BYPASS, 0);
2227 dll_cntl = PHM_SET_FIELD(dll_cntl,
2228 DLL_CNTL, MRDCK1_BYPASS, 0);
2229
2230 table->MemoryACPILevel.DllCntl =
2231 PP_HOST_TO_SMC_UL(dll_cntl);
2232 table->MemoryACPILevel.MclkPwrmgtCntl =
2233 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
2234 table->MemoryACPILevel.MpllAdFuncCntl =
2235 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
2236 table->MemoryACPILevel.MpllDqFuncCntl =
2237 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
2238 table->MemoryACPILevel.MpllFuncCntl =
2239 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
2240 table->MemoryACPILevel.MpllFuncCntl_1 =
2241 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
2242 table->MemoryACPILevel.MpllFuncCntl_2 =
2243 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
2244 table->MemoryACPILevel.MpllSs1 =
2245 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
2246 table->MemoryACPILevel.MpllSs2 =
2247 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
2248
2249 table->MemoryACPILevel.EnabledForThrottle = 0;
2250 table->MemoryACPILevel.EnabledForActivity = 0;
2251 table->MemoryACPILevel.UpHyst = 0;
2252 table->MemoryACPILevel.DownHyst = 100;
2253 table->MemoryACPILevel.VoltageDownHyst = 0;
2254 /* Indicates maximum activity level for this performance level.*/
2255 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
2256
2257 table->MemoryACPILevel.StutterEnable = 0;
2258 table->MemoryACPILevel.StrobeEnable = 0;
2259 table->MemoryACPILevel.EdcReadEnable = 0;
2260 table->MemoryACPILevel.EdcWriteEnable = 0;
2261 table->MemoryACPILevel.RttEnable = 0;
2262
2263 return result;
2264}
2265
2266static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
2267{
2268 int result = 0;
2269 uint32_t i;
2270
2271 for (i = 0; i < table->count; i++) {
2272 if (value == table->dpm_levels[i].value) {
2273 *boot_level = i;
2274 result = 0;
2275 }
2276 }
2277 return result;
2278}
2279
2280static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
2281 SMU72_Discrete_DpmTable *table)
2282{
2283 int result = 0;
2284 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2285
2286 table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2287 table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */
2288
2289 /* find boot level from dpm table*/
2290 result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
2291 data->vbios_boot_state.sclk_bootup_value,
2292 (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
2293
2294 if (0 != result) {
2295 data->smc_state_table.GraphicsBootLevel = 0;
2296 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2297 in dependency table. Using Graphics DPM level 0!");
2298 result = 0;
2299 }
2300
2301 result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
2302 data->vbios_boot_state.mclk_bootup_value,
2303 (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
2304
2305 if (0 != result) {
2306 data->smc_state_table.MemoryBootLevel = 0;
2307 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
2308 in dependency table. Using Memory DPM level 0!");
2309 result = 0;
2310 }
2311
2312 table->BootVoltage.Vddc =
2313 tonga_get_voltage_id(&(data->vddc_voltage_table),
2314 data->vbios_boot_state.vddc_bootup_value);
2315 table->BootVoltage.VddGfx =
2316 tonga_get_voltage_id(&(data->vddgfx_voltage_table),
2317 data->vbios_boot_state.vddgfx_bootup_value);
2318 table->BootVoltage.Vddci =
2319 tonga_get_voltage_id(&(data->vddci_voltage_table),
2320 data->vbios_boot_state.vddci_bootup_value);
2321 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
2322
2323 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
2324
2325 return result;
2326}
2327
2328
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * Reads the PLL dividers for @engine_clock from the VBIOS, folds them into
 * the VBIOS-captured SPLL register images, optionally programs engine
 * spread spectrum, and stores the results in @sclk.
 *
 * @param hwmgr the address of the hardware manager
 * @param engine_clock the engine clock to use to populate the structure
 * @param sclk the SMC SCLK structure to be populated
 * @return 0 on success, or the atomctrl error when the VBIOS divider
 *         lookup fails
 */
int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
{
	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* start from the SPLL register values captured at init time */
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	/* 0x3FFFFFF keeps the 26 bits (14 fraction + 12 divider) */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
	/* NOTE(review): spll_func_cntl is computed here but never written
	 * into *sclk below — presumably SMU72_Discrete_GraphicsLevel has no
	 * CgSpllFuncCntl field; confirm against the SMU72 headers. */

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		/* spread-spectrum info is looked up by VCO frequency */
		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* hand the assembled register images back to the caller's level */
	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}
2413
2414static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
2415 uint32_t min_engine_clock_in_sr)
2416{
2417 uint32_t i, temp;
2418 uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
2419
2420 PP_ASSERT_WITH_CODE((engine_clock >= min),
2421 "Engine clock can't satisfy stutter requirement!", return 0);
2422
2423 for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
2424 temp = engine_clock >> i;
2425
2426 if(temp >= min || i == 0)
2427 break;
2428 }
2429 return (uint8_t)i;
2430}
2431
2432/**
2433 * Populates single SMC SCLK structure using the provided engine clock
2434 *
2435 * @param hwmgr the address of the hardware manager
2436 * @param engine_clock the engine clock to use to populate the structure
2437 * @param sclk the SMC SCLK structure to be populated
2438 */
2439static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level)
2440{
2441 int result;
2442 uint32_t threshold;
2443 uint32_t mvdd;
2444 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2445 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2446
2447 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
2448
2449
2450 /* populate graphics levels*/
2451 result = tonga_get_dependecy_volt_by_clk(hwmgr,
2452 pptable_info->vdd_dep_on_sclk, engine_clock,
2453 &graphic_level->MinVoltage, &mvdd);
2454 PP_ASSERT_WITH_CODE((0 == result),
2455 "can not find VDDC voltage value for VDDC \
2456 engine clock dependency table", return result);
2457
2458 /* SCLK frequency in units of 10KHz*/
2459 graphic_level->SclkFrequency = engine_clock;
2460
2461 /* Indicates maximum activity level for this performance level. 50% for now*/
2462 graphic_level->ActivityLevel = sclk_activity_level_threshold;
2463
2464 graphic_level->CcPwrDynRm = 0;
2465 graphic_level->CcPwrDynRm1 = 0;
2466 /* this level can be used if activity is high enough.*/
2467 graphic_level->EnabledForActivity = 0;
2468 /* this level can be used for throttling.*/
2469 graphic_level->EnabledForThrottle = 1;
2470 graphic_level->UpHyst = 0;
2471 graphic_level->DownHyst = 0;
2472 graphic_level->VoltageDownHyst = 0;
2473 graphic_level->PowerThrottle = 0;
2474
2475 threshold = engine_clock * data->fast_watermark_threshold / 100;
2476/*
2477 *get the DAL clock. do it in funture.
2478 PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
2479 data->display_timing.min_clock_insr = minClocks.engineClockInSR;
2480*/
2481 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2482 PHM_PlatformCaps_SclkDeepSleep))
2483 graphic_level->DeepSleepDivId =
2484 tonga_get_sleep_divider_id_from_clock(engine_clock,
2485 data->display_timing.min_clock_insr);
2486
2487 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
2488 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2489
2490 if (0 == result) {
2491 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
2492 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
2493 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
2494 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
2495 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
2496 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
2497 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
2498 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
2499 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
2500 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
2501 }
2502
2503 return result;
2504}
2505
2506/**
2507 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
2508 *
2509 * @param hwmgr the address of the hardware manager
2510 */
2511static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
2512{
2513 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2514 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2515 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2516 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2517 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
2518 int result = 0;
2519 uint32_t level_array_adress = data->dpm_table_start +
2520 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
2521 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
2522 SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/
2523 SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
2524 uint32_t i, maxEntry;
2525 uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
2526 PECI_RegistryValue reg_value;
2527 memset(levels, 0x00, level_array_size);
2528
2529 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2530 result = tonga_populate_single_graphic_level(hwmgr,
2531 dpm_table->sclk_table.dpm_levels[i].value,
2532 (uint16_t)data->activity_target[i],
2533 &(data->smc_state_table.GraphicsLevel[i]));
2534
2535 if (0 != result)
2536 return result;
2537
2538 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
2539 if (i > 1)
2540 data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2541
2542 if (0 == i) {
2543 reg_value = 0;
2544 if (reg_value != 0)
2545 data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
2546 }
2547
2548 if (1 == i) {
2549 reg_value = 0;
2550 if (reg_value != 0)
2551 data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
2552 }
2553 }
2554
2555 /* Only enable level 0 for now. */
2556 data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
2557
2558 /* set highest level watermark to high */
2559 if (dpm_table->sclk_table.count > 1)
2560 data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
2561 PPSMC_DISPLAY_WATERMARK_HIGH;
2562
2563 data->smc_state_table.GraphicsDpmLevelCount =
2564 (uint8_t)dpm_table->sclk_table.count;
2565 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
2566 tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2567
2568 if (pcie_table != NULL) {
2569 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
2570 "There must be 1 or more PCIE levels defined in PPTable.", return -1);
2571 maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
2572 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2573 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
2574 (uint8_t) ((i < maxEntry) ? i : maxEntry);
2575 }
2576 } else {
2577 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
2578 printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
2579
2580 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2581 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2582 (1<<(highest_pcie_level_enabled+1))) != 0)) {
2583 highest_pcie_level_enabled++;
2584 }
2585
2586 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
2587 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2588 (1<<lowest_pcie_level_enabled)) == 0)) {
2589 lowest_pcie_level_enabled++;
2590 }
2591
2592 while ((count < highest_pcie_level_enabled) &&
2593 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
2594 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
2595 count++;
2596 }
2597 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
2598 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
2599
2600
2601 /* set pcieDpmLevel to highest_pcie_level_enabled*/
2602 for (i = 2; i < dpm_table->sclk_table.count; i++) {
2603 data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
2604 }
2605
2606 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
2607 data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
2608
2609 /* set pcieDpmLevel to mid_pcie_level_enabled*/
2610 data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
2611 }
2612 /* level count will send to smc once at init smc table and never change*/
2613 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2614
2615 if (0 != result)
2616 return result;
2617
2618 return 0;
2619}
2620
2621/**
2622 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
2623 *
2624 * @param hwmgr the address of the hardware manager
2625 */
2626
2627static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
2628{
2629 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2630 struct tonga_dpm_table *dpm_table = &data->dpm_table;
2631 int result;
2632 /* populate MCLK dpm table to SMU7 */
2633 uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
2634 uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
2635 SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
2636 uint32_t i;
2637
2638 memset(levels, 0x00, level_array_size);
2639
2640 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2641 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
2642 "can not populate memory level as memory clock is zero", return -1);
2643 result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
2644 &(data->smc_state_table.MemoryLevel[i]));
2645 if (0 != result) {
2646 return result;
2647 }
2648 }
2649
2650 /* Only enable level 0 for now.*/
2651 data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2652
2653 /*
2654 * in order to prevent MC activity from stutter mode to push DPM up.
2655 * the UVD change complements this by putting the MCLK in a higher state
2656 * by default such that we are not effected by up threshold or and MCLK DPM latency.
2657 */
2658 data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
2659 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
2660
2661 data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
2662 data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2663 /* set highest level watermark to high*/
2664 data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
2665
2666 /* level count will send to smc once at init smc table and never change*/
2667 result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
2668 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
2669
2670 if (0 != result) {
2671 return result;
2672 }
2673
2674 return 0;
2675}
2676
/* A DLL speed selection for a memory data-rate range: presumably
 * dll_speed applies when the data rate falls within [Min, Max] —
 * confirm against the (unseen) users of this struct. */
struct TONGA_DLL_SPEED_SETTING {
	uint16_t Min; /* Minimum Data Rate*/
	uint16_t Max; /* Maximum Data Rate*/
	uint32_t dll_speed; /* The desired DLL_SPEED setting*/
};
2682
/* Stub: clock stretcher table population is not implemented for Tonga;
 * always reports success so callers gated on the ClockStretcher cap
 * proceed unaffected. */
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	return 0;
}
2687
2688/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
2689
2690
2691static int tonga_reset_single_dpm_table(
2692 struct pp_hwmgr *hwmgr,
2693 struct tonga_single_dpm_table *dpm_table,
2694 uint32_t count)
2695{
2696 uint32_t i;
2697 if (!(count <= MAX_REGULAR_DPM_NUMBER))
2698 printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
2699 table entries to exceed max number! \n");
2700
2701 dpm_table->count = count;
2702 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
2703 dpm_table->dpm_levels[i].enabled = false;
2704 }
2705
2706 return 0;
2707}
2708
2709static void tonga_setup_pcie_table_entry(
2710 struct tonga_single_dpm_table *dpm_table,
2711 uint32_t index, uint32_t pcie_gen,
2712 uint32_t pcie_lanes)
2713{
2714 dpm_table->dpm_levels[index].value = pcie_gen;
2715 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2716 dpm_table->dpm_levels[index].enabled = true;
2717}
2718
2719static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
2720{
2721 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2722 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2723 phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
2724 uint32_t i, maxEntry;
2725
2726 if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
2727 data->pcie_gen_power_saving = data->pcie_gen_performance;
2728 data->pcie_lane_power_saving = data->pcie_lane_performance;
2729 } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
2730 data->pcie_gen_performance = data->pcie_gen_power_saving;
2731 data->pcie_lane_performance = data->pcie_lane_power_saving;
2732 }
2733
2734 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
2735
2736 if (pcie_table != NULL) {
2737 /*
2738 * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
2739 * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry.
2740 */
2741 maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
2742 SMU72_MAX_LEVELS_LINK : pcie_table->count;
2743 for (i = 1; i < maxEntry; i++) {
2744 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
2745 get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
2746 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2747 }
2748 data->dpm_table.pcie_speed_table.count = maxEntry - 1;
2749 } else {
2750 /* Hardcode Pcie Table */
2751 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
2752 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2753 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2754 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
2755 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2756 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2757 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
2758 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2759 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2760 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
2761 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2762 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2763 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
2764 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2765 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2766 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
2767 get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
2768 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2769 data->dpm_table.pcie_speed_table.count = 6;
2770 }
2771 /* Populate last level for boot PCIE level, but do not increment count. */
2772 tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
2773 data->dpm_table.pcie_speed_table.count,
2774 get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
2775 get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
2776
2777 return 0;
2778
2779}
2780
2781/*
2782 * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
2783 * Dynamic state patching function will then trim these state tables to the allowed range based
2784 * on the power policy or external client requests, such as UVD request, etc.
2785 */
2786static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2787{
2788 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2789 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2790 uint32_t i;
2791
2792 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
2793 pptable_info->vdd_dep_on_sclk;
2794 phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
2795 pptable_info->vdd_dep_on_mclk;
2796
2797 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2798 "SCLK dependency table is missing. This table is mandatory", return -1);
2799 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
2800 "SCLK dependency table has to have is missing. This table is mandatory", return -1);
2801
2802 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2803 "MCLK dependency table is missing. This table is mandatory", return -1);
2804 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
2805 "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
2806
2807 /* clear the state table to reset everything to default */
2808 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
2809 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
2810 tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
2811 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
2812 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
2813 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
2814 /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
2815
2816 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
2817 "SCLK dependency table is missing. This table is mandatory", return -1);
2818 /* Initialize Sclk DPM table based on allow Sclk values*/
2819 data->dpm_table.sclk_table.count = 0;
2820
2821 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
2822 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
2823 allowed_vdd_sclk_table->entries[i].clk) {
2824 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
2825 allowed_vdd_sclk_table->entries[i].clk;
2826 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
2827 data->dpm_table.sclk_table.count++;
2828 }
2829 }
2830
2831 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
2832 "MCLK dependency table is missing. This table is mandatory", return -1);
2833 /* Initialize Mclk DPM table based on allow Mclk values */
2834 data->dpm_table.mclk_table.count = 0;
2835 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
2836 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
2837 allowed_vdd_mclk_table->entries[i].clk) {
2838 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
2839 allowed_vdd_mclk_table->entries[i].clk;
2840 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
2841 data->dpm_table.mclk_table.count++;
2842 }
2843 }
2844
2845 /* setup PCIE gen speed levels*/
2846 tonga_setup_default_pcie_tables(hwmgr);
2847
2848 /* save a copy of the default DPM table*/
2849 memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
2850
2851 return 0;
2852}
2853
2854int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
2855 const struct tonga_power_state *bootState)
2856{
2857 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2858 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2859 uint8_t count, level;
2860
2861 count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
2862 for (level = 0; level < count; level++) {
2863 if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
2864 bootState->performance_levels[0].engine_clock) {
2865 data->smc_state_table.GraphicsBootLevel = level;
2866 break;
2867 }
2868 }
2869
2870 count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
2871 for (level = 0; level < count; level++) {
2872 if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
2873 bootState->performance_levels[0].memory_clock) {
2874 data->smc_state_table.MemoryBootLevel = level;
2875 break;
2876 }
2877 }
2878
2879 return 0;
2880}
2881
2882/**
2883 * Initializes the SMC table and uploads it
2884 *
2885 * @param hwmgr the address of the powerplay hardware manager.
2886 * @param pInput the pointer to input data (PowerState)
2887 * @return always 0
2888 */
2889static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2890{
2891 int result;
2892 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
2893 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2894 SMU72_Discrete_DpmTable *table = &(data->smc_state_table);
2895 const phw_tonga_ulv_parm *ulv = &(data->ulv);
2896 uint8_t i;
2897 PECI_RegistryValue reg_value;
2898 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2899
2900 result = tonga_setup_default_dpm_tables(hwmgr);
2901 PP_ASSERT_WITH_CODE(0 == result,
2902 "Failed to setup default DPM tables!", return result;);
2903 memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
2904 if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
2905 tonga_populate_smc_voltage_tables(hwmgr, table);
2906 }
2907
2908 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2909 PHM_PlatformCaps_AutomaticDCTransition)) {
2910 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2911 }
2912
2913 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2914 PHM_PlatformCaps_StepVddc)) {
2915 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2916 }
2917
2918 if (data->is_memory_GDDR5) {
2919 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2920 }
2921
2922 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2923
2924 if (i == 1 || i == 0) {
2925 table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
2926 }
2927
2928 if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
2929 PP_ASSERT_WITH_CODE(0 == result,
2930 "Failed to initialize ULV state!", return result;);
2931
2932 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2933 ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
2934 }
2935
2936 result = tonga_populate_smc_link_level(hwmgr, table);
2937 PP_ASSERT_WITH_CODE(0 == result,
2938 "Failed to initialize Link Level!", return result;);
2939
2940 result = tonga_populate_all_graphic_levels(hwmgr);
2941 PP_ASSERT_WITH_CODE(0 == result,
2942 "Failed to initialize Graphics Level!", return result;);
2943
2944 result = tonga_populate_all_memory_levels(hwmgr);
2945 PP_ASSERT_WITH_CODE(0 == result,
2946 "Failed to initialize Memory Level!", return result;);
2947
2948 result = tonga_populate_smv_acpi_level(hwmgr, table);
2949 PP_ASSERT_WITH_CODE(0 == result,
2950 "Failed to initialize ACPI Level!", return result;);
2951
2952 result = tonga_populate_smc_vce_level(hwmgr, table);
2953 PP_ASSERT_WITH_CODE(0 == result,
2954 "Failed to initialize VCE Level!", return result;);
2955
2956 result = tonga_populate_smc_acp_level(hwmgr, table);
2957 PP_ASSERT_WITH_CODE(0 == result,
2958 "Failed to initialize ACP Level!", return result;);
2959
2960 result = tonga_populate_smc_samu_level(hwmgr, table);
2961 PP_ASSERT_WITH_CODE(0 == result,
2962 "Failed to initialize SAMU Level!", return result;);
2963
2964 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2965 /* need to populate the ARB settings for the initial state. */
2966 result = tonga_program_memory_timing_parameters(hwmgr);
2967 PP_ASSERT_WITH_CODE(0 == result,
2968 "Failed to Write ARB settings for the initial state.", return result;);
2969
2970 result = tonga_populate_smc_uvd_level(hwmgr, table);
2971 PP_ASSERT_WITH_CODE(0 == result,
2972 "Failed to initialize UVD Level!", return result;);
2973
2974 result = tonga_populate_smc_boot_level(hwmgr, table);
2975 PP_ASSERT_WITH_CODE(0 == result,
2976 "Failed to initialize Boot Level!", return result;);
2977
2978 result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2979 PP_ASSERT_WITH_CODE(result == 0,
2980 "Failed to populate BAPM Parameters!", return result);
2981
2982 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2983 PHM_PlatformCaps_ClockStretcher)) {
2984 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2985 PP_ASSERT_WITH_CODE(0 == result,
2986 "Failed to populate Clock Stretcher Data Table!", return result;);
2987 }
2988 table->GraphicsVoltageChangeEnable = 1;
2989 table->GraphicsThermThrottleEnable = 1;
2990 table->GraphicsInterval = 1;
2991 table->VoltageInterval = 1;
2992 table->ThermalInterval = 1;
2993 table->TemperatureLimitHigh =
2994 pptable_info->cac_dtp_table->usTargetOperatingTemp *
2995 TONGA_Q88_FORMAT_CONVERSION_UNIT;
2996 table->TemperatureLimitLow =
2997 (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2998 TONGA_Q88_FORMAT_CONVERSION_UNIT;
2999 table->MemoryVoltageChangeEnable = 1;
3000 table->MemoryInterval = 1;
3001 table->VoltageResponseTime = 0;
3002 table->PhaseResponseTime = 0;
3003 table->MemoryThermThrottleEnable = 1;
3004
3005 /*
3006 * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had)
3007 * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
3008 * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay
3009 * To avoid it, we set PCIeBootLinkLevel to highest dpm level
3010 */
3011 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
3012 "There must be 1 or more PCIE levels defined in PPTable.",
3013 return -1);
3014
3015 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
3016
3017 table->PCIeGenInterval = 1;
3018
3019 result = tonga_populate_vr_config(hwmgr, table);
3020 PP_ASSERT_WITH_CODE(0 == result,
3021 "Failed to populate VRConfig setting!", return result);
3022
3023 table->ThermGpio = 17;
3024 table->SclkStepSize = 0x4000;
3025
3026 reg_value = 0;
3027 if ((0 == reg_value) &&
3028 (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
3029 &gpio_pin_assignment))) {
3030 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3031 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3032 PHM_PlatformCaps_RegulatorHot);
3033 } else {
3034 table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
3035 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3036 PHM_PlatformCaps_RegulatorHot);
3037 }
3038
3039 /* ACDC Switch GPIO */
3040 reg_value = 0;
3041 if ((0 == reg_value) &&
3042 (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3043 &gpio_pin_assignment))) {
3044 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3045 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3046 PHM_PlatformCaps_AutomaticDCTransition);
3047 } else {
3048 table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
3049 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3050 PHM_PlatformCaps_AutomaticDCTransition);
3051 }
3052
3053 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3054 PHM_PlatformCaps_Falcon_QuickTransition);
3055
3056 reg_value = 0;
3057 if (1 == reg_value) {
3058 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3059 PHM_PlatformCaps_AutomaticDCTransition);
3060 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3061 PHM_PlatformCaps_Falcon_QuickTransition);
3062 }
3063
3064 reg_value = 0;
3065 if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
3066 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
3067 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3068 PHM_PlatformCaps_ThermalOutGPIO);
3069
3070 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3071
3072 table->ThermOutPolarity =
3073 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
3074 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
3075
3076 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
3077
3078 /* if required, combine VRHot/PCC with thermal out GPIO*/
3079 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3080 PHM_PlatformCaps_RegulatorHot) &&
3081 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3082 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
3083 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
3084 }
3085 } else {
3086 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
3087 PHM_PlatformCaps_ThermalOutGPIO);
3088
3089 table->ThermOutGpio = 17;
3090 table->ThermOutPolarity = 1;
3091 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
3092 }
3093
3094 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
3095 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
3096 }
3097 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
3098 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
3099 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
3100 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
3101 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
3102 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
3103 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
3104 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
3105 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
3106
3107 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
3108 result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
3109 offsetof(SMU72_Discrete_DpmTable, SystemFlags),
3110 (uint8_t *)&(table->SystemFlags),
3111 sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
3112 data->sram_end);
3113
3114 PP_ASSERT_WITH_CODE(0 == result,
3115 "Failed to upload dpm data to SMC memory!", return result;);
3116
3117 return result;
3118}
3119
/*
 * Look up the voltage for DAL's requested power level and forward the
 * requested VDDC voltage to the SMC.
 *
 * Currently a no-op placeholder: no DAL minimum-voltage request is applied.
 */
static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
{
}
3125
3126int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3127{
3128 PPSMC_Result result;
3129 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3130
3131 /* Apply minimum voltage based on DAL's request level */
3132 tonga_apply_dal_minimum_voltage_request(hwmgr);
3133
3134 if (0 == data->sclk_dpm_key_disabled) {
3135 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3136 if (tonga_is_dpm_running(hwmgr))
3137 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3138
3139 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3140 result = smum_send_msg_to_smc_with_parameter(
3141 hwmgr->smumgr,
3142 (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
3143 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3144 PP_ASSERT_WITH_CODE((0 == result),
3145 "Set Sclk Dpm enable Mask failed", return -1);
3146 }
3147 }
3148
3149 if (0 == data->mclk_dpm_key_disabled) {
3150 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3151 if (tonga_is_dpm_running(hwmgr))
3152 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3153
3154 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3155 result = smum_send_msg_to_smc_with_parameter(
3156 hwmgr->smumgr,
3157 (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
3158 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3159 PP_ASSERT_WITH_CODE((0 == result),
3160 "Set Mclk Dpm enable Mask failed", return -1);
3161 }
3162 }
3163
3164 return 0;
3165}
3166
3167
3168int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
3169{
3170 uint32_t level, tmp;
3171 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3172
3173 if (0 == data->pcie_dpm_key_disabled) {
3174 /* PCIE */
3175 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3176 level = 0;
3177 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
3178 while (tmp >>= 1)
3179 level++ ;
3180
3181 if (0 != level) {
3182 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3183 "force highest pcie dpm state failed!", return -1);
3184 }
3185 }
3186 }
3187
3188 if (0 == data->sclk_dpm_key_disabled) {
3189 /* SCLK */
3190 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
3191 level = 0;
3192 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
3193 while (tmp >>= 1)
3194 level++ ;
3195
3196 if (0 != level) {
3197 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3198 "force highest sclk dpm state failed!", return -1);
3199 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3200 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3201 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3202 Curr_Sclk_Index does not match the level \n");
3203
3204 }
3205 }
3206 }
3207
3208 if (0 == data->mclk_dpm_key_disabled) {
3209 /* MCLK */
3210 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3211 level = 0;
3212 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3213 while (tmp >>= 1)
3214 level++ ;
3215
3216 if (0 != level) {
3217 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3218 "force highest mclk dpm state failed!", return -1);
3219 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3220 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3221 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3222 Curr_Mclk_Index does not match the level \n");
3223 }
3224 }
3225 }
3226
3227 return 0;
3228}
3229
3230/**
3231 * Find the MC microcode version and store it in the HwMgr struct
3232 *
3233 * @param hwmgr the address of the powerplay hardware manager.
3234 * @return always 0
3235 */
3236int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
3237{
3238 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
3239
3240 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
3241
3242 return 0;
3243}
3244
3245/**
3246 * Initialize Dynamic State Adjustment Rule Settings
3247 *
3248 * @param hwmgr the address of the powerplay hardware manager.
3249 */
3250int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
3251{
3252 uint32_t table_size;
3253 struct phm_clock_voltage_dependency_table *table_clk_vlt;
3254 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3255
3256 hwmgr->dyn_state.mclk_sclk_ratio = 4;
3257 hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
3258 hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
3259
3260 /* initialize vddc_dep_on_dal_pwrl table */
3261 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3262 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
3263
3264 if (NULL == table_clk_vlt) {
3265 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
3266 return -ENOMEM;
3267 } else {
3268 table_clk_vlt->count = 4;
3269 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
3270 table_clk_vlt->entries[0].v = 0;
3271 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
3272 table_clk_vlt->entries[1].v = 720;
3273 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
3274 table_clk_vlt->entries[2].v = 810;
3275 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
3276 table_clk_vlt->entries[3].v = 900;
3277 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
3278 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
3279 }
3280
3281 return 0;
3282}
3283
3284static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
3285{
3286 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3287 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3288
3289 phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
3290 pptable_info->vdd_dep_on_sclk;
3291 phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
3292 pptable_info->vdd_dep_on_mclk;
3293
3294 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
3295 "VDD dependency on SCLK table is missing. \
3296 This table is mandatory", return -1);
3297 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
3298 "VDD dependency on SCLK table has to have is missing. \
3299 This table is mandatory", return -1);
3300
3301 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
3302 "VDD dependency on MCLK table is missing. \
3303 This table is mandatory", return -1);
3304 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
3305 "VDD dependency on MCLK table has to have is missing. \
3306 This table is mandatory", return -1);
3307
3308 data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
3309 data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3310
3311 pptable_info->max_clock_voltage_on_ac.sclk =
3312 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
3313 pptable_info->max_clock_voltage_on_ac.mclk =
3314 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
3315 pptable_info->max_clock_voltage_on_ac.vddc =
3316 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
3317 pptable_info->max_clock_voltage_on_ac.vddci =
3318 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
3319
3320 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
3321 pptable_info->max_clock_voltage_on_ac.sclk;
3322 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
3323 pptable_info->max_clock_voltage_on_ac.mclk;
3324 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
3325 pptable_info->max_clock_voltage_on_ac.vddc;
3326 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
3327 pptable_info->max_clock_voltage_on_ac.vddci;
3328
3329 return 0;
3330}
3331
3332int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3333{
3334 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3335 int result = 1;
3336
3337 PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
3338 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
3339 return result);
3340
3341 if (0 == data->pcie_dpm_key_disabled) {
3342 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
3343 hwmgr->smumgr,
3344 PPSMC_MSG_PCIeDPM_UnForceLevel)),
3345 "unforce pcie level failed!",
3346 return -1);
3347 }
3348
3349 result = tonga_upload_dpm_level_enable_mask(hwmgr);
3350
3351 return result;
3352}
3353
3354static uint32_t tonga_get_lowest_enable_level(
3355 struct pp_hwmgr *hwmgr, uint32_t level_mask)
3356{
3357 uint32_t level = 0;
3358
3359 while (0 == (level_mask & (1 << level)))
3360 level++;
3361
3362 return level;
3363}
3364
3365static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3366{
3367 uint32_t level;
3368 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3369
3370 if (0 == data->pcie_dpm_key_disabled) {
3371 /* PCIE */
3372 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
3373 level = tonga_get_lowest_enable_level(hwmgr,
3374 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3375 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
3376 "force lowest pcie dpm state failed!", return -1);
3377 }
3378 }
3379
3380 if (0 == data->sclk_dpm_key_disabled) {
3381 /* SCLK */
3382 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3383 level = tonga_get_lowest_enable_level(hwmgr,
3384 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3385
3386 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
3387 "force sclk dpm state failed!", return -1);
3388
3389 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
3390 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
3391 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3392 Curr_Sclk_Index does not match the level \n");
3393 }
3394 }
3395
3396 if (0 == data->mclk_dpm_key_disabled) {
3397 /* MCLK */
3398 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
3399 level = tonga_get_lowest_enable_level(hwmgr,
3400 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3401 PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
3402 "force lowest mclk dpm state failed!", return -1);
3403 if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3404 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
3405 printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
3406 Curr_Mclk_Index does not match the level \n");
3407 }
3408 }
3409
3410 return 0;
3411}
3412
3413static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
3414{
3415 uint8_t entryId;
3416 uint8_t voltageId;
3417 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3418 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3419
3420 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
3421 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
3422 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
3423
3424 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3425 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3426 voltageId = sclk_table->entries[entryId].vddInd;
3427 sclk_table->entries[entryId].vddgfx =
3428 pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
3429 }
3430 } else {
3431 for (entryId = 0; entryId < sclk_table->count; ++entryId) {
3432 voltageId = sclk_table->entries[entryId].vddInd;
3433 sclk_table->entries[entryId].vddc =
3434 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3435 }
3436 }
3437
3438 for (entryId = 0; entryId < mclk_table->count; ++entryId) {
3439 voltageId = mclk_table->entries[entryId].vddInd;
3440 mclk_table->entries[entryId].vddc =
3441 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3442 }
3443
3444 for (entryId = 0; entryId < mm_table->count; ++entryId) {
3445 voltageId = mm_table->entries[entryId].vddcInd;
3446 mm_table->entries[entryId].vddc =
3447 pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
3448 }
3449
3450 return 0;
3451
3452}
3453
/*
 * Derive the "other rail" voltages when GFX runs on a separate SVID2 rail:
 * compute VDDC for each SCLK entry (from vddgfx + vdd_offset) and VDDGFX
 * for each MCLK entry (from vddc + vdd_offset), adding every derived
 * voltage to the corresponding lookup table via tonga_add_voltage().
 *
 * vdd_offset is a 16-bit value; bit 15 set marks a negative offset stored
 * as (offset + 0xFFFF), hence the "- 0xFFFF" correction branch.
 * No-op unless vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2.
 * Always returns 0.
 */
static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entryId;
	phm_ppt_v1_voltage_lookup_record v_record;
	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
		for (entryId = 0; entryId < sclk_table->count; ++entryId) {
			/* bit 15 of vdd_offset flags a negative offset */
			if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
					sclk_table->entries[entryId].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
					sclk_table->entries[entryId].vdd_offset;

			/* propagate the derived voltage into the entry's vddc and
			 * all cac fields of the new lookup record */
			sclk_table->entries[entryId].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		for (entryId = 0; entryId < mclk_table->count; ++entryId) {
			if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entryId].vddc +
					mclk_table->entries[entryId].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entryId].vddc +
					mclk_table->entries[entryId].vdd_offset;

			mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}

	return 0;

}
3497
3498static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
3499{
3500 uint32_t entryId;
3501 phm_ppt_v1_voltage_lookup_record v_record;
3502 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3503 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3504 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
3505
3506 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3507 for (entryId = 0; entryId < mm_table->count; entryId++) {
3508 if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
3509 v_record.us_vdd = mm_table->entries[entryId].vddc +
3510 mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
3511 else
3512 v_record.us_vdd = mm_table->entries[entryId].vddc +
3513 mm_table->entries[entryId].vddgfx_offset;
3514
3515 /* Add the calculated VDDGFX to the VDDGFX lookup table */
3516 mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
3517 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
3518 tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
3519 }
3520 }
3521 return 0;
3522}
3523
3524
3525/**
3526 * Change virtual leakage voltage to actual value.
3527 *
3528 * @param hwmgr the address of the powerplay hardware manager.
3529 * @param pointer to changing voltage
3530 * @param pointer to leakage table
3531 */
3532static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
3533 uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
3534{
3535 uint32_t leakage_index;
3536
3537 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
3538 for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
3539 /* if this voltage matches a leakage voltage ID */
3540 /* patch with actual leakage voltage */
3541 if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
3542 *voltage = pLeakageTable->actual_voltage[leakage_index];
3543 break;
3544 }
3545 }
3546
3547 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
3548 printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
3549}
3550
3551/**
3552 * Patch voltage lookup table by EVV leakages.
3553 *
3554 * @param hwmgr the address of the powerplay hardware manager.
3555 * @param pointer to voltage lookup table
3556 * @param pointer to leakage table
3557 * @return always 0
3558 */
3559static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
3560 phm_ppt_v1_voltage_lookup_table *lookup_table,
3561 phw_tonga_leakage_voltage *pLeakageTable)
3562{
3563 uint32_t i;
3564
3565 for (i = 0; i < lookup_table->count; i++) {
3566 tonga_patch_with_vdd_leakage(hwmgr,
3567 &lookup_table->entries[i].us_vdd, pLeakageTable);
3568 }
3569
3570 return 0;
3571}
3572
3573static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
3574 phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
3575{
3576 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3577
3578 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
3579 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
3580 pptable_info->max_clock_voltage_on_dc.vddc;
3581
3582 return 0;
3583}
3584
3585static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
3586 struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
3587 uint16_t *Vddgfx)
3588{
3589 tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
3590 return 0;
3591}
3592
3593int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
3594 phm_ppt_v1_voltage_lookup_table *lookup_table)
3595{
3596 uint32_t table_size, i, j;
3597 phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
3598 table_size = lookup_table->count;
3599
3600 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
3601 "Lookup table is empty", return -1);
3602
3603 /* Sorting voltages */
3604 for (i = 0; i < table_size - 1; i++) {
3605 for (j = i + 1; j > 0; j--) {
3606 if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
3607 tmp_voltage_lookup_record = lookup_table->entries[j-1];
3608 lookup_table->entries[j-1] = lookup_table->entries[j];
3609 lookup_table->entries[j] = tmp_voltage_lookup_record;
3610 }
3611 }
3612 }
3613
3614 return 0;
3615}
3616
3617static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
3618{
3619 int result = 0;
3620 int tmp_result;
3621 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3622 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
3623
3624 if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
3625 tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
3626 pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
3627 if (tmp_result != 0)
3628 result = tmp_result;
3629
3630 tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
3631 &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
3632 if (tmp_result != 0)
3633 result = tmp_result;
3634 } else {
3635 tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
3636 pptable_info->vddc_lookup_table, &(data->vddc_leakage));
3637 if (tmp_result != 0)
3638 result = tmp_result;
3639
3640 tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr,
3641 &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
3642 if (tmp_result != 0)
3643 result = tmp_result;
3644 }
3645
3646 tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
3647 if (tmp_result != 0)
3648 result = tmp_result;
3649
3650 tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
3651 if (tmp_result != 0)
3652 result = tmp_result;
3653
3654 tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
3655 if (tmp_result != 0)
3656 result = tmp_result;
3657
3658 tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
3659 if (tmp_result != 0)
3660 result = tmp_result;
3661
3662 tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
3663 if (tmp_result != 0)
3664 result = tmp_result;
3665
3666 return result;
3667}
3668
3669int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
3670{
3671 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3672 data->low_sclk_interrupt_threshold = 0;
3673
3674 return 0;
3675}
3676
/*
 * One-time ASIC setup: read clock registers, detect the memory type,
 * enable ACPI power management, initialize power-gate state, cache the
 * MC microcode version and reset the SCLK interrupt threshold.
 *
 * A failing step is reported via PP_ASSERT_WITH_CODE and recorded, but
 * later steps still run; the last failing code (0 if none) is returned.
 */
int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int rc, result = 0;

	rc = tonga_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to read clock registers!", result = rc);

	rc = tonga_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to get memory type!", result = rc);

	rc = tonga_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to enable ACPI power management!", result = rc);

	rc = tonga_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to init power gate state!", result = rc);

	rc = tonga_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to get MC microcode version!", result = rc);

	rc = tonga_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == rc),
		"Failed to init sclk threshold!", result = rc);

	return result;
}
3707
3708/**
3709 * Enable voltage control
3710 *
3711 * @param hwmgr the address of the powerplay hardware manager.
3712 * @return always 0
3713 */
3714int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
3715{
3716 /* enable voltage control */
3717 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
3718
3719 return 0;
3720}
3721
3722/**
3723 * Checks if we want to support voltage control
3724 *
3725 * @param hwmgr the address of the powerplay hardware manager.
3726 */
3727bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
3728{
3729 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3730
3731 return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
3732}
3733
3734/*---------------------------MC----------------------------*/
3735
3736uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
3737{
3738 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
3739}
3740
3741bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
3742{
3743 bool result = true;
3744
3745 switch (inReg) {
3746 case mmMC_SEQ_RAS_TIMING:
3747 *outReg = mmMC_SEQ_RAS_TIMING_LP;
3748 break;
3749
3750 case mmMC_SEQ_DLL_STBY:
3751 *outReg = mmMC_SEQ_DLL_STBY_LP;
3752 break;
3753
3754 case mmMC_SEQ_G5PDX_CMD0:
3755 *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
3756 break;
3757
3758 case mmMC_SEQ_G5PDX_CMD1:
3759 *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
3760 break;
3761
3762 case mmMC_SEQ_G5PDX_CTRL:
3763 *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
3764 break;
3765
3766 case mmMC_SEQ_CAS_TIMING:
3767 *outReg = mmMC_SEQ_CAS_TIMING_LP;
3768 break;
3769
3770 case mmMC_SEQ_MISC_TIMING:
3771 *outReg = mmMC_SEQ_MISC_TIMING_LP;
3772 break;
3773
3774 case mmMC_SEQ_MISC_TIMING2:
3775 *outReg = mmMC_SEQ_MISC_TIMING2_LP;
3776 break;
3777
3778 case mmMC_SEQ_PMG_DVS_CMD:
3779 *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
3780 break;
3781
3782 case mmMC_SEQ_PMG_DVS_CTL:
3783 *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
3784 break;
3785
3786 case mmMC_SEQ_RD_CTL_D0:
3787 *outReg = mmMC_SEQ_RD_CTL_D0_LP;
3788 break;
3789
3790 case mmMC_SEQ_RD_CTL_D1:
3791 *outReg = mmMC_SEQ_RD_CTL_D1_LP;
3792 break;
3793
3794 case mmMC_SEQ_WR_CTL_D0:
3795 *outReg = mmMC_SEQ_WR_CTL_D0_LP;
3796 break;
3797
3798 case mmMC_SEQ_WR_CTL_D1:
3799 *outReg = mmMC_SEQ_WR_CTL_D1_LP;
3800 break;
3801
3802 case mmMC_PMG_CMD_EMRS:
3803 *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
3804 break;
3805
3806 case mmMC_PMG_CMD_MRS:
3807 *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
3808 break;
3809
3810 case mmMC_PMG_CMD_MRS1:
3811 *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
3812 break;
3813
3814 case mmMC_SEQ_PMG_TIMING:
3815 *outReg = mmMC_SEQ_PMG_TIMING_LP;
3816 break;
3817
3818 case mmMC_PMG_CMD_MRS2:
3819 *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
3820 break;
3821
3822 case mmMC_SEQ_WR_CTL_2:
3823 *outReg = mmMC_SEQ_WR_CTL_2_LP;
3824 break;
3825
3826 default:
3827 result = false;
3828 break;
3829 }
3830
3831 return result;
3832}
3833
3834int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
3835{
3836 uint32_t i;
3837 uint16_t address;
3838
3839 for (i = 0; i < table->last; i++) {
3840 table->mc_reg_address[i].s0 =
3841 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
3842 ? address : table->mc_reg_address[i].s1;
3843 }
3844 return 0;
3845}
3846
3847int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
3848{
3849 uint8_t i, j;
3850
3851 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3852 "Invalid VramInfo table.", return -1);
3853 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
3854 "Invalid VramInfo table.", return -1);
3855
3856 for (i = 0; i < table->last; i++) {
3857 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3858 }
3859 ni_table->last = table->last;
3860
3861 for (i = 0; i < table->num_entries; i++) {
3862 ni_table->mc_reg_table_entry[i].mclk_max =
3863 table->mc_reg_table_entry[i].mclk_max;
3864 for (j = 0; j < table->last; j++) {
3865 ni_table->mc_reg_table_entry[i].mc_data[j] =
3866 table->mc_reg_table_entry[i].mc_data[j];
3867 }
3868 }
3869
3870 ni_table->num_entries = table->num_entries;
3871
3872 return 0;
3873}
3874
3875/**
3876 * VBIOS omits some information to reduce size, we need to recover them here.
3877 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3878 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3879 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
3880 * 3. need to set these data for each clock range
3881 *
3882 * @param hwmgr the address of the powerplay hardware manager.
3883 * @param table the address of MCRegTable
3884 * @return always 0
3885 */
3886int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
3887{
3888 uint8_t i, j, k;
3889 uint32_t temp_reg;
3890 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
3891
3892 for (i = 0, j = table->last; i < table->last; i++) {
3893 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3894 "Invalid VramInfo table.", return -1);
3895 switch (table->mc_reg_address[i].s1) {
3896 /*
3897 * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
3898 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
3899 */
3900 case mmMC_SEQ_MISC1:
3901 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
3902 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
3903 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
3904 for (k = 0; k < table->num_entries; k++) {
3905 table->mc_reg_table_entry[k].mc_data[j] =
3906 ((temp_reg & 0xffff0000)) |
3907 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3908 }
3909 j++;
3910 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3911 "Invalid VramInfo table.", return -1);
3912
3913 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
3914 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
3915 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
3916 for (k = 0; k < table->num_entries; k++) {
3917 table->mc_reg_table_entry[k].mc_data[j] =
3918 (temp_reg & 0xffff0000) |
3919 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3920
3921 if (!data->is_memory_GDDR5) {
3922 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3923 }
3924 }
3925 j++;
3926 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3927 "Invalid VramInfo table.", return -1);
3928
3929 if (!data->is_memory_GDDR5) {
3930 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
3931 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
3932 for (k = 0; k < table->num_entries; k++) {
3933 table->mc_reg_table_entry[k].mc_data[j] =
3934 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3935 }
3936 j++;
3937 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3938 "Invalid VramInfo table.", return -1);
3939 }
3940
3941 break;
3942
3943 case mmMC_SEQ_RESERVE_M:
3944 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
3945 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
3946 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
3947 for (k = 0; k < table->num_entries; k++) {
3948 table->mc_reg_table_entry[k].mc_data[j] =
3949 (temp_reg & 0xffff0000) |
3950 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3951 }
3952 j++;
3953 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3954 "Invalid VramInfo table.", return -1);
3955 break;
3956
3957 default:
3958 break;
3959 }
3960
3961 }
3962
3963 table->last = j;
3964
3965 return 0;
3966}
3967
3968int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
3969{
3970 uint8_t i, j;
3971 for (i = 0; i < table->last; i++) {
3972 for (j = 1; j < table->num_entries; j++) {
3973 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3974 table->mc_reg_table_entry[j].mc_data[i]) {
3975 table->validflag |= (1<<i);
3976 break;
3977 }
3978 }
3979 }
3980
3981 return 0;
3982}
3983
3984static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3985{
3986 int result;
3987 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3988 pp_atomctrl_mc_reg_table *table;
3989 phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
3990 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3991
3992 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
3993
3994 if (NULL == table)
3995 return -ENOMEM;
3996
3997 /* Program additional LP registers that are no longer programmed by VBIOS */
3998 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
3999 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
4000 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
4001 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
4002 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
4003 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
4004 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
4005 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
4006 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
4007 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
4008 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
4009 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
4010 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
4011 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
4012 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
4013 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
4014 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
4015 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
4016 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
4017 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
4018
4019 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
4020
4021 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
4022
4023 if (0 == result)
4024 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
4025
4026 if (0 == result) {
4027 tonga_set_s0_mc_reg_index(ni_table);
4028 result = tonga_set_mc_special_registers(hwmgr, ni_table);
4029 }
4030
4031 if (0 == result)
4032 tonga_set_valid_flag(ni_table);
4033
4034 kfree(table);
4035 return result;
4036}
4037
4038/*
4039* Copy one arb setting to another and then switch the active set.
4040* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
4041*/
4042int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
4043 uint32_t arbFreqSrc, uint32_t arbFreqDest)
4044{
4045 uint32_t mc_arb_dram_timing;
4046 uint32_t mc_arb_dram_timing2;
4047 uint32_t burst_time;
4048 uint32_t mc_cg_config;
4049
4050 switch (arbFreqSrc) {
4051 case MC_CG_ARB_FREQ_F0:
4052 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
4053 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
4054 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
4055 break;
4056
4057 case MC_CG_ARB_FREQ_F1:
4058 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
4059 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
4060 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
4061 break;
4062
4063 default:
4064 return -1;
4065 }
4066
4067 switch (arbFreqDest) {
4068 case MC_CG_ARB_FREQ_F0:
4069 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
4070 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
4071 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
4072 break;
4073
4074 case MC_CG_ARB_FREQ_F1:
4075 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
4076 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
4077 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
4078 break;
4079
4080 default:
4081 return -1;
4082 }
4083
4084 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
4085 mc_cg_config |= 0x0000000F;
4086 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
4087 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
4088
4089 return 0;
4090}
4091
4092/**
4093 * Initial switch from ARB F0->F1
4094 *
4095 * @param hwmgr the address of the powerplay hardware manager.
4096 * @return always 0
4097 * This function is to be called from the SetPowerState table.
4098 */
4099int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
4100{
4101 return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
4102}
4103
4104/**
4105 * Initialize the ARB DRAM timing table's index field.
4106 *
4107 * @param hwmgr the address of the powerplay hardware manager.
4108 * @return always 0
4109 */
4110int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
4111{
4112 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4113 uint32_t tmp;
4114 int result;
4115
4116 /*
4117 * This is a read-modify-write on the first byte of the ARB table.
4118 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
4119 * This solution is ugly, but we never write the whole table only individual fields in it.
4120 * In reality this field should not be in that structure but in a soft register.
4121 */
4122 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
4123 data->arb_table_start, &tmp, data->sram_end);
4124
4125 if (0 != result)
4126 return result;
4127
4128 tmp &= 0x00FFFFFF;
4129 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
4130
4131 return tonga_write_smc_sram_dword(hwmgr->smumgr,
4132 data->arb_table_start, tmp, data->sram_end);
4133}
4134
4135int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
4136{
4137 const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4138
4139 uint32_t i, j;
4140
4141 for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
4142 if (data->tonga_mc_reg_table.validflag & 1<<j) {
4143 PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
4144 "Index of mc_reg_table->address[] array out of boundary", return -1);
4145 mc_reg_table->address[i].s0 =
4146 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
4147 mc_reg_table->address[i].s1 =
4148 PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
4149 i++;
4150 }
4151 }
4152
4153 mc_reg_table->last = (uint8_t)i;
4154
4155 return 0;
4156}
4157
4158/*convert register values from driver to SMC format */
4159void tonga_convert_mc_registers(
4160 const phw_tonga_mc_reg_entry * pEntry,
4161 SMU72_Discrete_MCRegisterSet *pData,
4162 uint32_t numEntries, uint32_t validflag)
4163{
4164 uint32_t i, j;
4165
4166 for (i = 0, j = 0; j < numEntries; j++) {
4167 if (validflag & 1<<j) {
4168 pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
4169 i++;
4170 }
4171 }
4172}
4173
4174/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */
4175int tonga_convert_mc_reg_table_entry_to_smc(
4176 struct pp_hwmgr *hwmgr,
4177 const uint32_t memory_clock,
4178 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
4179 )
4180{
4181 const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4182 uint32_t i = 0;
4183
4184 for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
4185 if (memory_clock <=
4186 data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
4187 break;
4188 }
4189 }
4190
4191 if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
4192 --i;
4193
4194 tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
4195 mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
4196
4197 return 0;
4198}
4199
4200int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
4201 SMU72_Discrete_MCRegisters *mc_reg_table)
4202{
4203 int result = 0;
4204 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4205 int res;
4206 uint32_t i;
4207
4208 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
4209 res = tonga_convert_mc_reg_table_entry_to_smc(
4210 hwmgr,
4211 data->dpm_table.mclk_table.dpm_levels[i].value,
4212 &mc_reg_table->data[i]
4213 );
4214
4215 if (0 != res)
4216 result = res;
4217 }
4218
4219 return result;
4220}
4221
4222int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
4223{
4224 int result;
4225 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4226
4227 memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
4228 result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
4229 PP_ASSERT_WITH_CODE(0 == result,
4230 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
4231
4232 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
4233 PP_ASSERT_WITH_CODE(0 == result,
4234 "Failed to initialize MCRegTable for driver state!", return result;);
4235
4236 return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
4237 (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
4238}
4239
4240/**
4241 * Programs static screed detection parameters
4242 *
4243 * @param hwmgr the address of the powerplay hardware manager.
4244 * @return always 0
4245 */
4246int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
4247{
4248 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
4249
4250 /* Set static screen threshold unit*/
4251 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
4252 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
4253 data->static_screen_threshold_unit);
4254 /* Set static screen threshold*/
4255 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
4256 CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
4257 data->static_screen_threshold);
4258
4259 return 0;
4260}
4261
4262/**
4263 * Setup display gap for glitch free memory clock switching.
4264 *
4265 * @param hwmgr the address of the powerplay hardware manager.
4266 * @return always 0
4267 */
4268int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
4269{
4270 uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
4271 CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4272
4273 display_gap = PHM_SET_FIELD(display_gap,
4274 CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
4275
4276 display_gap = PHM_SET_FIELD(display_gap,
4277 CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
4278
4279 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4280 ixCG_DISPLAY_GAP_CNTL, display_gap);
4281
4282 return 0;
4283}
4284
4285/**
4286 * Programs activity state transition voting clients
4287 *
4288 * @param hwmgr the address of the powerplay hardware manager.
4289 * @return always 0
4290 */
4291int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
4292{
4293 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
4294
4295 /* Clear reset for voting clients before enabling DPM */
4296 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4297 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
4298 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4299 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
4300
4301 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4302 ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
4303 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4304 ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
4305 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4306 ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
4307 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4308 ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
4309 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4310 ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
4311 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4312 ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
4313 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4314 ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
4315 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4316 ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
4317
4318 return 0;
4319}
4320
4321static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
4322{
4323 bool protection;
4324 enum DPM_EVENT_SRC src;
4325
4326 switch (sources) {
4327 default:
4328 printk(KERN_ERR "Unknown throttling event sources.");
4329 /* fall through */
4330 case 0:
4331 protection = false;
4332 /* src is unused */
4333 break;
4334 case (1 << PHM_AutoThrottleSource_Thermal):
4335 protection = true;
4336 src = DPM_EVENT_SRC_DIGITAL;
4337 break;
4338 case (1 << PHM_AutoThrottleSource_External):
4339 protection = true;
4340 src = DPM_EVENT_SRC_EXTERNAL;
4341 break;
4342 case (1 << PHM_AutoThrottleSource_External) |
4343 (1 << PHM_AutoThrottleSource_Thermal):
4344 protection = true;
4345 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
4346 break;
4347 }
4348 /* Order matters - don't enable thermal protection for the wrong source. */
4349 if (protection) {
4350 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
4351 DPM_EVENT_SRC, src);
4352 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
4353 THERMAL_PROTECTION_DIS,
4354 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4355 PHM_PlatformCaps_ThermalController));
4356 } else
4357 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
4358 THERMAL_PROTECTION_DIS, 1);
4359}
4360
4361static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
4362 PHM_AutoThrottleSource source)
4363{
4364 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4365
4366 if (!(data->active_auto_throttle_sources & (1 << source))) {
4367 data->active_auto_throttle_sources |= 1 << source;
4368 tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
4369 }
4370 return 0;
4371}
4372
4373static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
4374{
4375 return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
4376}
4377
4378static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
4379 PHM_AutoThrottleSource source)
4380{
4381 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4382
4383 if (data->active_auto_throttle_sources & (1 << source)) {
4384 data->active_auto_throttle_sources &= ~(1 << source);
4385 tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
4386 }
4387 return 0;
4388}
4389
4390static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
4391{
4392 return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
4393}
4394
4395int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
4396{
4397 int tmp_result, result = 0;
4398
4399 tmp_result = tonga_check_for_dpm_stopped(hwmgr);
4400
4401 if (cf_tonga_voltage_control(hwmgr)) {
4402 tmp_result = tonga_enable_voltage_control(hwmgr);
4403 PP_ASSERT_WITH_CODE((0 == tmp_result),
4404 "Failed to enable voltage control!", result = tmp_result);
4405
4406 tmp_result = tonga_construct_voltage_tables(hwmgr);
4407 PP_ASSERT_WITH_CODE((0 == tmp_result),
4408 "Failed to contruct voltage tables!", result = tmp_result);
4409 }
4410
4411 tmp_result = tonga_initialize_mc_reg_table(hwmgr);
4412 PP_ASSERT_WITH_CODE((0 == tmp_result),
4413 "Failed to initialize MC reg table!", result = tmp_result);
4414
4415 tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
4416 PP_ASSERT_WITH_CODE((0 == tmp_result),
4417 "Failed to program static screen threshold parameters!", result = tmp_result);
4418
4419 tmp_result = tonga_enable_display_gap(hwmgr);
4420 PP_ASSERT_WITH_CODE((0 == tmp_result),
4421 "Failed to enable display gap!", result = tmp_result);
4422
4423 tmp_result = tonga_program_voting_clients(hwmgr);
4424 PP_ASSERT_WITH_CODE((0 == tmp_result),
4425 "Failed to program voting clients!", result = tmp_result);
4426
4427 tmp_result = tonga_process_firmware_header(hwmgr);
4428 PP_ASSERT_WITH_CODE((0 == tmp_result),
4429 "Failed to process firmware header!", result = tmp_result);
4430
4431 tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
4432 PP_ASSERT_WITH_CODE((0 == tmp_result),
4433 "Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
4434
4435 tmp_result = tonga_init_smc_table(hwmgr);
4436 PP_ASSERT_WITH_CODE((0 == tmp_result),
4437 "Failed to initialize SMC table!", result = tmp_result);
4438
4439 tmp_result = tonga_init_arb_table_index(hwmgr);
4440 PP_ASSERT_WITH_CODE((0 == tmp_result),
4441 "Failed to initialize ARB table index!", result = tmp_result);
4442
4443 tmp_result = tonga_populate_pm_fuses(hwmgr);
4444 PP_ASSERT_WITH_CODE((tmp_result == 0),
4445 "Failed to populate PM fuses!", result = tmp_result);
4446
4447 tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
4448 PP_ASSERT_WITH_CODE((0 == tmp_result),
4449 "Failed to populate initialize MC Reg table!", result = tmp_result);
4450
4451 tmp_result = tonga_notify_smc_display_change(hwmgr, false);
4452 PP_ASSERT_WITH_CODE((0 == tmp_result),
4453 "Failed to notify no display!", result = tmp_result);
4454
4455 /* enable SCLK control */
4456 tmp_result = tonga_enable_sclk_control(hwmgr);
4457 PP_ASSERT_WITH_CODE((0 == tmp_result),
4458 "Failed to enable SCLK control!", result = tmp_result);
4459
4460 /* enable DPM */
4461 tmp_result = tonga_start_dpm(hwmgr);
4462 PP_ASSERT_WITH_CODE((0 == tmp_result),
4463 "Failed to start DPM!", result = tmp_result);
4464
4465 tmp_result = tonga_enable_smc_cac(hwmgr);
4466 PP_ASSERT_WITH_CODE((tmp_result == 0),
4467 "Failed to enable SMC CAC!", result = tmp_result);
4468
4469 tmp_result = tonga_enable_power_containment(hwmgr);
4470 PP_ASSERT_WITH_CODE((tmp_result == 0),
4471 "Failed to enable power containment!", result = tmp_result);
4472
4473 tmp_result = tonga_power_control_set_level(hwmgr);
4474 PP_ASSERT_WITH_CODE((tmp_result == 0),
4475 "Failed to power control set level!", result = tmp_result);
4476
4477 tmp_result = tonga_enable_thermal_auto_throttle(hwmgr);
4478 PP_ASSERT_WITH_CODE((0 == tmp_result),
4479 "Failed to enable thermal auto throttle!", result = tmp_result);
4480
4481 return result;
4482}
4483
4484int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4485{
4486 int tmp_result, result = 0;
4487
4488 tmp_result = tonga_check_for_dpm_running(hwmgr);
4489 PP_ASSERT_WITH_CODE((0 == tmp_result),
4490 "SMC is still running!", return 0);
4491
4492 tmp_result = tonga_disable_thermal_auto_throttle(hwmgr);
4493 PP_ASSERT_WITH_CODE((tmp_result == 0),
4494 "Failed to disable thermal auto throttle!", result = tmp_result);
4495
4496 tmp_result = tonga_stop_dpm(hwmgr);
4497 PP_ASSERT_WITH_CODE((0 == tmp_result),
4498 "Failed to stop DPM!", result = tmp_result);
4499
4500 tmp_result = tonga_reset_to_default(hwmgr);
4501 PP_ASSERT_WITH_CODE((0 == tmp_result),
4502 "Failed to reset to default!", result = tmp_result);
4503
4504 return result;
4505}
4506
4507int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
4508{
4509 int result;
4510
4511 result = tonga_set_boot_state(hwmgr);
4512 if (0 != result)
4513 printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
4514
4515 return result;
4516}
4517
/* Tear down the Tonga hwmgr backend via the generic phm helper. */
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	return phm_hwmgr_backend_fini(hwmgr);
}
4522
4523/**
4524 * Initializes the Volcanic Islands Hardware Manager
4525 *
4526 * @param hwmgr the address of the powerplay hardware manager.
4527 * @return 1 if success; otherwise appropriate error code.
4528 */
4529int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4530{
4531 int result = 0;
4532 SMU72_Discrete_DpmTable *table = NULL;
4533 tonga_hwmgr *data;
4534 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4535 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4536 phw_tonga_ulv_parm *ulv;
4537 struct cgs_system_info sys_info = {0};
4538
4539 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4540 "Invalid Parameter!", return -1;);
4541
4542 data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
4543 if (data == NULL)
4544 return -ENOMEM;
4545
4546 hwmgr->backend = data;
4547
4548 data->dll_defaule_on = false;
4549 data->sram_end = SMC_RAM_END;
4550
4551 data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
4552 data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
4553 data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
4554 data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
4555 data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
4556 data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
4557 data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
4558 data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
4559
4560 data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
4561 data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
4562 data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
4563
4564 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4565 PHM_PlatformCaps_DisableVoltageIsland);
4566
4567 data->sclk_dpm_key_disabled = 0;
4568 data->mclk_dpm_key_disabled = 0;
4569 data->pcie_dpm_key_disabled = 0;
4570 data->pcc_monitor_enabled = 0;
4571
4572 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4573 PHM_PlatformCaps_UnTabledHardwareInterface);
4574
4575 data->gpio_debug = 0;
4576 data->engine_clock_data = 0;
4577 data->memory_clock_data = 0;
4578 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4579 PHM_PlatformCaps_DynamicPatchPowerState);
4580
4581 /* need to set voltage control types before EVV patching*/
4582 data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
4583 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
4584 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
4585 data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
4586 data->force_pcie_gen = PP_PCIEGenInvalid;
4587
4588 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4589 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
4590 data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4591 }
4592
4593 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4594 PHM_PlatformCaps_ControlVDDGFX)) {
4595 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4596 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
4597 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4598 }
4599 }
4600
4601 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
4602 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4603 PHM_PlatformCaps_ControlVDDGFX);
4604 }
4605
4606 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4607 PHM_PlatformCaps_EnableMVDDControl)) {
4608 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4609 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
4610 data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4611 }
4612 }
4613
4614 if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
4615 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4616 PHM_PlatformCaps_EnableMVDDControl);
4617 }
4618
4619 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4620 PHM_PlatformCaps_ControlVDDCI)) {
4621 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4622 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
4623 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
4624 else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4625 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
4626 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
4627 }
4628
4629 if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
4630 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4631 PHM_PlatformCaps_ControlVDDCI);
4632
4633 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4634 PHM_PlatformCaps_TablelessHardwareInterface);
4635
4636 if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
4637 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4638 PHM_PlatformCaps_ClockStretcher);
4639
4640 /* Initializes DPM default values*/
4641 tonga_initialize_dpm_defaults(hwmgr);
4642
4643 /* Get leakage voltage based on leakage ID.*/
4644 PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
4645 "Get EVV Voltage Failed. Abort Driver loading!", return -1);
4646
4647 tonga_complete_dependency_tables(hwmgr);
4648
4649 /* Parse pptable data read from VBIOS*/
4650 tonga_set_private_var_based_on_pptale(hwmgr);
4651
4652 /* ULV Support*/
4653 ulv = &(data->ulv);
4654 ulv->ulv_supported = false;
4655
4656 /* Initalize Dynamic State Adjustment Rule Settings*/
4657 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
4658 if (result)
4659 printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
4660 data->uvd_enabled = false;
4661
4662 table = &(data->smc_state_table);
4663
4664 /*
4665 * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
4666 * Peak Current Control feature is enabled and we should program PCC HW register
4667 */
4668 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
4669 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
4670 CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
4671
4672 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
4673 case 0:
4674 temp_reg = PHM_SET_FIELD(temp_reg,
4675 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
4676 break;
4677 case 1:
4678 temp_reg = PHM_SET_FIELD(temp_reg,
4679 CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
4680 break;
4681 case 2:
4682 temp_reg = PHM_SET_FIELD(temp_reg,
4683 CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
4684 break;
4685 case 3:
4686 temp_reg = PHM_SET_FIELD(temp_reg,
4687 CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
4688 break;
4689 case 4:
4690 temp_reg = PHM_SET_FIELD(temp_reg,
4691 CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
4692 break;
4693 default:
4694 printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \
4695 Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
4696 break;
4697 }
4698 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4699 ixCNB_PWRMGT_CNTL, temp_reg);
4700 }
4701
4702 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4703 PHM_PlatformCaps_EnableSMU7ThermalManagement);
4704 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4705 PHM_PlatformCaps_SMU7);
4706
4707 data->vddc_phase_shed_control = false;
4708
4709 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4710 PHM_PlatformCaps_UVDPowerGating);
4711 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4712 PHM_PlatformCaps_VCEPowerGating);
4713 sys_info.size = sizeof(struct cgs_system_info);
4714 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
4715 result = cgs_query_system_info(hwmgr->device, &sys_info);
4716 if (!result) {
4717 if (sys_info.value & AMD_PG_SUPPORT_UVD)
4718 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4719 PHM_PlatformCaps_UVDPowerGating);
4720 if (sys_info.value & AMD_PG_SUPPORT_VCE)
4721 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4722 PHM_PlatformCaps_VCEPowerGating);
4723 }
4724
4725 if (0 == result) {
4726 data->is_tlu_enabled = false;
4727 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4728 TONGA_MAX_HARDWARE_POWERLEVELS;
4729 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
4730 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
4731
4732 sys_info.size = sizeof(struct cgs_system_info);
4733 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
4734 result = cgs_query_system_info(hwmgr->device, &sys_info);
4735 if (result)
4736 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4737 else
4738 data->pcie_gen_cap = (uint32_t)sys_info.value;
4739 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
4740 data->pcie_spc_cap = 20;
4741 sys_info.size = sizeof(struct cgs_system_info);
4742 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
4743 result = cgs_query_system_info(hwmgr->device, &sys_info);
4744 if (result)
4745 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4746 else
4747 data->pcie_lane_cap = (uint32_t)sys_info.value;
4748 } else {
4749 /* Ignore return value in here, we are cleaning up a mess. */
4750 tonga_hwmgr_backend_fini(hwmgr);
4751 }
4752
4753 return result;
4754}
4755
4756static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
4757 enum amd_dpm_forced_level level)
4758{
4759 int ret = 0;
4760
4761 switch (level) {
4762 case AMD_DPM_FORCED_LEVEL_HIGH:
4763 ret = tonga_force_dpm_highest(hwmgr);
4764 if (ret)
4765 return ret;
4766 break;
4767 case AMD_DPM_FORCED_LEVEL_LOW:
4768 ret = tonga_force_dpm_lowest(hwmgr);
4769 if (ret)
4770 return ret;
4771 break;
4772 case AMD_DPM_FORCED_LEVEL_AUTO:
4773 ret = tonga_unforce_dpm_levels(hwmgr);
4774 if (ret)
4775 return ret;
4776 break;
4777 default:
4778 break;
4779 }
4780
4781 hwmgr->dpm_level = level;
4782 return ret;
4783}
4784
/*
 * tonga_apply_state_adjust_rules - clamp and adjust a requested power state
 * before it is programmed.
 *
 * Applies, in order: DC power-source clock caps, VCE/ACP arbiter clocks,
 * stable-pstate minimum clocks, gfx arbiter minimums, SCLK/MCLK overdrive,
 * and MCLK-switching restrictions (multi-display or frame lock). The state
 * is assumed to carry exactly two performance levels (low at [0], high at
 * [1]); level 1 is raised to at least level 0 afterwards.
 *
 * Returns 0. @prequest_ps is modified in place; @pcurrent_ps is unused here.
 */
static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct tonga_power_state *tonga_ps =
				cast_phw_tonga_power_state(&prequest_ps->hardware);

	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
		"VI should always have 2 performance levels",
		);

	/* Pick the AC or DC clock/voltage ceiling for the current source. */
	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* On DC (battery) power, cap every level to the DC limits. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < tonga_ps->performance_level_count; i++) {
			if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
				tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
				tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	/* Propagate the VCE/ACP arbiter requests into the state. */
	tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
	tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;

	tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/

	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */

	/* Stable pstate: fix SCLK at ~75% of the AC limit (snapped down to a
	 * dependency-table entry) and MCLK at the AC maximum.
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {

		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* Target below the lowest table entry: fall back to entry 0. */
		if (count < 0)
			stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	/* Honor gfx arbiter minimum clock requests. */
	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;

	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;

	tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;

	/* Overdrive: raise the high level, clamped to the platform limit. */
	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
					"Overdrive sclk exceeds limit",
					hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);

		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
			tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
	}

	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
					"Overdrive mclk exceeds limit",
					hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);

		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
			tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);

	/* MCLK switching is unsafe with more than one display or frame lock. */
	disable_mclk_switching = (1 < info.display_count) ||
				    disable_mclk_switching_for_frame_lock;

	sclk = tonga_ps->performance_levels[0].engine_clock;
	mclk = tonga_ps->performance_levels[0].memory_clock;

	/* With switching disabled, the low level runs at the highest MCLK. */
	if (disable_mclk_switching)
		mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;

	/* Raise the low level to the minimums, but never past the limits. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;

	tonga_ps->performance_levels[0].engine_clock = sclk;
	tonga_ps->performance_levels[0].memory_clock = mclk;

	/* Keep level 1 at or above level 0. */
	tonga_ps->performance_levels[1].engine_clock =
		(tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
			      tonga_ps->performance_levels[1].engine_clock :
			      tonga_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		if (mclk < tonga_ps->performance_levels[1].memory_clock)
			mclk = tonga_ps->performance_levels[1].memory_clock;

		/* Both levels share one MCLK so it never has to switch. */
		tonga_ps->performance_levels[0].memory_clock = mclk;
		tonga_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
			tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
	}

	/* Stable pstate: pin every level to the computed stable clocks.
	 * NOTE(review): pcie_lane is assigned from pcie_gen_performance.max,
	 * not pcie_lane_performance.max — looks like a copy-paste slip;
	 * verify before changing, as both fields come from VBIOS parsing.
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
		for (i=0; i < tonga_ps->performance_level_count; i++) {
			tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}

	return 0;
}
4932
4933int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
4934{
4935 return sizeof(struct tonga_power_state);
4936}
4937
4938static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
4939{
4940 struct pp_power_state *ps;
4941 struct tonga_power_state *tonga_ps;
4942
4943 if (hwmgr == NULL)
4944 return -EINVAL;
4945
4946 ps = hwmgr->request_ps;
4947
4948 if (ps == NULL)
4949 return -EINVAL;
4950
4951 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4952
4953 if (low)
4954 return tonga_ps->performance_levels[0].memory_clock;
4955 else
4956 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
4957}
4958
4959static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
4960{
4961 struct pp_power_state *ps;
4962 struct tonga_power_state *tonga_ps;
4963
4964 if (hwmgr == NULL)
4965 return -EINVAL;
4966
4967 ps = hwmgr->request_ps;
4968
4969 if (ps == NULL)
4970 return -EINVAL;
4971
4972 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
4973
4974 if (low)
4975 return tonga_ps->performance_levels[0].engine_clock;
4976 else
4977 return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
4978}
4979
4980static uint16_t tonga_get_current_pcie_speed(
4981 struct pp_hwmgr *hwmgr)
4982{
4983 uint32_t speed_cntl = 0;
4984
4985 speed_cntl = cgs_read_ind_register(hwmgr->device,
4986 CGS_IND_REG__PCIE,
4987 ixPCIE_LC_SPEED_CNTL);
4988 return((uint16_t)PHM_GET_FIELD(speed_cntl,
4989 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
4990}
4991
4992static int tonga_get_current_pcie_lane_number(
4993 struct pp_hwmgr *hwmgr)
4994{
4995 uint32_t link_width;
4996
4997 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
4998 CGS_IND_REG__PCIE,
4999 PCIE_LC_LINK_WIDTH_CNTL,
5000 LC_LINK_WIDTH_RD);
5001
5002 PP_ASSERT_WITH_CODE((7 >= link_width),
5003 "Invalid PCIe lane width!", return 0);
5004
5005 return decode_pcie_lane_width(link_width);
5006}
5007
/*
 * tonga_dpm_patch_boot_state - patch the boot power state with the clocks
 * and voltages the VBIOS actually booted with.
 *
 * Reads the ATOM FirmwareInfo data table, caches the boot-up SCLK/MCLK and
 * MVDD/VDDC/VDDCI values in data->vbios_boot_state, then rewrites
 * performance level 0 of @hw_ps accordingly. PCIe gen and lane count are
 * sampled from the live link registers rather than the firmware table.
 *
 * Returns 0, including when no firmware info table exists (test setups).
 */
static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe capabilities come from the live hardware, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)tonga_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
5046
5047static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
5048 void *state, struct pp_power_state *power_state,
5049 void *pp_table, uint32_t classification_flag)
5050{
5051 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5052
5053 struct tonga_power_state *tonga_ps =
5054 (struct tonga_power_state *)(&(power_state->hardware));
5055
5056 struct tonga_performance_level *performance_level;
5057
5058 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
5059
5060 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
5061 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
5062
5063 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
5064 (ATOM_Tonga_SCLK_Dependency_Table *)
5065 (((unsigned long)powerplay_table) +
5066 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
5067
5068 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
5069 (ATOM_Tonga_MCLK_Dependency_Table *)
5070 (((unsigned long)powerplay_table) +
5071 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
5072
5073 /* The following fields are not initialized here: id orderedList allStatesList */
5074 power_state->classification.ui_label =
5075 (le16_to_cpu(state_entry->usClassification) &
5076 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
5077 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
5078 power_state->classification.flags = classification_flag;
5079 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
5080
5081 power_state->classification.temporary_state = false;
5082 power_state->classification.to_be_deleted = false;
5083
5084 power_state->validation.disallowOnDC =
5085 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
5086
5087 power_state->pcie.lanes = 0;
5088
5089 power_state->display.disableFrameModulation = false;
5090 power_state->display.limitRefreshrate = false;
5091 power_state->display.enableVariBright =
5092 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
5093
5094 power_state->validation.supportedPowerLevels = 0;
5095 power_state->uvd_clocks.VCLK = 0;
5096 power_state->uvd_clocks.DCLK = 0;
5097 power_state->temperatures.min = 0;
5098 power_state->temperatures.max = 0;
5099
5100 performance_level = &(tonga_ps->performance_levels
5101 [tonga_ps->performance_level_count++]);
5102
5103 PP_ASSERT_WITH_CODE(
5104 (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
5105 "Performance levels exceeds SMC limit!",
5106 return -1);
5107
5108 PP_ASSERT_WITH_CODE(
5109 (tonga_ps->performance_level_count <=
5110 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
5111 "Performance levels exceeds Driver limit!",
5112 return -1);
5113
5114 /* Performance levels are arranged from low to high. */
5115 performance_level->memory_clock =
5116 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
5117
5118 performance_level->engine_clock =
5119 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
5120
5121 performance_level->pcie_gen = get_pcie_gen_support(
5122 data->pcie_gen_cap,
5123 state_entry->ucPCIEGenLow);
5124
5125 performance_level->pcie_lane = get_pcie_lane_support(
5126 data->pcie_lane_cap,
5127 state_entry->ucPCIELaneHigh);
5128
5129 performance_level =
5130 &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
5131
5132 performance_level->memory_clock =
5133 le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
5134
5135 performance_level->engine_clock =
5136 le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
5137
5138 performance_level->pcie_gen = get_pcie_gen_support(
5139 data->pcie_gen_cap,
5140 state_entry->ucPCIEGenHigh);
5141
5142 performance_level->pcie_lane = get_pcie_lane_support(
5143 data->pcie_lane_cap,
5144 state_entry->ucPCIELaneHigh);
5145
5146 return 0;
5147}
5148
/*
 * tonga_get_pp_table_entry - build power state @entry_index from the VBIOS
 * powerplay table.
 *
 * Delegates parsing to get_powerplay_table_entry_v1_0() with the Tonga
 * callback, then post-processes the result: sanity-checks single-entry
 * VDDCI/MCLK dependency tables against the VBIOS boot values, marks DC
 * compatibility, seeds the BACO baseline from the boot state, and tracks
 * the PCIe gen/lane min-max ranges for performance and battery states.
 *
 * Returns 0 (parser errors only suppress the PCIe range tracking).
 */
static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *ps)
{
	int result;
	struct tonga_power_state *tonga_ps;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
					table_info->vdd_dep_on_mclk;

	ps->hardware.magic = PhwTonga_Magic;

	tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps,
			tonga_get_pp_table_entry_callback_func);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	/* NOTE(review): the printk strings below lack a trailing "\n", so
	 * consecutive messages may run together in the kernel log.
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!ps->validation.disallowOnDC)
		tonga_ps->dc_compatible = true;

	if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
	else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
		if (data->bacos.best_match == 0xffff) {
			/* For V.I. use boot state as base BACO state */
			data->bacos.best_match = PP_StateClassificationFlag_Boot;
			data->bacos.performance_level = tonga_ps->performance_levels[0];
		}
	}

	tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
	tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Track overall PCIe gen/lane ranges per UI classification so
		 * later PCIe DPM setup can bound performance vs power-saving.
		 */
		switch (ps->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < tonga_ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							tonga_ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							tonga_ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < tonga_ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						tonga_ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							tonga_ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							tonga_ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						tonga_ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							tonga_ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
5261
/*
 * tonga_print_current_perforce_level - debugfs dump of the current SCLK,
 * MCLK, GPU load and UVD/VCE power-gate state into seq_file @m.
 *
 * (The "perforce" in the name is a long-standing typo for "performance";
 * kept as-is since it is referenced by the hwmgr function table.)
 */
static void
tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	/* Ask the SMC for the live clocks; replies land in SMC_MSG_ARG_0. */
	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));

	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));

	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);

	/* Average activity lives in the SMU soft-register block. */
	offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	/* Scale down with round-to-nearest (value appears to carry 8
	 * fractional bits — TODO confirm against SMU72 firmware docs).
	 */
	activity_percent += 0x80;
	activity_percent >>= 8;

	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);

	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");

	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
}
5289
/*
 * tonga_find_dpm_states_clocks_in_dpm_table - decide which parts of the
 * SMU7 DPM table must be rebuilt for the new power state.
 *
 * Sets bits in data->need_update_smu7_dpm_table: OD_UPDATE_* when the
 * requested top SCLK/MCLK is absent from the current table (overdrive),
 * UPDATE_SCLK when the DeepSleep minimum may have changed, and UPDATE_MCLK
 * when the number of active displays changed.
 *
 * Returns 0.
 */
static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
	struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
	/* NOTE: min_clocks is never populated below (see the TODO), so the
	 * engineClockInSR comparison is effectively against zero.
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	data->need_update_smu7_dpm_table = 0;

	/* Is the requested top SCLK already a table entry? */
	for (i = 0; i < psclk_table->count; i++) {
		if (sclk == psclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= psclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
	/* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/
		if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Same check for the requested top MCLK. */
	for (i=0; i < pmclk_table->count; i++) {
		if (mclk == pmclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= pmclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/* Display count changes alter MCLK switching constraints. */
	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
5333
5334static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
5335{
5336 uint32_t i;
5337 uint32_t sclk, max_sclk = 0;
5338 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5339 struct tonga_dpm_table *pdpm_table = &data->dpm_table;
5340
5341 for (i = 0; i < hw_ps->performance_level_count; i++) {
5342 sclk = hw_ps->performance_levels[i].engine_clock;
5343 if (max_sclk < sclk)
5344 max_sclk = sclk;
5345 }
5346
5347 for (i = 0; i < pdpm_table->sclk_table.count; i++) {
5348 if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
5349 return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
5350 pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
5351 pdpm_table->pcie_speed_table.dpm_levels[i].value);
5352 }
5353
5354 return 0;
5355}
5356
/*
 * tonga_request_link_speed_change_before_state_change - ask ACPI to raise
 * the PCIe link speed ahead of a power-state upswitch.
 *
 * Compares the maximum link speed of the new state against the current (or
 * previously forced) one. On an upswitch it requests the new speed via
 * ACPI, recording a fallback in data->force_pcie_gen when a request fails;
 * on a downswitch it only flags data->pspp_notify_required so the change
 * happens after the state switch.
 *
 * Returns 0.
 */
static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);

	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch(target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: remember Gen2 as fallback and,
			 * unless we are already at Gen2, try requesting it.
			 */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request failed, force current speed */
		default:
			data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
5396
/*
 * tonga_freeze_sclk_mclk_dpm - freeze SCLK/MCLK DPM levels in the SMC
 * before the DPM tables are repopulated.
 *
 * Only freezes a domain when its table actually needs updating (per
 * data->need_update_smu7_dpm_table) and DPM for that domain is not
 * key-disabled.
 *
 * Returns 0 on success, -1 when an SMC freeze message fails.
 */
static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	/* Nothing pending: nothing to freeze. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	/* NOTE: the flags are combined with '+' rather than '|'; the result
	 * is the same as long as the DPMTABLE_* bits are distinct.
	 */
	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
			"Trying to freeze SCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_SCLKDPM_FreezeLevel),
			"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
			"Trying to freeze MCLK DPM when DPM is disabled",
			);
		PP_ASSERT_WITH_CODE(
			0 == smum_send_msg_to_smc(hwmgr->smumgr,
							PPSMC_MSG_MCLKDPM_FreezeLevel),
			"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
			return -1);
	}

	return 0;
}
5432
/*
 * tonga_populate_and_upload_sclk_mclk_dpm_levels - rebuild the SCLK/MCLK
 * DPM tables for the new state and upload them to the SMU.
 *
 * For an overdrive update (OD_UPDATE_*) the top table entry is replaced by
 * the requested clock; with OD6+ support the intermediate levels (index
 * count-2 down to 2) are additionally rescaled proportionally against the
 * golden (default) DPM table so the heatmap axis stays consistent. Any
 * SCLK/MCLK update then triggers repopulation of the corresponding SMU
 * level tables.
 *
 * Returns 0 on success, a negative value if populating the SMU tables
 * fails, -1 on a zero golden top-level clock (divide-by-zero guard).
 */
static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;

	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
	uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
	struct tonga_dpm_table *pdpm_table = &data->dpm_table;

	struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
		/* Need to do calculation based on the golden DPM table
		 * as the Heatmap GPU Clock axis is also based on the default values
		 */
			PP_ASSERT_WITH_CODE(
				(pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
				"Divide by 0!",
				return -1);
			dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
			for (i = dpm_count; i > 1; i--) {
				/* Scale each intermediate level by the same
				 * percentage the top level moved from golden.
				 */
				if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
					clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value +
							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): this branch indexes the golden
				 * table with pdpm_table's count - presumably
				 * the counts match; verify, otherwise this
				 * should use pgolden_dpm_table's count.
				 */
				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
					clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;

					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value -
							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->sclk_table.dpm_levels[i].value =
							pgolden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
					(pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
					"Divide by 0!",
					return -1);
			dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
			for (i = dpm_count; i > 1; i--) {
				if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
						clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
								pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

						pdpm_table->mclk_table.dpm_levels[i].value =
								pgolden_dpm_table->mclk_table.dpm_levels[i].value +
								(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;

				/* NOTE(review): same mixed-count indexing as
				 * the SCLK branch above - verify.
				 */
				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
						clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
								pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;

						pdpm_table->mclk_table.dpm_levels[i].value =
								pgolden_dpm_table->mclk_table.dpm_levels[i].value -
								(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
				} else
					pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = tonga_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
			return result);
	}

	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/*populate MCLK dpm table to SMU7 */
		result = tonga_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}
5537
5538static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
5539 struct tonga_single_dpm_table * pdpm_table,
5540 uint32_t low_limit, uint32_t high_limit)
5541{
5542 uint32_t i;
5543
5544 for (i = 0; i < pdpm_table->count; i++) {
5545 if ((pdpm_table->dpm_levels[i].value < low_limit) ||
5546 (pdpm_table->dpm_levels[i].value > high_limit))
5547 pdpm_table->dpm_levels[i].enabled = false;
5548 else
5549 pdpm_table->dpm_levels[i].enabled = true;
5550 }
5551 return 0;
5552}
5553
5554static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
5555{
5556 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5557 uint32_t high_limit_count;
5558
5559 PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
5560 "power state did not have any performance level",
5561 return -1);
5562
5563 high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
5564
5565 tonga_trim_single_dpm_states(hwmgr,
5566 &(data->dpm_table.sclk_table),
5567 hw_state->performance_levels[0].engine_clock,
5568 hw_state->performance_levels[high_limit_count].engine_clock);
5569
5570 tonga_trim_single_dpm_states(hwmgr,
5571 &(data->dpm_table.mclk_table),
5572 hw_state->performance_levels[0].memory_clock,
5573 hw_state->performance_levels[high_limit_count].memory_clock);
5574
5575 return 0;
5576}
5577
/* Build the SCLK/MCLK/PCIe DPM enable masks from the trimmed DPM tables and
 * cache them in the backend for later upload to the SMC.
 */
static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
{
	int result;
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);

	/* First disable the DPM levels outside the new state's clock range. */
	result = tonga_trim_dpm_states(hwmgr, tonga_ps);
	if (0 != result)
		return result;

	data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
	data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
	/* While UVD is active, keep the lowest MCLK level (bit 0) disabled. */
	if (data->uvd_enabled)
		data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;

	data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);

	return 0;
}
5599
5600int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
5601{
5602 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5603 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
5604 (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
5605}
5606
5607int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
5608{
5609 return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
5610 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
5611 (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
5612}
5613
/* Program the UVD boot level into the SMU DPM table and enable/disable UVD
 * DPM according to the gating request.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param bgate  true when UVD is being power gated (UVD DPM then disabled).
 */
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (!bgate) {
		/* Boot at the highest multimedia dependency level. */
		data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
		/* Round the byte offset down to a dword boundary for SMC access. */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		/* UvdBootLevel occupies the top byte of the dword: RMW it. */
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0x00FFFFFF;
		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		/* Without UVD DPM (or with a forced stable pstate), pin the
		 * enabled mask to the boot level only. */
		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
						PPSMC_MSG_UVDDPM_SetEnabledMask,
						(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
	}

	return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
}
5639
/* Reprogram the VCE boot level and toggle VCE DPM when the new power state
 * turns the VCE engine clock on or off relative to the current state.
 */
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);

	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* VCE turning on: program the boot level before enabling VCE DPM. */
	if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
		data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);

		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
		/* Round the byte offset down to a dword boundary for SMC access. */
		mm_boot_level_offset /= 4;
		mm_boot_level_offset *= 4;
		/* VceBootLevel lives in byte 2 of the dword: RMW it. */
		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
		mm_boot_level_value &= 0xFF00FFFF;
		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_VCEDPM_SetEnabledMask,
					(uint32_t)(1 << data->smc_state_table.VceBootLevel));

		tonga_enable_disable_vce_dpm(hwmgr, true);
	} else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
		/* VCE turning off: disable VCE DPM. */
		tonga_enable_disable_vce_dpm(hwmgr, false);

	return 0;
}
5672
5673static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
5674{
5675 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5676
5677 uint32_t address;
5678 int32_t result;
5679
5680 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
5681 return 0;
5682
5683
5684 memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
5685
5686 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
5687
5688 if(result != 0)
5689 return result;
5690
5691
5692 address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
5693
5694 return tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
5695 (uint8_t *)&data->mc_reg_table.data[0],
5696 sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
5697 data->sram_end);
5698}
5699
5700static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
5701{
5702 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5703
5704 if (data->need_update_smu7_dpm_table &
5705 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
5706 return tonga_program_memory_timing_parameters(hwmgr);
5707
5708 return 0;
5709}
5710
5711static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5712{
5713 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5714
5715 if (0 == data->need_update_smu7_dpm_table)
5716 return 0;
5717
5718 if ((0 == data->sclk_dpm_key_disabled) &&
5719 (data->need_update_smu7_dpm_table &
5720 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5721
5722 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5723 "Trying to Unfreeze SCLK DPM when DPM is disabled",
5724 );
5725 PP_ASSERT_WITH_CODE(
5726 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5727 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5728 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
5729 return -1);
5730 }
5731
5732 if ((0 == data->mclk_dpm_key_disabled) &&
5733 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
5734
5735 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5736 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5737 );
5738 PP_ASSERT_WITH_CODE(
5739 0 == smum_send_msg_to_smc(hwmgr->smumgr,
5740 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
5741 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
5742 return -1);
5743 }
5744
5745 data->need_update_smu7_dpm_table = 0;
5746
5747 return 0;
5748}
5749
5750static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
5751{
5752 const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
5753 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5754 const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
5755 uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
5756 uint8_t request;
5757
5758 if (data->pspp_notify_required ||
5759 data->pcie_performance_request) {
5760 if (target_link_speed == PP_PCIEGen3)
5761 request = PCIE_PERF_REQ_GEN3;
5762 else if (target_link_speed == PP_PCIEGen2)
5763 request = PCIE_PERF_REQ_GEN2;
5764 else
5765 request = PCIE_PERF_REQ_GEN1;
5766
5767 if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
5768 data->pcie_performance_request = false;
5769 return 0;
5770 }
5771
5772 if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
5773 if (PP_PCIEGen2 == target_link_speed)
5774 printk("PSPP request to switch to Gen2 from Gen3 Failed!");
5775 else
5776 printk("PSPP request to switch to Gen1 from Gen2 Failed!");
5777 }
5778 }
5779
5780 data->pcie_performance_request = false;
5781 return 0;
5782}
5783
/* Apply a new power state: run the full ordered sequence of DPM table
 * updates, SMC uploads and notifications.  Each step's failure is recorded
 * in 'result' but later steps still execute; the first error is returned.
 */
static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;

	tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
	}

	/* DPM must stay frozen while the new level tables are uploaded. */
	tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);

	tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);

	tmp_result = tonga_update_vce_dpm(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);

	tmp_result = tonga_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);

	tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);

	tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);

	tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
	}

	return result;
}
5830
5831/**
5832* Set maximum target operating fan output PWM
5833*
5834* @param pHwMgr: the address of the powerplay hardware manager.
5835* @param usMaxFanPwm: max operating fan PWM in percents
5836* @return The response that came from the SMC.
5837*/
5838static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5839{
5840 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
5841
5842 if (phm_is_hw_access_blocked(hwmgr))
5843 return 0;
5844
5845 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
5846}
5847
5848int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
5849{
5850 uint32_t num_active_displays = 0;
5851 struct cgs_display_info info = {0};
5852 info.mode_info = NULL;
5853
5854 cgs_get_active_displays_info(hwmgr->device, &info);
5855
5856 num_active_displays = info.display_count;
5857
5858 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
5859 tonga_notify_smc_display_change(hwmgr, false);
5860 else
5861 tonga_notify_smc_display_change(hwmgr, true);
5862
5863 return 0;
5864}
5865
/**
* Programs the display gap
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* With no active display, let the SMC ignore the display gap. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	/* Fall back to 60 Hz when the mode reports no refresh rate. */
	if(0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* NOTE(review): unsigned subtraction — underflows if
	 * vblank_time_us > frame_time_in_us - 200; assumes reported vblank
	 * times are always shorter than a frame.  TODO confirm. */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	if (num_active_displays == 1)
		tonga_notify_smc_display_change(hwmgr, true);

	return 0;
}
5915
/* Hook invoked after a display configuration change: reprogram the display
 * gap registers to match the new set of active displays.
 */
int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{

	tonga_program_display_gap(hwmgr);

	/* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */
	return 0;
}
5924
5925/**
5926* Set maximum target operating fan output RPM
5927*
5928* @param pHwMgr: the address of the powerplay hardware manager.
5929* @param usMaxFanRpm: max operating fan RPM value.
5930* @return The response that came from the SMC.
5931*/
5932static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
5933{
5934 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
5935
5936 if (phm_is_hw_access_blocked(hwmgr))
5937 return 0;
5938
5939 return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
5940}
5941
/* Return the reference clock (XCLK) frequency, read from the VBIOS firmware
 * info table, unless the clock pin is muxed to TCLK.  Returns 0 when the
 * firmware info table cannot be read.
 */
uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock;
	uint32_t tc;
	uint32_t divide;

	ATOM_FIRMWARE_INFO *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* When TCLK is muxed onto XCLK, the fixed TCLK value applies. */
	tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tc)
		return TCLK;

	fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
			&size, &frev, &crev);

	if (!fw_info)
		return 0;

	reference_clock = le16_to_cpu(fw_info->usReferenceClock);

	/* XTALIN_DIVIDE set means the crystal input is divided by four. */
	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	if (0 != divide)
		return reference_clock / 4;

	return reference_clock;
}
5973
5974int tonga_dpm_set_interrupt_state(void *private_data,
5975 unsigned src_id, unsigned type,
5976 int enabled)
5977{
5978 uint32_t cg_thermal_int;
5979 struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
5980
5981 if (hwmgr == NULL)
5982 return -EINVAL;
5983
5984 switch (type) {
5985 case AMD_THERMAL_IRQ_LOW_TO_HIGH:
5986 if (enabled) {
5987 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5988 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5989 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5990 } else {
5991 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
5992 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
5993 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
5994 }
5995 break;
5996
5997 case AMD_THERMAL_IRQ_HIGH_TO_LOW:
5998 if (enabled) {
5999 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
6000 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6001 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
6002 } else {
6003 cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
6004 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6005 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
6006 }
6007 break;
6008 default:
6009 break;
6010 }
6011 return 0;
6012}
6013
/* Register the two internal thermal interrupt sources with the CGS layer,
 * routing both through tonga_dpm_set_interrupt_state.
 * NOTE(review): src ids 230/231 presumably correspond to the low-to-high
 * and high-to-low thermal interrupts — confirm against the IH source list.
 */
int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *thermal_interrupt_info)
{
	int result;
	const struct pp_interrupt_registration_info *info =
		(const struct pp_interrupt_registration_info *)thermal_interrupt_info;

	if (info == NULL)
		return -EINVAL;

	result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
				tonga_dpm_set_interrupt_state,
				info->call_back, info->context);

	if (result)
		return -EINVAL;

	result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
				tonga_dpm_set_interrupt_state,
				info->call_back, info->context);

	if (result)
		return -EINVAL;

	return 0;
}
6040
/* Report whether the SMC must be reprogrammed because the number of active
 * displays changed since the last update.
 */
bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;
	struct cgs_display_info info = {0,0,NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		is_update_required = true;
/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
			is_update_required = true;
*/
	return is_update_required;
}
6059
6060static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
6061 const struct tonga_performance_level *pl2)
6062{
6063 return ((pl1->memory_clock == pl2->memory_clock) &&
6064 (pl1->engine_clock == pl2->engine_clock) &&
6065 (pl1->pcie_gen == pl2->pcie_gen) &&
6066 (pl1->pcie_lane == pl2->pcie_lane));
6067}
6068
/* Compare two hardware power states; the verdict is written to '*equal'.
 * Returns -EINVAL when any pointer argument is NULL, otherwise 0.
 */
int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
	const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
	int i;

	if (equal == NULL || psa == NULL || psb == NULL)
		return -EINVAL;

	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
	*equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
	*equal &= (psa->acp_clk == psb->acp_clk);

	return 0;
}
6100
6101static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
6102{
6103 if (mode) {
6104 /* stop auto-manage */
6105 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
6106 PHM_PlatformCaps_MicrocodeFanControl))
6107 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
6108 tonga_fan_ctrl_set_static_mode(hwmgr, mode);
6109 } else
6110 /* restart auto-manage */
6111 tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
6112
6113 return 0;
6114}
6115
6116static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
6117{
6118 if (hwmgr->fan_ctrl_is_in_default_mode)
6119 return hwmgr->fan_ctrl_default_mode;
6120 else
6121 return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
6122 CG_FDO_CTRL2, FDO_PWM_MODE);
6123}
6124
/* Restrict the SCLK/MCLK enabled-level mask, or force a single PCIe level,
 * according to 'mask'.  Only valid while DPM is in manual forced mode.
 */
static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;
	case PP_MCLK:
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;
	case PP_PCIE:
	{
		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		/* level = index of the highest set bit in tmp. */
		while (tmp >>= 1)
			level++;

		if (!data->pcie_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}
6166
/* Emit a sysfs-style listing of the SCLK/MCLK/PCIe DPM levels into 'buf',
 * marking the level matching the current hardware clock with '*'.
 * Returns the number of characters written.
 */
static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the current engine clock. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* Current level = first level at or above the current clock. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Ask the SMC for the current memory clock. */
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = tonga_get_current_pcie_speed(hwmgr);
		/* PCIe levels match exactly rather than by threshold. */
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
6231
6232static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
6233{
6234 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6235 struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
6236 struct tonga_single_dpm_table *golden_sclk_table =
6237 &(data->golden_dpm_table.sclk_table);
6238 int value;
6239
6240 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6241 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6242 100 /
6243 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6244
6245 return value;
6246}
6247
6248static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6249{
6250 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6251 struct tonga_single_dpm_table *golden_sclk_table =
6252 &(data->golden_dpm_table.sclk_table);
6253 struct pp_power_state *ps;
6254 struct tonga_power_state *tonga_ps;
6255
6256 if (value > 20)
6257 value = 20;
6258
6259 ps = hwmgr->request_ps;
6260
6261 if (ps == NULL)
6262 return -EINVAL;
6263
6264 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6265
6266 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
6267 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6268 value / 100 +
6269 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6270
6271 return 0;
6272}
6273
6274static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
6275{
6276 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6277 struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
6278 struct tonga_single_dpm_table *golden_mclk_table =
6279 &(data->golden_dpm_table.mclk_table);
6280 int value;
6281
6282 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6283 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6284 100 /
6285 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6286
6287 return value;
6288}
6289
6290static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6291{
6292 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6293 struct tonga_single_dpm_table *golden_mclk_table =
6294 &(data->golden_dpm_table.mclk_table);
6295 struct pp_power_state *ps;
6296 struct tonga_power_state *tonga_ps;
6297
6298 if (value > 20)
6299 value = 20;
6300
6301 ps = hwmgr->request_ps;
6302
6303 if (ps == NULL)
6304 return -EINVAL;
6305
6306 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6307
6308 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
6309 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6310 value / 100 +
6311 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6312
6313 return 0;
6314}
6315
/* Dispatch table hooking the Tonga implementation into the powerplay core. */
static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
	/* lifecycle and power-state management */
	.backend_init = &tonga_hwmgr_backend_init,
	.backend_fini = &tonga_hwmgr_backend_fini,
	.asic_setup = &tonga_setup_asic_task,
	.dynamic_state_management_enable = &tonga_enable_dpm_tasks,
	.dynamic_state_management_disable = &tonga_disable_dpm_tasks,
	.apply_state_adjust_rules = tonga_apply_state_adjust_rules,
	.force_dpm_level = &tonga_force_dpm_level,
	.power_state_set = tonga_set_power_state_tasks,
	.get_power_state_size = tonga_get_power_state_size,
	.get_mclk = tonga_dpm_get_mclk,
	.get_sclk = tonga_dpm_get_sclk,
	.patch_boot_state = tonga_dpm_patch_boot_state,
	.get_pp_table_entry = tonga_get_pp_table_entry,
	.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
	/* note: "perforce" is a long-standing typo in the core interface name */
	.print_current_perforce_level = tonga_print_current_perforce_level,
	/* multimedia power/clock gating */
	.powerdown_uvd = tonga_phm_powerdown_uvd,
	.powergate_uvd = tonga_phm_powergate_uvd,
	.powergate_vce = tonga_phm_powergate_vce,
	.disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
	.update_clock_gatings = tonga_phm_update_clock_gatings,
	/* display */
	.notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = tonga_display_configuration_changed_task,
	/* fan and thermal control */
	.set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
	.get_temperature = tonga_thermal_get_temperature,
	.stop_thermal_controller = tonga_thermal_stop_thermal_controller,
	.get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
	.register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
	/* state queries and manual clock control */
	.check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
	.check_states_equal = tonga_check_states_equal,
	.set_fan_control_mode = tonga_set_fan_control_mode,
	.get_fan_control_mode = tonga_get_fan_control_mode,
	.force_clock_level = tonga_force_clock_level,
	.print_clock_levels = tonga_print_clock_levels,
	.get_sclk_od = tonga_get_sclk_od,
	.set_sclk_od = tonga_set_sclk_od,
	.get_mclk_od = tonga_get_mclk_od,
	.set_mclk_od = tonga_set_mclk_od,
};
6362
/* Entry point: attach the Tonga hwmgr and pptable function tables and run
 * the thermal controller initialization.  Always returns 0.
 */
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_v1_0_funcs;
	pp_tonga_thermal_initialize(hwmgr);
	return 0;
}
6370
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644
index fcad9426d3c1..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef TONGA_HWMGR_H
24#define TONGA_HWMGR_H
25
26#include "hwmgr.h"
27#include "smu72_discrete.h"
28#include "ppatomctrl.h"
29#include "ppinterrupt.h"
30#include "tonga_powertune.h"
31#include "pp_endian.h"
32
33#define TONGA_MAX_HARDWARE_POWERLEVELS 2
34#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
35
36struct tonga_performance_level {
37 uint32_t memory_clock;
38 uint32_t engine_clock;
39 uint16_t pcie_gen;
40 uint16_t pcie_lane;
41};
42
43struct _phw_tonga_bacos {
44 uint32_t best_match;
45 uint32_t baco_flags;
46 struct tonga_performance_level performance_level;
47};
48typedef struct _phw_tonga_bacos phw_tonga_bacos;
49
50struct _phw_tonga_uvd_clocks {
51 uint32_t VCLK;
52 uint32_t DCLK;
53};
54
55typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
56
57struct _phw_tonga_vce_clocks {
58 uint32_t EVCLK;
59 uint32_t ECCLK;
60};
61
62typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
63
64struct tonga_power_state {
65 uint32_t magic;
66 phw_tonga_uvd_clocks uvd_clocks;
67 phw_tonga_vce_clocks vce_clocks;
68 uint32_t sam_clk;
69 uint32_t acp_clk;
70 uint16_t performance_level_count;
71 bool dc_compatible;
72 uint32_t sclk_threshold;
73 struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
74};
75
76struct _phw_tonga_dpm_level {
77 bool enabled;
78 uint32_t value;
79 uint32_t param1;
80};
81typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
82
83#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
84#define MAX_REGULAR_DPM_NUMBER 8
85#define TONGA_MINIMUM_ENGINE_CLOCK 2500
86
87struct tonga_single_dpm_table {
88 uint32_t count;
89 phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
90};
91
/* Full set of per-domain DPM level tables for Tonga: engine/memory/PCIe
 * plus the voltage rails (VDDC, VDDGFX, VDDCI, MVDD). */
struct tonga_dpm_table {
	struct tonga_single_dpm_table sclk_table;
	struct tonga_single_dpm_table mclk_table;
	struct tonga_single_dpm_table pcie_speed_table;
	struct tonga_single_dpm_table vddc_table;
	struct tonga_single_dpm_table vdd_gfx_table;
	struct tonga_single_dpm_table vdd_ci_table;
	struct tonga_single_dpm_table mvdd_table;
};
/* NOTE(review): tag '_phw_tonga_dpm_table' is never defined anywhere in
 * this header (the struct above is 'tonga_dpm_table'), so this typedef
 * names an incomplete type.  Looks like a leftover from an older naming
 * scheme — confirm nothing uses phw_tonga_dpm_table before removing. */
typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table;
102
103
/* Saved copies of the clock-control registers (SPLL for the engine
 * clock, MPLL/DLL for the memory clock) captured from the VBIOS boot
 * state.  The 'v' prefix marks these as register-value shadows.
 * NOTE(review): the struct tag spells "regisiters" (typo); the typedef
 * below uses the correct spelling, so external code should only see
 * phw_tonga_clock_registers.  Renaming the tag would be safe only if no
 * other file references it directly. */
struct _phw_tonga_clock_regisiters {
	uint32_t vCG_SPLL_FUNC_CNTL;
	uint32_t vCG_SPLL_FUNC_CNTL_2;
	uint32_t vCG_SPLL_FUNC_CNTL_3;
	uint32_t vCG_SPLL_FUNC_CNTL_4;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t vDLL_CNTL;
	uint32_t vMCLK_PWRMGT_CNTL;
	uint32_t vMPLL_AD_FUNC_CNTL;
	uint32_t vMPLL_DQ_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL_1;
	uint32_t vMPLL_FUNC_CNTL_2;
	uint32_t vMPLL_SS1;
	uint32_t vMPLL_SS2;
};
typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers;
122
123struct _phw_tonga_voltage_smio_registers {
124 uint32_t vs0_vid_lower_smio_cntl;
125};
126typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
127
128
129struct _phw_tonga_mc_reg_entry {
130 uint32_t mclk_max;
131 uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
132};
133typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
134
135struct _phw_tonga_mc_reg_table {
136 uint8_t last; /* number of registers*/
137 uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
138 uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
139 phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
140 SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
141};
142typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
143
144#define DISABLE_MC_LOADMICROCODE 1
145#define DISABLE_MC_CFGPROGRAMMING 2
146
147/*Ultra Low Voltage parameter structure */
148struct _phw_tonga_ulv_parm{
149 bool ulv_supported;
150 uint32_t ch_ulv_parameter;
151 uint32_t ulv_volt_change_delay;
152 struct tonga_performance_level ulv_power_level;
153};
154typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
155
156#define TONGA_MAX_LEAKAGE_COUNT 8
157
158struct _phw_tonga_leakage_voltage {
159 uint16_t count;
160 uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT];
161 uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
162};
163typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
164
165struct _phw_tonga_display_timing {
166 uint32_t min_clock_insr;
167 uint32_t num_existing_displays;
168};
169typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
170
171struct _phw_tonga_dpmlevel_enable_mask {
172 uint32_t uvd_dpm_enable_mask;
173 uint32_t vce_dpm_enable_mask;
174 uint32_t acp_dpm_enable_mask;
175 uint32_t samu_dpm_enable_mask;
176 uint32_t sclk_dpm_enable_mask;
177 uint32_t mclk_dpm_enable_mask;
178 uint32_t pcie_dpm_enable_mask;
179};
180typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
181
182struct _phw_tonga_pcie_perf_range {
183 uint16_t max;
184 uint16_t min;
185};
186typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
187
188struct _phw_tonga_vbios_boot_state {
189 uint16_t mvdd_bootup_value;
190 uint16_t vddc_bootup_value;
191 uint16_t vddci_bootup_value;
192 uint16_t vddgfx_bootup_value;
193 uint32_t sclk_bootup_value;
194 uint32_t mclk_bootup_value;
195 uint16_t pcie_gen_bootup_value;
196 uint16_t pcie_lane_bootup_value;
197};
198typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
199
200#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
201#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
202#define DPMTABLE_UPDATE_SCLK 0x00000004
203#define DPMTABLE_UPDATE_MCLK 0x00000008
204
205/* We need to review which fields are needed. */
206/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
207struct tonga_hwmgr {
208 struct tonga_dpm_table dpm_table;
209 struct tonga_dpm_table golden_dpm_table;
210
211 uint32_t voting_rights_clients0;
212 uint32_t voting_rights_clients1;
213 uint32_t voting_rights_clients2;
214 uint32_t voting_rights_clients3;
215 uint32_t voting_rights_clients4;
216 uint32_t voting_rights_clients5;
217 uint32_t voting_rights_clients6;
218 uint32_t voting_rights_clients7;
219 uint32_t static_screen_threshold_unit;
220 uint32_t static_screen_threshold;
221 uint32_t voltage_control;
222 uint32_t vdd_gfx_control;
223
224 uint32_t vddc_vddci_delta;
225 uint32_t vddc_vddgfx_delta;
226
227 struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
228 struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
229 struct pp_interrupt_registration_info smc_to_host_interrupt_info;
230 uint32_t active_auto_throttle_sources;
231
232 struct pp_interrupt_registration_info external_throttle_interrupt;
233 irq_handler_func_t external_throttle_callback;
234 void *external_throttle_context;
235
236 struct pp_interrupt_registration_info ctf_interrupt_info;
237 irq_handler_func_t ctf_callback;
238 void *ctf_context;
239
240 phw_tonga_clock_registers clock_registers;
241 phw_tonga_voltage_smio_registers voltage_smio_registers;
242
243 bool is_memory_GDDR5;
244 uint16_t acpi_vddc;
245 bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
246 uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
247 uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
248 uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
249 uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
250 uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
251 phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
252 phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
253 phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
254
255 uint32_t mvdd_control;
256 uint32_t vddc_mask_low;
257 uint32_t mvdd_mask_low;
258 uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
259 uint16_t min_vddc_in_pp_table;
260 uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
261 uint16_t min_vddci_in_pp_table;
262 uint32_t mclk_strobe_mode_threshold;
263 uint32_t mclk_stutter_mode_threshold;
264 uint32_t mclk_edc_enable_threshold;
265 uint32_t mclk_edc_wr_enable_threshold;
266 bool is_uvd_enabled;
267 bool is_xdma_enabled;
268 phw_tonga_vbios_boot_state vbios_boot_state;
269
270 bool battery_state;
271 bool is_tlu_enabled;
272 bool pcie_performance_request;
273
274 /* -------------- SMC SRAM Address of firmware header tables ----------------*/
275 uint32_t sram_end; /* The first address after the SMC SRAM. */
276 uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
277 uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
278 uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
279 uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
280 uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
281 SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
282 SMU72_Discrete_MCRegisters mc_reg_table;
283 SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
284 /* -------------- Stuff originally coming from Evergreen --------------------*/
285 phw_tonga_mc_reg_table tonga_mc_reg_table;
286 uint32_t vdd_ci_control;
287 pp_atomctrl_voltage_table vddc_voltage_table;
288 pp_atomctrl_voltage_table vddci_voltage_table;
289 pp_atomctrl_voltage_table vddgfx_voltage_table;
290 pp_atomctrl_voltage_table mvdd_voltage_table;
291
292 uint32_t mgcg_cgtt_local2;
293 uint32_t mgcg_cgtt_local3;
294 uint32_t gpio_debug;
295 uint32_t mc_micro_code_feature;
296 uint32_t highest_mclk;
297 uint16_t acpi_vdd_ci;
298 uint8_t mvdd_high_index;
299 uint8_t mvdd_low_index;
300 bool dll_defaule_on;
301 bool performance_request_registered;
302
303
304 /* ----------------- Low Power Features ---------------------*/
305 phw_tonga_bacos bacos;
306 phw_tonga_ulv_parm ulv;
307 /* ----------------- CAC Stuff ---------------------*/
308 uint32_t cac_table_start;
309 bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
310 bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
311 bool cac_enabled;
312 /* ----------------- DPM2 Parameters ---------------------*/
313 uint32_t power_containment_features;
314 bool enable_bapm_feature;
315 bool enable_tdc_limit_feature;
316 bool enable_pkg_pwr_tracking_feature;
317 bool disable_uvd_power_tune_feature;
318 struct tonga_pt_defaults *power_tune_defaults;
319 SMU72_Discrete_PmFuses power_tune_table;
320 uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
321 uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
322
323
324 bool enable_dte_feature;
325
326
327 /* ----------------- Phase Shedding ---------------------*/
328 bool vddc_phase_shed_control;
329 /* --------------------- DI/DT --------------------------*/
330 phw_tonga_display_timing display_timing;
331 /* --------- ReadRegistry data for memory and engine clock margins ---- */
332 uint32_t engine_clock_data;
333 uint32_t memory_clock_data;
334 /* -------- Thermal Temperature Setting --------------*/
335 phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask;
336 uint32_t need_update_smu7_dpm_table;
337 uint32_t sclk_dpm_key_disabled;
338 uint32_t mclk_dpm_key_disabled;
339 uint32_t pcie_dpm_key_disabled;
340 uint32_t min_engine_clocks; /* used to store the previous dal min sclock */
341 phw_tonga_pcie_perf_range pcie_gen_performance;
342 phw_tonga_pcie_perf_range pcie_lane_performance;
343 phw_tonga_pcie_perf_range pcie_gen_power_saving;
344 phw_tonga_pcie_perf_range pcie_lane_power_saving;
345 bool use_pcie_performance_levels;
346 bool use_pcie_power_saving_levels;
347 uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
348 uint32_t mclk_activity_target;
349 uint32_t low_sclk_interrupt_threshold;
350 uint32_t last_mclk_dpm_enable_mask;
351 bool uvd_enabled;
352 uint32_t pcc_monitor_enabled;
353
354 /* --------- Power Gating States ------------*/
355 bool uvd_power_gated; /* 1: gated, 0:not gated */
356 bool vce_power_gated; /* 1: gated, 0:not gated */
357 bool samu_power_gated; /* 1: gated, 0:not gated */
358 bool acp_power_gated; /* 1: gated, 0:not gated */
359 bool pg_acp_init;
360};
361
362typedef struct tonga_hwmgr tonga_hwmgr;
363
364#define TONGA_DPM2_NEAR_TDP_DEC 10
365#define TONGA_DPM2_ABOVE_SAFE_INC 5
366#define TONGA_DPM2_BELOW_SAFE_INC 20
367
368#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */
369
370#define TONGA_DPM2_LTS_TRUNCATE 0
371
372#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */
373
374#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */
375#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */
376
377#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50
378
379#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
380#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12
381#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
382#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
383#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
384
385#define TONGA_VOLTAGE_CONTROL_NONE 0x0
386#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1
387#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2
388#define TONGA_VOLTAGE_CONTROL_MERGED 0x3
389
390#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */
391
392#define TONGA_UNUSED_GPIO_PIN 0x7F
393
394int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
395int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
396int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
397int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
398int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
399uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
400
401#endif
402
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
deleted file mode 100644
index 24d9a05e7997..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
+++ /dev/null
@@ -1,495 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "smumgr.h"
26#include "tonga_hwmgr.h"
27#include "tonga_powertune.h"
28#include "tonga_smumgr.h"
29#include "smu72_discrete.h"
30#include "pp_debug.h"
31#include "tonga_ppsmc.h"
32
33#define VOLTAGE_SCALE 4
34#define POWERTUNE_DEFAULT_SET_MAX 1
35
/* Built-in powertune parameter sets.  Entry 0 is the fallback used when
 * the powerplay table does not select a set (see
 * tonga_initialize_power_tune_defaults); usPowerTuneDataSetID is a
 * 1-based index into this array. */
struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
	{1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
	{0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
};
42
/*
 * tonga_initialize_power_tune_defaults - pick powertune defaults and
 * derive the power-containment feature flags.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * Selects an entry of tonga_power_tune_data_set_array based on the
 * 1-based usPowerTuneDataSetID from the powerplay table, falling back to
 * entry 0 when the table is absent or the ID is 0/out of range.  All
 * ramping caps are cleared; CAC is then re-enabled below.
 */
void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
		(struct phm_ppt_v1_information *)(hwmgr->pptable);
	/* NOTE(review): tmp is never set before being stored below, so
	 * dte_tj_offset is always 0 and the !tmp branch always runs —
	 * presumably a placeholder for a fused Tj-offset read; confirm. */
	uint32_t tmp = 0;

	if (table_info &&
		table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
		table_info->cac_dtp_table->usPowerTuneDataSetID)
		tonga_hwmgr->power_tune_defaults =
				&tonga_power_tune_data_set_array
				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
	else
		tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];

	/* Start from a clean slate: no CAC and no SQ/DB/TD/TCP ramping. */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	tonga_hwmgr->dte_tj_offset = tmp;

	if (!tmp) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CAC);

		tonga_hwmgr->fast_watermark_threshold = 100;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
			/* With tmp forced to 1: DTE off, TDC limit and
			 * package-power tracking on. */
			tmp = 1;
			tonga_hwmgr->enable_dte_feature = tmp ? false : true;
			tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
			tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
		}
	}
}
87
88
/*
 * tonga_populate_bapm_parameters_in_dpm_table - fill the BAPM/DTE fields
 * of the SMC DPM table from the powerplay CAC/TDP table and the selected
 * powertune defaults.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * Return: 0 on success, negative error code from PP_ASSERT_WITH_CODE
 * paths otherwise.
 */
int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct tonga_pt_defaults *defaults = data->power_tune_defaults;
	SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	int i, j, k;
	uint16_t *pdef1;	/* walks defaults->bapmti_r */
	uint16_t *pdef2;	/* walks defaults->bapmti_rc */


	/* TDP number of fraction bits are changed from 8 to 7 for Fiji
	 * as requested by SMC team
	 */
	/* usTDP is in watts; * 256 converts to 8.8 fixed point. */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	/* GpuTjMax is a uint8_t, so the target temp must fit in 8 bits. */
	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",
			);

	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
	pdef1 = defaults->bapmti_r;
	pdef2 = defaults->bapmti_rc;

	/* Copy the flat default arrays into the SMC's 3-D
	 * iterations x sources x sinks layout, endian-converted. */
	for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU72_DTE_SOURCES; j++) {
			for (k = 0; k < SMU72_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
136
137static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
138{
139 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
140 const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
141
142 data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
143 data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
144 data->power_tune_table.SviLoadLineTrimVddC = 3;
145 data->power_tune_table.SviLoadLineOffsetVddC = 0;
146
147 return 0;
148}
149
/*
 * tonga_populate_tdc_limit - fill the TDC fields (PmFuses DW7) from the
 * powerplay table and powertune defaults.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * Return: always 0.
 */
static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;

	/* TDC number of fraction bits are changed from 8 to 7
	 * for Fiji as requested by SMC team
	 */
	/* usTDC (amps) to 8.8 fixed point, stored SMC-endian. */
	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
	data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}
170
171static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
172{
173 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
174 const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
175 uint32_t temp;
176
177 if (tonga_read_smc_sram_dword(hwmgr->smumgr,
178 fuse_table_offset +
179 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
180 (uint32_t *)&temp, data->sram_end))
181 PP_ASSERT_WITH_CODE(false,
182 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
183 return -EINVAL);
184 else
185 data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
186
187 return 0;
188}
189
190static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
191{
192 int i;
193 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
194
195 /* Currently not used. Set all to zero. */
196 for (i = 0; i < 16; i++)
197 data->power_tune_table.LPMLTemperatureScaler[i] = 0;
198
199 return 0;
200}
201
/*
 * tonga_populate_fuzzy_fan - fill the fuzzy-fan PWM delta (PmFuses
 * DW13-DW14) from the thermal controller's fan output sensitivity.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * If bit 15 of usFanOutputSensitivity is set or the value is 0, the
 * default sensitivity is used instead.  (NOTE(review): bit 15 appears
 * to be a mode/invalid flag in the fan-table encoding — confirm against
 * the thermal controller definition.)
 *
 * Return: always 0.
 */
static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.
			usFanOutputSensitivity & (1 << 15)) ||
		(hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
		hwmgr->thermal_controller.advanceFanControlParameters.
			usFanOutputSensitivity = hwmgr->thermal_controller.
				advanceFanControlParameters.usDefaultFanOutputSensitivity;

	data->power_tune_table.FuzzyFan_PwmSetDelta =
			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
				advanceFanControlParameters.usFanOutputSensitivity);
	return 0;
}
218
219static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
220{
221 int i;
222 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
223
224 /* Currently not used. Set all to zero. */
225 for (i = 0; i < 16; i++)
226 data->power_tune_table.GnbLPML[i] = 0;
227
228 return 0;
229}
230
/* DW19 (GnbLPML min/max VID): intentionally a no-op on Tonga; the stub
 * keeps the PM-fuse population sequence uniform with other ASICs.
 * Always returns 0. */
static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
235
236static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
237{
238 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
239 struct phm_ppt_v1_information *table_info =
240 (struct phm_ppt_v1_information *)(hwmgr->pptable);
241 uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
242 uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
243 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
244
245 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
246 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
247
248 data->power_tune_table.BapmVddCBaseLeakageHiSidd =
249 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
250 data->power_tune_table.BapmVddCBaseLeakageLoSidd =
251 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
252
253 return 0;
254}
255
/*
 * tonga_populate_pm_fuses - build the PmFuses table and upload it to
 * SMC SRAM.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * No-op unless PHM_PlatformCaps_PowerContainment is enabled.  Locates
 * the PmFuses table via the SMU72 firmware header, populates it field
 * group by field group (DW6..DW20 helpers below), then copies the whole
 * power_tune_table image into SMC SRAM.
 *
 * Return: 0 on success, -EINVAL if any step fails.
 */
int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Read the table's SRAM offset out of the firmware header. */
		if (tonga_read_smc_sram_dword(hwmgr->smumgr,
				SMU72_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU72_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, data->sram_end))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to get pm_fuse_table_offset Failed!",
				return -EINVAL);

		/* DW6 */
		if (tonga_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate SviLoadLine Failed!",
				return -EINVAL);
		/* DW7 */
		if (tonga_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate TdcWaterfallCtl Failed !",
				return -EINVAL);

		/* DW9-DW12 */
		if (tonga_populate_temperature_scaler(hwmgr) != 0)
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate LPMLTemperatureScaler Failed!",
				return -EINVAL);

		/* DW13-DW14 */
		if (tonga_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate Fuzzy Fan Control parameters Failed!",
				return -EINVAL);

		/* DW15-DW18 */
		if (tonga_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate GnbLPML Failed!",
				return -EINVAL);

		/* DW19 */
		if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate GnbLPML Min and Max Vid Failed!",
				return -EINVAL);

		/* DW20 */
		if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
				return -EINVAL);

		/* Upload the completed image to the SMC. */
		if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&data->power_tune_table,
				sizeof(struct SMU72_Discrete_PmFuses), data->sram_end))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to download PmFuseTable Failed!",
				return -EINVAL);
	}
	return 0;
}
325
326int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr)
327{
328 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
329 int result = 0;
330
331 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
332 PHM_PlatformCaps_CAC)) {
333 int smc_result;
334
335 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
336 (uint16_t)(PPSMC_MSG_EnableCac));
337 PP_ASSERT_WITH_CODE((smc_result == 0),
338 "Failed to enable CAC in SMC.", result = -1);
339
340 data->cac_enabled = (smc_result == 0) ? true : false;
341 }
342 return result;
343}
344
345int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr)
346{
347 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
348 int result = 0;
349
350 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
351 PHM_PlatformCaps_CAC) && data->cac_enabled) {
352 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
353 (uint16_t)(PPSMC_MSG_DisableCac));
354 PP_ASSERT_WITH_CODE((smc_result == 0),
355 "Failed to disable CAC in SMC.", result = -1);
356
357 data->cac_enabled = false;
358 }
359 return result;
360}
361
362int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
363{
364 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
365
366 if (data->power_containment_features &
367 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
368 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
369 PPSMC_MSG_PkgPwrSetLimit, n);
370 return 0;
371}
372
373static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
374{
375 return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
376 PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
377}
378
/*
 * tonga_enable_power_containment - enable DTE, TDC-limit and
 * package-power-tracking in the SMC as selected at init time.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * power_containment_features accumulates a flag for each feature the
 * SMC actually acknowledged; failures set result but do not stop the
 * remaining features from being attempted.
 *
 * Return: 0 when everything requested succeeded, -1 if any SMC message
 * failed.
 */
int tonga_enable_power_containment(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int smc_result;
	int result = 0;

	data->power_containment_features = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (data->enable_dte_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_EnableDTE));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to enable DTE in SMC.", result = -1;);
			if (smc_result == 0)
				data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
		}

		if (data->enable_tdc_limit_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to enable TDCLimit in SMC.", result = -1;);
			if (smc_result == 0)
				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_TDCLimit;
		}

		if (data->enable_pkg_pwr_tracking_feature) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
			if (smc_result == 0) {
				struct phm_cac_tdp_table *cac_table =
						table_info->cac_dtp_table;
				/* Default limit in 8.8 fixed point watts. */
				uint32_t default_limit =
					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);

				data->power_containment_features |=
						POWERCONTAINMENT_FEATURE_PkgPwrLimit;

				if (tonga_set_power_limit(hwmgr, default_limit))
					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
			}
		}
	}
	return result;
}
430
/*
 * tonga_disable_power_containment - turn off every power-containment
 * feature that was recorded as enabled, then clear the feature mask.
 * @hwmgr: powerplay hardware manager (backend must be a tonga_hwmgr)
 *
 * Return: 0 on success/no-op, otherwise the last failing SMC result.
 */
int tonga_disable_power_containment(struct pp_hwmgr *hwmgr)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	int result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment) &&
			data->power_containment_features) {
		int smc_result;

		if (data->power_containment_features &
				POWERCONTAINMENT_FEATURE_TDCLimit) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_TDCLimitDisable));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to disable TDCLimit in SMC.",
					result = smc_result);
		}

		if (data->power_containment_features &
				POWERCONTAINMENT_FEATURE_DTE) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_DisableDTE));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to disable DTE in SMC.",
					result = smc_result);
		}

		if (data->power_containment_features &
				POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
					(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
			PP_ASSERT_WITH_CODE((smc_result == 0),
					"Failed to disable PkgPwrTracking in SMC.",
					result = smc_result);
		}
		data->power_containment_features = 0;
	}

	return result;
}
472
/*
 * tonga_power_control_set_level - apply the user TDP adjustment as an
 * OverDrive target TDP.
 * @hwmgr: powerplay hardware manager
 *
 * Return: 0 on success or when power containment is disabled, otherwise
 * the SMC message result.
 */
int tonga_power_control_set_level(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
	int adjust_percent, target_tdp;
	int result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* adjustment percentage has already been validated */
		/* Polarity selects raise vs lower of the base TDP. */
		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
				hwmgr->platform_descriptor.TDPAdjustment :
				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
		/* SMC requested that target_tdp to be 7 bit fraction in DPM table
		 * but message to be 8 bit fraction for messages
		 */
		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
		result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
	}

	return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644
index 47ef1ca2d78b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ /dev/null
@@ -1,590 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <asm/div64.h>
24#include "tonga_thermal.h"
25#include "tonga_hwmgr.h"
26#include "tonga_smumgr.h"
27#include "tonga_ppsmc.h"
28#include "smu/smu_7_1_2_d.h"
29#include "smu/smu_7_1_2_sh_mask.h"
30
31/**
32* Get Fan Speed Control Parameters.
33* @param hwmgr the address of the powerplay hardware manager.
34* @param pSpeed is the address of the structure where the result is to be placed.
35* @exception Always succeeds except if we cannot zero out the output structure.
36*/
37int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
38{
39
40 if (hwmgr->thermal_controller.fanInfo.bNoFan)
41 return 0;
42
43 fan_speed_info->supports_percent_read = true;
44 fan_speed_info->supports_percent_write = true;
45 fan_speed_info->min_percent = 0;
46 fan_speed_info->max_percent = 100;
47
48 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
49 fan_speed_info->supports_rpm_read = true;
50 fan_speed_info->supports_rpm_write = true;
51 fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
52 fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
53 } else {
54 fan_speed_info->min_rpm = 0;
55 fan_speed_info->max_rpm = 0;
56 }
57
58 return 0;
59}
60
61/**
62* Get Fan Speed in percent.
63* @param hwmgr the address of the powerplay hardware manager.
64* @param pSpeed is the address of the structure where the result is to be placed.
65* @exception Fails is the 100% setting appears to be 0.
66*/
67int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
68{
69 uint32_t duty100;
70 uint32_t duty;
71 uint64_t tmp64;
72
73 if (hwmgr->thermal_controller.fanInfo.bNoFan)
74 return 0;
75
76 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
77 duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
78
79 if (0 == duty100)
80 return -EINVAL;
81
82
83 tmp64 = (uint64_t)duty * 100;
84 do_div(tmp64, duty100);
85 *speed = (uint32_t)tmp64;
86
87 if (*speed > 100)
88 *speed = 100;
89
90 return 0;
91}
92
93/**
94* Get Fan Speed in RPM.
95* @param hwmgr the address of the powerplay hardware manager.
96* @param speed is the address of the structure where the result is to be placed.
97* @exception Returns not supported if no fan is found or if pulses per revolution are not set
98*/
99int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
100{
101 return 0;
102}
103
104/**
105* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
106* @param hwmgr the address of the powerplay hardware manager.
107* mode the fan control mode, 0 default, 1 by percent, 5, by RPM
108* @exception Should always succeed.
109*/
110int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
111{
112
113 if (hwmgr->fan_ctrl_is_in_default_mode) {
114 hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
115 hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
116 hwmgr->fan_ctrl_is_in_default_mode = false;
117 }
118
119 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
120 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
121
122 return 0;
123}
124
125/**
126* Reset Fan Speed Control to default mode.
127* @param hwmgr the address of the powerplay hardware manager.
128* @exception Should always succeed.
129*/
130int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
131{
132 if (!hwmgr->fan_ctrl_is_in_default_mode) {
133 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
134 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
135 hwmgr->fan_ctrl_is_in_default_mode = true;
136 }
137
138 return 0;
139}
140
141int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
142{
143 int result;
144
145 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
146 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
147 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
148/*
149 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
150 hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
151 else
152 hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
153*/
154 } else {
155 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
156 result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
157 }
158/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
159 if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
160 result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
161 hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
162*/
163 return result;
164}
165
166
167int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
168{
169 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
170}
171
172/**
173* Set Fan Speed in percent.
174* @param hwmgr the address of the powerplay hardware manager.
175* @param speed is the percentage value (0% - 100%) to be set.
176* @exception Fails is the 100% setting appears to be 0.
177*/
178int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
179{
180 uint32_t duty100;
181 uint32_t duty;
182 uint64_t tmp64;
183
184 if (hwmgr->thermal_controller.fanInfo.bNoFan)
185 return -EINVAL;
186
187 if (speed > 100)
188 speed = 100;
189
190 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
191 tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
192
193 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
194
195 if (0 == duty100)
196 return -EINVAL;
197
198 tmp64 = (uint64_t)speed * duty100;
199 do_div(tmp64, 100);
200 duty = (uint32_t)tmp64;
201
202 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
203
204 return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
205}
206
207/**
208* Reset Fan Speed to default.
209* @param hwmgr the address of the powerplay hardware manager.
210* @exception Always succeeds.
211*/
212int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
213{
214 int result;
215
216 if (hwmgr->thermal_controller.fanInfo.bNoFan)
217 return 0;
218
219 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
220 result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
221 if (0 == result)
222 result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
223 } else
224 result = tonga_fan_ctrl_set_default_mode(hwmgr);
225
226 return result;
227}
228
229/**
230* Set Fan Speed in RPM.
231* @param hwmgr the address of the powerplay hardware manager.
232* @param speed is the percentage value (min - max) to be set.
233* @exception Fails is the speed not lie between min and max.
234*/
235int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
236{
237 return 0;
238}
239
240/**
241* Reads the remote temperature from the SIslands thermal controller.
242*
243* @param hwmgr The address of the hardware manager.
244*/
245int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
246{
247 int temp;
248
249 temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
250
251/* Bit 9 means the reading is lower than the lowest usable value. */
252 if (0 != (0x200 & temp))
253 temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
254 else
255 temp = (temp & 0x1ff);
256
257 temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
258
259 return temp;
260}
261
262/**
263* Set the requested temperature range for high and low alert signals
264*
265* @param hwmgr The address of the hardware manager.
266* @param range Temperature range to be programmed for high and low alert signals
267* @exception PP_Result_BadInput if the input data is not valid.
268*/
269static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
270{
271 uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
272 uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
273
274 if (low < low_temp)
275 low = low_temp;
276 if (high > high_temp)
277 high = high_temp;
278
279 if (low > high)
280 return -EINVAL;
281
282 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
283 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
284 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
285
286 return 0;
287}
288
289/**
290* Programs thermal controller one-time setting registers
291*
292* @param hwmgr The address of the hardware manager.
293*/
294static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
295{
296 if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
297 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
298 CG_TACH_CTRL, EDGE_PER_REV,
299 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
300
301 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
302
303 return 0;
304}
305
306/**
307* Enable thermal alerts on the RV770 thermal controller.
308*
309* @param hwmgr The address of the hardware manager.
310*/
311static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
312{
313 uint32_t alert;
314
315 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
316 alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
317 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
318
319 /* send message to SMU to enable internal thermal interrupts */
320 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
321}
322
323/**
324* Disable thermal alerts on the RV770 thermal controller.
325* @param hwmgr The address of the hardware manager.
326*/
327static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
328{
329 uint32_t alert;
330
331 alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
332 alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
333 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
334
335 /* send message to SMU to disable internal thermal interrupts */
336 return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
337}
338
339/**
340* Uninitialize the thermal controller.
341* Currently just disables alerts.
342* @param hwmgr The address of the hardware manager.
343*/
344int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
345{
346 int result = tonga_thermal_disable_alert(hwmgr);
347
348 if (hwmgr->thermal_controller.fanInfo.bNoFan)
349 tonga_fan_ctrl_set_default_mode(hwmgr);
350
351 return result;
352}
353
/**
* Set up the fan table to control the fan using the SMC.
* Builds an SMU72_Discrete_FanTable from the thermal-controller parameters
* and uploads it to SMC SRAM at data->fan_table_start.
* @param hwmgr the address of the powerplay hardware manager.
* @param input unused task-table argument.
* @param output unused task-table argument.
* @param storage unused task-table argument.
* @param result the last failure code (unused here).
* @return always 0; on any precondition failure the MicrocodeFanControl
*         cap is cleared and 0 is returned so the task chain continues.
*/
int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
	/* First member set to FDO_MODE_HARDWARE, remainder zero-initialized. */
	SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* Nothing to do when SMC microcode fan control is not enabled. */
	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
		return 0;

	/* Without a valid SRAM offset for the fan table, permanently fall
	 * back to non-microcode fan control. */
	if (0 == data->fan_table_start) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);

	/* A zero full-scale duty would make the math below divide by zero. */
	if (0 == duty100) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* usPWMMin appears to be in hundredths of a percent (divided by
	 * 10000) — NOTE(review): confirm against the pptable definition. */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Temperature and PWM spans of the two ramp segments (min->med->high). */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Segment slopes; "+50 ... /100" rounds to nearest.
	 * NOTE(review): assumes t_diff1/t_diff2 are non-zero — presumably
	 * guaranteed by pptable validation; verify. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Temperatures are stored big-endian, scaled down by 100 with rounding. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = tonga_get_xclk(hwmgr);

	/* Refresh period derived from the crystal clock and the configured
	 * cycle delay; the /1600 scale factor comes from the SMC spec. */
	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);

	fan_table.FanControl_GL_Flag = 1;

	/* Upload the assembled table to SMC SRAM; res intentionally unused
	 * while the follow-up messages below stay disabled. */
	res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
				hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);

	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
				hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);

	if (0 != res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
*/
	return 0;
}
444
445/**
446* Start the fan control on the SMC.
447* @param hwmgr the address of the powerplay hardware manager.
448* @param pInput the pointer to input data
449* @param pOutput the pointer to output data
450* @param pStorage the pointer to temporary storage
451* @param Result the last failure code
452* @return result from set temperature range routine
453*/
454int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
455{
456/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
457 * Make sure that we still think controlling the fan is OK.
458*/
459 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
460 tonga_fan_ctrl_start_smc_fan_control(hwmgr);
461 tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
462 }
463
464 return 0;
465}
466
467/**
468* Set temperature range for high and low alerts
469* @param hwmgr the address of the powerplay hardware manager.
470* @param pInput the pointer to input data
471* @param pOutput the pointer to output data
472* @param pStorage the pointer to temporary storage
473* @param Result the last failure code
474* @return result from set temperature range routine
475*/
476int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
477{
478 struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
479
480 if (range == NULL)
481 return -EINVAL;
482
483 return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
484}
485
/*
 * Task-table step: program the one-time thermal setup registers.
 * The extra arguments are required by the phm table-callback signature
 * and are intentionally unused.
 * @return result from tonga_thermal_initialize().
 */
int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_initialize(hwmgr);
}
499
/*
 * Task-table step: enable the high and low thermal alerts.
 * The extra arguments are required by the phm table-callback signature
 * and are intentionally unused.
 * @return result from tonga_thermal_enable_alert().
 */
int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_enable_alert(hwmgr);
}
513
/*
 * Task-table step: disable the high and low thermal alerts.
 * The extra arguments are required by the phm table-callback signature
 * and are intentionally unused.
 * @return result from tonga_thermal_disable_alert().
 */
static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
	return tonga_thermal_disable_alert(hwmgr);
}
527
/* Task list run by phm when starting the thermal controller; executed in
 * order, { NULL, NULL } terminates. The first field is an optional
 * "is this step needed" predicate — presumably skipped when NULL (verify
 * against phm_construct_table/phm_run semantics). */
static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
	{ NULL, tf_tonga_thermal_initialize },
	{ NULL, tf_tonga_thermal_set_temperature_range },
	{ NULL, tf_tonga_thermal_enable_alert },
/* We should restrict performance levels to low before we halt the SMC.
 * On the other hand we are still in boot state when we do this so it would be pointless.
 * If this assumption changes we have to revisit this table.
 */
	{ NULL, tf_tonga_thermal_setup_fan_table},
	{ NULL, tf_tonga_thermal_start_smc_fan_control},
	{ NULL, NULL }
};

/* Header wrapping the start-controller list: zero storage, no flags. */
static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
	0,
	PHM_MasterTableFlag_None,
	tonga_thermal_start_thermal_controller_master_list
};

/* Task list for reprogramming the alert range: disable alerts, set the
 * new range, then re-enable alerts so no alert fires mid-update. */
static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
	{ NULL, tf_tonga_thermal_disable_alert},
	{ NULL, tf_tonga_thermal_set_temperature_range},
	{ NULL, tf_tonga_thermal_enable_alert},
	{ NULL, NULL }
};

/* Header wrapping the set-temperature-range list: zero storage, no flags. */
static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
	0,
	PHM_MasterTableFlag_None,
	tonga_thermal_set_temperature_range_master_list
};
559
560int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
561{
562 if (!hwmgr->thermal_controller.fanInfo.bNoFan)
563 tonga_fan_ctrl_set_default_mode(hwmgr);
564 return 0;
565}
566
567/**
568* Initializes the thermal controller related functions in the Hardware Manager structure.
569* @param hwmgr The address of the hardware manager.
570* @exception Any error code from the low-level communication.
571*/
572int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
573{
574 int result;
575
576 result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
577
578 if (0 == result) {
579 result = phm_construct_table(hwmgr,
580 &tonga_thermal_start_thermal_controller_master,
581 &(hwmgr->start_thermal_controller));
582 if (0 != result)
583 phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
584 }
585
586 if (0 == result)
587 hwmgr->fan_ctrl_is_in_default_mode = true;
588 return result;
589}
590
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644
index aa335f267e25..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
#ifndef TONGA_THERMAL_H
#define TONGA_THERMAL_H

#include "hwmgr.h"

/* Bits in CG_THERMAL_INT.THERM_INT_MASK selecting the high/low alerts. */
#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
#define TONGA_THERMAL_LOW_ALERT_MASK 0x2

/* Raw sensor reading limits (degrees C; the sensor field is 9 bits plus
 * an underflow flag bit). */
#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255

/* Programmable alert-threshold window (degrees C). */
#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255

/* CG_FDO_CTRL2.FDO_PWM_MODE values for user-driven fan control:
 * static duty (percent) and static RPM. */
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5


/* phm task-table entry points (extra void*/int arguments belong to the
 * table-callback signature). */
extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);

/* Thermal controller and fan-control API used by the tonga hwmgr. */
extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);

#endif
61
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 18f39e89a7aa..3fb5e57a378b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,19 @@
29#include "amd_shared.h" 29#include "amd_shared.h"
30#include "cgs_common.h" 30#include "cgs_common.h"
31 31
32enum amd_pp_sensors {
33 AMDGPU_PP_SENSOR_GFX_SCLK = 0,
34 AMDGPU_PP_SENSOR_VDDNB,
35 AMDGPU_PP_SENSOR_VDDGFX,
36 AMDGPU_PP_SENSOR_UVD_VCLK,
37 AMDGPU_PP_SENSOR_UVD_DCLK,
38 AMDGPU_PP_SENSOR_VCE_ECCLK,
39 AMDGPU_PP_SENSOR_GPU_LOAD,
40 AMDGPU_PP_SENSOR_GFX_MCLK,
41 AMDGPU_PP_SENSOR_GPU_TEMP,
42 AMDGPU_PP_SENSOR_VCE_POWER,
43 AMDGPU_PP_SENSOR_UVD_POWER,
44};
32 45
33enum amd_pp_event { 46enum amd_pp_event {
34 AMD_PP_EVENT_INITIALIZE = 0, 47 AMD_PP_EVENT_INITIALIZE = 0,
@@ -260,6 +273,7 @@ enum amd_pp_clock_type {
260struct amd_pp_clocks { 273struct amd_pp_clocks {
261 uint32_t count; 274 uint32_t count;
262 uint32_t clock[MAX_NUM_CLOCKS]; 275 uint32_t clock[MAX_NUM_CLOCKS];
276 uint32_t latency[MAX_NUM_CLOCKS];
263}; 277};
264 278
265 279
@@ -331,8 +345,6 @@ struct amd_powerplay_funcs {
331 int (*powergate_uvd)(void *handle, bool gate); 345 int (*powergate_uvd)(void *handle, bool gate);
332 int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, 346 int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
333 void *input, void *output); 347 void *input, void *output);
334 void (*print_current_performance_level)(void *handle,
335 struct seq_file *m);
336 int (*set_fan_control_mode)(void *handle, uint32_t mode); 348 int (*set_fan_control_mode)(void *handle, uint32_t mode);
337 int (*get_fan_control_mode)(void *handle); 349 int (*get_fan_control_mode)(void *handle);
338 int (*set_fan_speed_percent)(void *handle, uint32_t percent); 350 int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@@ -346,6 +358,7 @@ struct amd_powerplay_funcs {
346 int (*set_sclk_od)(void *handle, uint32_t value); 358 int (*set_sclk_od)(void *handle, uint32_t value);
347 int (*get_mclk_od)(void *handle); 359 int (*get_mclk_od)(void *handle);
348 int (*set_mclk_od)(void *handle, uint32_t value); 360 int (*set_mclk_od)(void *handle, uint32_t value);
361 int (*read_sensor)(void *handle, int idx, int32_t *value);
349}; 362};
350 363
351struct amd_powerplay { 364struct amd_powerplay {
@@ -377,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle,
377int amd_powerplay_get_display_mode_validation_clocks(void *handle, 390int amd_powerplay_get_display_mode_validation_clocks(void *handle,
378 struct amd_pp_simple_clock_info *output); 391 struct amd_pp_simple_clock_info *output);
379 392
393int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
394
380#endif /* _AMD_POWERPLAY_H_ */ 395#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e98748344801..4f0fedd1e9d3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -311,8 +311,6 @@ struct pp_hwmgr_func {
311 int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); 311 int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
312 int (*power_state_set)(struct pp_hwmgr *hwmgr, 312 int (*power_state_set)(struct pp_hwmgr *hwmgr,
313 const void *state); 313 const void *state);
314 void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
315 struct seq_file *m);
316 int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); 314 int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
317 int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); 315 int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
318 int (*display_config_changed)(struct pp_hwmgr *hwmgr); 316 int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -359,6 +357,7 @@ struct pp_hwmgr_func {
359 int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); 357 int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
360 int (*get_mclk_od)(struct pp_hwmgr *hwmgr); 358 int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
361 int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); 359 int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
360 int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
362}; 361};
363 362
364struct pp_table_func { 363struct pp_table_func {
@@ -709,6 +708,7 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
709extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); 708extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
710extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); 709extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
711 710
711extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
712extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 712extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
713 uint32_t sclk, uint16_t id, uint16_t *voltage); 713 uint32_t sclk, uint16_t id, uint16_t *voltage);
714 714
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index f497e7d98e6d..0de443612312 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -23,8 +23,7 @@
23#ifndef _POLARIS10_PWRVIRUS_H 23#ifndef _POLARIS10_PWRVIRUS_H
24#define _POLARIS10_PWRVIRUS_H 24#define _POLARIS10_PWRVIRUS_H
25 25
26#define mmSMC_IND_INDEX_11 0x01AC 26
27#define mmSMC_IND_DATA_11 0x01AD
28#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a 27#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
29#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b 28#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
30#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c 29#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 5983e3150cc5..65eb630bfea3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -21,21 +21,38 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef ICELAND_SMUM_H 24#ifndef _PP_COMMON_H
25#define ICELAND_SMUM_H 25#define _PP_COMMON_H
26 26
27#include "ppsmc.h" 27#include "smu7_ppsmc.h"
28#include "cgs_common.h"
28 29
29extern int iceland_smu_init(struct amdgpu_device *adev); 30#include "smu/smu_7_1_3_d.h"
30extern int iceland_smu_fini(struct amdgpu_device *adev); 31#include "smu/smu_7_1_3_sh_mask.h"
31extern int iceland_smu_start(struct amdgpu_device *adev); 32
33
34#include "smu74.h"
35#include "smu74_discrete.h"
36
37#include "gmc/gmc_8_1_d.h"
38#include "gmc/gmc_8_1_sh_mask.h"
39
40#include "bif/bif_5_0_d.h"
41#include "bif/bif_5_0_sh_mask.h"
42
43
44#include "bif/bif_5_0_d.h"
45#include "bif/bif_5_0_sh_mask.h"
46
47#include "dce/dce_10_0_d.h"
48#include "dce/dce_10_0_sh_mask.h"
49
50#include "gca/gfx_8_0_d.h"
51#include "gca/gfx_8_0_sh_mask.h"
52
53#include "oss/oss_3_0_d.h"
54#include "oss/oss_3_0_sh_mask.h"
32 55
33struct iceland_smu_private_data
34{
35 uint8_t *header;
36 uint8_t *mec_image;
37 uint32_t header_addr_high;
38 uint32_t header_addr_low;
39};
40 56
41#endif 57#endif
58
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
new file mode 100644
index 000000000000..bce00096d80d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -0,0 +1,412 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef DGPU_VI_PP_SMC_H
25#define DGPU_VI_PP_SMC_H
26
27
28#pragma pack(push, 1)
29
30#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
31
32#define PPSMC_SWSTATE_FLAG_DC 0x01
33#define PPSMC_SWSTATE_FLAG_UVD 0x02
34#define PPSMC_SWSTATE_FLAG_VCE 0x04
35
36#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
37#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
38#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
39
40#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
41#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
42#define PPSMC_SYSTEMFLAG_GDDR5 0x04
43
44#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
45
46#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
47#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
48
49#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
50#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
51
52#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
53#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
54
55
56#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
57#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
58#define PPSMC_DPM2FLAGS_OCP 0x04
59
60
61#define PPSMC_DISPLAY_WATERMARK_LOW 0
62#define PPSMC_DISPLAY_WATERMARK_HIGH 1
63
64
65#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
66#define PPSMC_STATEFLAG_POWERBOOST 0x02
67#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
68#define PPSMC_STATEFLAG_POWERSHIFT 0x08
69#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
70#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
71#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
72
73
74#define FDO_MODE_HARDWARE 0
75#define FDO_MODE_PIECE_WISE_LINEAR 1
76
77enum FAN_CONTROL {
78 FAN_CONTROL_FUZZY,
79 FAN_CONTROL_TABLE
80};
81
82
83#define PPSMC_Result_OK ((uint16_t)0x01)
84#define PPSMC_Result_NoMore ((uint16_t)0x02)
85
86#define PPSMC_Result_NotNow ((uint16_t)0x03)
87#define PPSMC_Result_Failed ((uint16_t)0xFF)
88#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
89#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
90
91typedef uint16_t PPSMC_Result;
92
93#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
94
95
96#define PPSMC_MSG_Halt ((uint16_t)0x10)
97#define PPSMC_MSG_Resume ((uint16_t)0x11)
98#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
99#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
100#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
101#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
102#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
103#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
104#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
105#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
106#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
107#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
108#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
109#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
110#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
111#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
112#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
113#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
114#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
115#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
116#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
117#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
118#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
119#define PPSMC_CACHistoryStart ((uint16_t)0x57)
120#define PPSMC_CACHistoryStop ((uint16_t)0x58)
121#define PPSMC_TDPClampingActive ((uint16_t)0x59)
122#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
123#define PPSMC_StartFanControl ((uint16_t)0x5B)
124#define PPSMC_StopFanControl ((uint16_t)0x5C)
125#define PPSMC_NoDisplay ((uint16_t)0x5D)
126#define PPSMC_HasDisplay ((uint16_t)0x5E)
127#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
128#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
129#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
130#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
131#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
132#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
133#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
134#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
135#define PPSMC_OCPActive ((uint16_t)0x6C)
136#define PPSMC_OCPInactive ((uint16_t)0x6D)
137#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
138#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
139#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
140#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
141#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
142#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
143#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
144#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
145#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
146#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
147#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
148#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
149#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
150#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
151#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
152#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
153
154#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
155#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
156#define PPSMC_FlushDataCache ((uint16_t)0x80)
157#define PPSMC_FlushInstrCache ((uint16_t)0x81)
158
159#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
160#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
161
162#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
163
164#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
165#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
166#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
167#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
168
169#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
170#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
171#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
172#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
173
174#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
175
176#define PPSMC_MSG_Test ((uint16_t) 0x100)
177#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
178#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
179#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
180#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
181#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
182#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
183#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
184#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
185#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
186#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
187#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
188#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
189#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
190#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
191#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
192#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
193#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
194#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
195#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
196#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
197#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
198#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
199#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
200#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
201#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
202#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
203#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
204#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
205#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
206#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
207#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
208#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
209#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
210#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
211#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
212#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
213
214#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
215#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
216#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
217#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
218#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
219#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
220#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
221#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
222#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
223#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
224#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
225#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
226#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
227#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
228#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
229#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
230#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
231#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
232#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
233#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
234#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
235#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
236#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
237#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
238#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
239#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
240#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
241#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
242#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
243#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
244#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
245#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
246#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
247#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
248#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
249#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
250#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
251
252#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
253#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
254#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
255#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
256#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
257#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
258#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
259#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
260#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
261#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
262#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
263#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
264#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
265#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
266#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
267#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
268#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
269#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
270#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
271#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
272#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
273#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
274#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
275#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
276#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
277#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
278#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
279#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
280#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
281#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
282#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
283#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
284#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
285#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
286#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
287#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
288#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
289#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
290#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
291#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
292#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
293#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
294#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
295#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
296#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
297#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
298#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
299#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
300#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
301#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
302#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
303#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
304#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
305#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
306#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
307#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
308#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
309#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
310#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
311#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
312#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
313#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
314#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
315#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
316#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
317#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
318#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
319#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
320#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
321#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
322#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
323#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
324#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
325#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
326#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
327#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
328#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
329#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
330#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
331#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
332#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
333
334#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
335#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
336#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
337#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
338#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
339#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
340#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
341#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
342#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
343
344#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
345#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
346#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
347#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
348#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
349#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
350#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
351
352#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
353#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
354#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
355#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
356#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
357#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
358#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
359#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
360#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
361#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
362#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
363#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
364#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
365#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
366#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
367#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
368#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
369#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
370#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
371#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
372#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
373#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
374#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
375#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
376#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
377#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
378
379#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
380#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
381#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
382#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
383#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
384#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
385#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
386#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
387
388#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
389#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
390#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
391
392#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
393#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
394
395#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
396
397#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
398#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
399#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
400#define PPSMC_MSG_GetData ((uint16_t) 0x801)
401#define PPSMC_MSG_SetData ((uint16_t) 0x802)
402
403typedef uint16_t PPSMC_Msg;
404
405#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
406#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
407#define PPSMC_EVENT_STATUS_DC 0x00000004
408
409#pragma pack(pop)
410
411#endif
412
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 34abfd2cde53..2139072065cc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -28,6 +28,7 @@
28 28
29struct pp_smumgr; 29struct pp_smumgr;
30struct pp_instance; 30struct pp_instance;
31struct pp_hwmgr;
31 32
32#define smu_lower_32_bits(n) ((uint32_t)(n)) 33#define smu_lower_32_bits(n) ((uint32_t)(n))
33#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) 34#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS {
53 AVFS_BTC_SMUMSG_ERROR 54 AVFS_BTC_SMUMSG_ERROR
54}; 55};
55 56
57enum SMU_TABLE {
58 SMU_UVD_TABLE = 0,
59 SMU_VCE_TABLE,
60 SMU_SAMU_TABLE,
61 SMU_BIF_TABLE,
62};
63
64enum SMU_TYPE {
65 SMU_SoftRegisters = 0,
66 SMU_Discrete_DpmTable,
67};
68
69enum SMU_MEMBER {
70 HandshakeDisables = 0,
71 VoltageChangeTimeout,
72 AverageGraphicsActivity,
73 PreVBlankGap,
74 VBlankTimeout,
75 UcodeLoadStatus,
76 UvdBootLevel,
77 VceBootLevel,
78 SamuBootLevel,
79 LowSclkInterruptThreshold,
80};
81
82
83enum SMU_MAC_DEFINITION {
84 SMU_MAX_LEVELS_GRAPHICS = 0,
85 SMU_MAX_LEVELS_MEMORY,
86 SMU_MAX_LEVELS_LINK,
87 SMU_MAX_ENTRIES_SMIO,
88 SMU_MAX_LEVELS_VDDC,
89 SMU_MAX_LEVELS_VDDGFX,
90 SMU_MAX_LEVELS_VDDCI,
91 SMU_MAX_LEVELS_MVDD,
92 SMU_UVD_MCLK_HANDSHAKE_DISABLE,
93};
94
95
56struct pp_smumgr_func { 96struct pp_smumgr_func {
57 int (*smu_init)(struct pp_smumgr *smumgr); 97 int (*smu_init)(struct pp_smumgr *smumgr);
58 int (*smu_fini)(struct pp_smumgr *smumgr); 98 int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,6 +109,18 @@ struct pp_smumgr_func {
69 int (*download_pptable_settings)(struct pp_smumgr *smumgr, 109 int (*download_pptable_settings)(struct pp_smumgr *smumgr,
70 void **table); 110 void **table);
71 int (*upload_pptable_settings)(struct pp_smumgr *smumgr); 111 int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
112 int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
113 int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
114 int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
115 int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
116 int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
117 int (*init_smc_table)(struct pp_hwmgr *hwmgr);
118 int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
119 int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
120 int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
121 uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
122 uint32_t (*get_mac_definition)(uint32_t value);
123 bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
72}; 124};
73 125
74struct pp_smumgr { 126struct pp_smumgr {
@@ -127,6 +179,24 @@ extern int tonga_smum_init(struct pp_smumgr *smumgr);
127extern int fiji_smum_init(struct pp_smumgr *smumgr); 179extern int fiji_smum_init(struct pp_smumgr *smumgr);
128extern int polaris10_smum_init(struct pp_smumgr *smumgr); 180extern int polaris10_smum_init(struct pp_smumgr *smumgr);
129 181
182extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
183
184extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
185extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
186extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
187 void *input, void *output, void *storage, int result);
188extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
189 void *input, void *output, void *storage, int result);
190extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
191extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
192extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
193extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
194extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
195 uint32_t type, uint32_t member);
196extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
197
198extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
199
130#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT 200#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
131 201
132#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK 202#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 19e79469f6bc..51ff08301651 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,8 +2,9 @@
2# Makefile for the 'smu manager' sub-component of powerplay. 2# Makefile for the 'smu manager' sub-component of powerplay.
3# It provides the smu management services for the driver. 3# It provides the smu management services for the driver.
4 4
5SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ 5SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
6 polaris10_smumgr.o iceland_smumgr.o 6 polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
7 smu7_smumgr.o iceland_smc.o
7 8
8AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 9AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
9 10
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
new file mode 100644
index 000000000000..76310ac7ef0d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -0,0 +1,2374 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "fiji_smc.h"
25#include "smu7_dyn_defaults.h"
26
27#include "smu7_hwmgr.h"
28#include "hardwaremanager.h"
29#include "ppatomctrl.h"
30#include "pp_debug.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "fiji_smumgr.h"
34#include "pppcielanes.h"
35#include "smu7_ppsmc.h"
36#include "smu73.h"
37#include "smu/smu_7_1_3_d.h"
38#include "smu/smu_7_1_3_sh_mask.h"
39#include "gmc/gmc_8_1_d.h"
40#include "gmc/gmc_8_1_sh_mask.h"
41#include "bif/bif_5_0_d.h"
42#include "bif/bif_5_0_sh_mask.h"
43#include "dce/dce_10_0_d.h"
44#include "dce/dce_10_0_sh_mask.h"
45#include "smu7_smumgr.h"
46
/* Scale between pptable voltage values and the units the SMC expects */
#define VOLTAGE_SCALE 4
/* Number of entries in fiji_power_tune_data_set_array */
#define POWERTUNE_DEFAULT_SET_MAX 1
/* ULV voltage offset conversion: offset * SCALE2 / SCALE1
 * (see fiji_populate_ulv_level)
 */
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
/* Fallback separation assumed between VDDC and VDDCI when the dependency
 * table carries no explicit vddci (see fiji_get_dependency_volt_by_clk)
 */
#define VDDC_VDDCI_DELTA 300
/* MC arbiter register set F1 -- NOTE(review): not referenced in this chunk */
#define MC_CG_ARB_FREQ_F1 0x0b

/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
 * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
 */
static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
{600, 1050, 3, 0}, {600, 1050, 6, 1} };

/* [FF, SS] type, [] 4 voltage ranges, and
 * [Floor Freq, Boundary Freq, VID min , VID max]
 */
static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };

/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
 */
static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
{0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };

static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
		/*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
		{1,               0xF,             0xFD,
		/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
		0x19,        5,               45}
};
79
80/* PPGen has the gain setting generated in x * 100 unit
81 * This function is to convert the unit to x * 4096(0x1000) unit.
82 * This is the unit expected by SMC firmware
83 */
84static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
85 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
86 uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
87{
88 uint32_t i;
89 uint16_t vddci;
90 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
91 *voltage = *mvdd = 0;
92
93
94 /* clock - voltage dependency table is empty table */
95 if (dep_table->count == 0)
96 return -EINVAL;
97
98 for (i = 0; i < dep_table->count; i++) {
99 /* find first sclk bigger than request */
100 if (dep_table->entries[i].clk >= clock) {
101 *voltage |= (dep_table->entries[i].vddc *
102 VOLTAGE_SCALE) << VDDC_SHIFT;
103 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
104 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
105 VOLTAGE_SCALE) << VDDCI_SHIFT;
106 else if (dep_table->entries[i].vddci)
107 *voltage |= (dep_table->entries[i].vddci *
108 VOLTAGE_SCALE) << VDDCI_SHIFT;
109 else {
110 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
111 (dep_table->entries[i].vddc -
112 VDDC_VDDCI_DELTA));
113 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
114 }
115
116 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
117 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
118 VOLTAGE_SCALE;
119 else if (dep_table->entries[i].mvdd)
120 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
121 VOLTAGE_SCALE;
122
123 *voltage |= 1 << PHASES_SHIFT;
124 return 0;
125 }
126 }
127
128 /* sclk is bigger than max sclk in the dependence table */
129 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
130
131 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
132 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
133 VOLTAGE_SCALE) << VDDCI_SHIFT;
134 else if (dep_table->entries[i-1].vddci) {
135 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
136 (dep_table->entries[i].vddc -
137 VDDC_VDDCI_DELTA));
138 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
139 }
140
141 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
142 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
143 else if (dep_table->entries[i].mvdd)
144 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
145
146 return 0;
147}
148
149
/* PPGen emits fan gains in x * 100 units; the SMC firmware expects
 * x * 4096 (0x1000) fixed-point.  Convert between the two.
 */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)(((uint32_t)raw_setting * 4096) / 100);
}
156
157static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
158{
159 switch (line) {
160 case SMU7_I2CLineID_DDC1:
161 *scl = SMU7_I2C_DDC1CLK;
162 *sda = SMU7_I2C_DDC1DATA;
163 break;
164 case SMU7_I2CLineID_DDC2:
165 *scl = SMU7_I2C_DDC2CLK;
166 *sda = SMU7_I2C_DDC2DATA;
167 break;
168 case SMU7_I2CLineID_DDC3:
169 *scl = SMU7_I2C_DDC3CLK;
170 *sda = SMU7_I2C_DDC3DATA;
171 break;
172 case SMU7_I2CLineID_DDC4:
173 *scl = SMU7_I2C_DDC4CLK;
174 *sda = SMU7_I2C_DDC4DATA;
175 break;
176 case SMU7_I2CLineID_DDC5:
177 *scl = SMU7_I2C_DDC5CLK;
178 *sda = SMU7_I2C_DDC5DATA;
179 break;
180 case SMU7_I2CLineID_DDC6:
181 *scl = SMU7_I2C_DDC6CLK;
182 *sda = SMU7_I2C_DDC6DATA;
183 break;
184 case SMU7_I2CLineID_SCLSDA:
185 *scl = SMU7_I2C_SCL;
186 *sda = SMU7_I2C_SDA;
187 break;
188 case SMU7_I2CLineID_DDCVGA:
189 *scl = SMU7_I2C_DDCVGACLK;
190 *sda = SMU7_I2C_DDCVGADATA;
191 break;
192 default:
193 *scl = 0;
194 *sda = 0;
195 break;
196 }
197}
198
199static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
200{
201 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
202 struct phm_ppt_v1_information *table_info =
203 (struct phm_ppt_v1_information *)(hwmgr->pptable);
204
205 if (table_info &&
206 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
207 table_info->cac_dtp_table->usPowerTuneDataSetID)
208 smu_data->power_tune_defaults =
209 &fiji_power_tune_data_set_array
210 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
211 else
212 smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
213
214}
215
/* Fill the BAPM/power-containment parameters of the SMC DPM table from the
 * pptable's CAC/TDP data and the advanced fan control parameters.
 *
 * Returns 0 (the operating-temperature range check only logs on failure).
 */
static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{

	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
	const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;

	SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table =
			&hwmgr->thermal_controller.advanceFanControlParameters;
	uint8_t uc_scl, uc_sda;

	/* TDP number of fraction bits are changed from 8 to 7 for Fiji
	 * as requested by SMC team
	 */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 128));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 128));

	/* NOTE: empty code argument -- an out-of-range temp only logs here */
	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",
			);

	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;

	/* The following are for new Fiji Multi-input fan/thermal control */
	/* Temperature limits are sent to the SMC as temp * 256 (8.8 fixed) */
	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitLiquid1 * 256);
	dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitLiquid2 * 256);
	dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitVrVddc * 256);
	dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitVrMvdd * 256);
	dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitPlx * 256);

	/* Fan gains are rescaled from the pptable's x*100 units to x*4096 */
	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));
	dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainLiquid));
	dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainVrVddc));
	dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
	dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainPlx));
	dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHbm));

	dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
	dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
	dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
	dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;

	/* Resolve each sensor's I2C line id to its SCL/SDA pin pair */
	get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Liquid_I2C_LineSCL = uc_scl;
	dpm_table->Liquid_I2C_LineSDA = uc_sda;

	get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Vr_I2C_LineSCL = uc_scl;
	dpm_table->Vr_I2C_LineSDA = uc_sda;

	get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
	dpm_table->Plx_I2C_LineSCL = uc_scl;
	dpm_table->Plx_I2C_LineSDA = uc_sda;

	return 0;
}
298
299
300static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
301{
302 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
303 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
304
305 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
306 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
307 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
308 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
309
310 return 0;
311}
312
313
314static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
315{
316 uint16_t tdc_limit;
317 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
318 struct phm_ppt_v1_information *table_info =
319 (struct phm_ppt_v1_information *)(hwmgr->pptable);
320 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
321
322 /* TDC number of fraction bits are changed from 8 to 7
323 * for Fiji as requested by SMC team
324 */
325 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
326 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
327 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
328 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
329 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
330 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
331
332 return 0;
333}
334
335static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
336{
337 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
338 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
339 uint32_t temp;
340
341 if (smu7_read_smc_sram_dword(hwmgr->smumgr,
342 fuse_table_offset +
343 offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
344 (uint32_t *)&temp, SMC_RAM_END))
345 PP_ASSERT_WITH_CODE(false,
346 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
347 return -EINVAL);
348 else {
349 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
350 smu_data->power_tune_table.LPMLTemperatureMin =
351 (uint8_t)((temp >> 16) & 0xff);
352 smu_data->power_tune_table.LPMLTemperatureMax =
353 (uint8_t)((temp >> 8) & 0xff);
354 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
355 }
356 return 0;
357}
358
359static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
360{
361 int i;
362 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
363
364 /* Currently not used. Set all to zero. */
365 for (i = 0; i < 16; i++)
366 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
367
368 return 0;
369}
370
371static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
372{
373 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
374
375 if ((hwmgr->thermal_controller.advanceFanControlParameters.
376 usFanOutputSensitivity & (1 << 15)) ||
377 0 == hwmgr->thermal_controller.advanceFanControlParameters.
378 usFanOutputSensitivity)
379 hwmgr->thermal_controller.advanceFanControlParameters.
380 usFanOutputSensitivity = hwmgr->thermal_controller.
381 advanceFanControlParameters.usDefaultFanOutputSensitivity;
382
383 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
384 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
385 advanceFanControlParameters.usFanOutputSensitivity);
386 return 0;
387}
388
389static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
390{
391 int i;
392 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
393
394 /* Currently not used. Set all to zero. */
395 for (i = 0; i < 16; i++)
396 smu_data->power_tune_table.GnbLPML[i] = 0;
397
398 return 0;
399}
400
/* Placeholder for populating the GnbLPML min/max VID indices (PmFuses
 * DW19); currently a no-op on Fiji.  Always returns 0.
 */
static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
405
406static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
407{
408 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
409 struct phm_ppt_v1_information *table_info =
410 (struct phm_ppt_v1_information *)(hwmgr->pptable);
411 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
412 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
413 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
414
415 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
416 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
417
418 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
419 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
420 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
421 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
422
423 return 0;
424}
425
/* Populate the full SMU73 PmFuses table (DW6-DW20) and download it to SMC
 * SRAM.  Does nothing unless the PowerContainment platform cap is enabled.
 *
 * Returns 0 on success (or when power containment is disabled), -EINVAL on
 * any SMC read, populate or copy failure.
 */
static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	uint32_t pm_fuse_table_offset;
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* locate PmFuses inside SMC SRAM via the firmware header */
		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU73_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW6 */
		if (fiji_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (fiji_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != fiji_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW14 */
		if (fiji_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		/* DW15-DW18 */
		if (fiji_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW19 */
		if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		/* DW20 */
		if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		/* push the fully populated table to SMC SRAM */
		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
496
497/**
498* Preparation of vddc and vddgfx CAC tables for SMC.
499*
500* @param hwmgr the address of the hardware manager
501* @param table the SMC DPM table structure to be populated
502* @return always 0
503*/
504static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
505 struct SMU73_Discrete_DpmTable *table)
506{
507 uint32_t count;
508 uint8_t index;
509 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
510 struct phm_ppt_v1_information *table_info =
511 (struct phm_ppt_v1_information *)(hwmgr->pptable);
512 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
513 table_info->vddc_lookup_table;
514 /* tables is already swapped, so in order to use the value from it,
515 * we need to swap it back.
516 * We are populating vddc CAC data to BapmVddc table
517 * in split and merged mode
518 */
519
520 for (count = 0; count < lookup_table->count; count++) {
521 index = phm_get_voltage_index(lookup_table,
522 data->vddc_voltage_table.entries[count].value);
523 table->BapmVddcVidLoSidd[count] =
524 convert_to_vid(lookup_table->entries[index].us_cac_low);
525 table->BapmVddcVidHiSidd[count] =
526 convert_to_vid(lookup_table->entries[index].us_cac_high);
527 }
528
529 return 0;
530}
531
532/**
533* Preparation of voltage tables for SMC.
534*
535* @param hwmgr the address of the hardware manager
536* @param table the SMC DPM table structure to be populated
537* @return always 0
538*/
539
540static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
541 struct SMU73_Discrete_DpmTable *table)
542{
543 int result;
544
545 result = fiji_populate_cac_table(hwmgr, table);
546 PP_ASSERT_WITH_CODE(0 == result,
547 "can not populate CAC voltage tables to SMC",
548 return -EINVAL);
549
550 return 0;
551}
552
553static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
554 struct SMU73_Discrete_Ulv *state)
555{
556 int result = 0;
557
558 struct phm_ppt_v1_information *table_info =
559 (struct phm_ppt_v1_information *)(hwmgr->pptable);
560
561 state->CcPwrDynRm = 0;
562 state->CcPwrDynRm1 = 0;
563
564 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
565 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
566 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
567
568 state->VddcPhase = 1;
569
570 if (!result) {
571 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
572 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
573 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
574 }
575 return result;
576}
577
578static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
579 struct SMU73_Discrete_DpmTable *table)
580{
581 return fiji_populate_ulv_level(hwmgr, &table->Ulv);
582}
583
/* Populate the SMC PCIe link levels from the PCIe speed DPM table and
 * record the resulting level count and enable mask.
 *
 * Note the loop bound is <= count on purpose: the entry at index `count`
 * is the reserved PCIE boot level.  Always returns 0.
 */
static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		/* fixed up/down activity thresholds, byte-swapped for the SMC */
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
612
613
/**
* Calculates the SCLK dividers using the provided engine clock
*
* @param    hwmgr      the address of the hardware manager
* @param    clock      the engine clock to use to populate the structure
* @param    sclk       the SMC SCLK structure to be populated
* @return   0 on success, or the atomctrl error when the VBIOS divider
*           lookup fails
*/
static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	/* start from the SPLL register values captured from the VBIOS */
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A, dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	/* optionally enable spread spectrum around the computed VCO frequency */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ssInfo;

		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ssInfo)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 *
			 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
			 */
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ssInfo.speed_spectrum_rate);
			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency = clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}
701
702/**
703* Populates single SMC SCLK structure using the provided engine clock
704*
705* @param hwmgr the address of the hardware manager
706* @param clock the engine clock to use to populate the structure
707* @param sclk the SMC SCLK structure to be populated
708*/
709
710static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
711 uint32_t clock, uint16_t sclk_al_threshold,
712 struct SMU73_Discrete_GraphicsLevel *level)
713{
714 int result;
715 /* PP_Clocks minClocks; */
716 uint32_t threshold, mvdd;
717 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
718 struct phm_ppt_v1_information *table_info =
719 (struct phm_ppt_v1_information *)(hwmgr->pptable);
720
721 result = fiji_calculate_sclk_params(hwmgr, clock, level);
722
723 /* populate graphics levels */
724 result = fiji_get_dependency_volt_by_clk(hwmgr,
725 table_info->vdd_dep_on_sclk, clock,
726 (uint32_t *)(&level->MinVoltage), &mvdd);
727 PP_ASSERT_WITH_CODE((0 == result),
728 "can not find VDDC voltage value for "
729 "VDDC engine clock dependency table",
730 return result);
731
732 level->SclkFrequency = clock;
733 level->ActivityLevel = sclk_al_threshold;
734 level->CcPwrDynRm = 0;
735 level->CcPwrDynRm1 = 0;
736 level->EnabledForActivity = 0;
737 level->EnabledForThrottle = 1;
738 level->UpHyst = 10;
739 level->DownHyst = 0;
740 level->VoltageDownHyst = 0;
741 level->PowerThrottle = 0;
742
743 threshold = clock * data->fast_watermark_threshold / 100;
744
745 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
746
747 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
748 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
749 hwmgr->display_config.min_core_set_clock_in_sr);
750
751
752 /* Default to slow, highest DPM level will be
753 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
754 */
755 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
756
757 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
758 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
759 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
760 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
761 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
762 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
763 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
764 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
765 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
766
767 return 0;
768}
/**
* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
*
* @param    hwmgr      the address of the hardware manager
* @return   0 on success, otherwise the error from populating a level or
*           from copying the level array to SMC SRAM
*/
int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);

	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* location and size of the GraphicsLevel array inside SMC SRAM */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
			SMU73_MAX_LEVELS_GRAPHICS;
	struct SMU73_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
			lowest_pcie_level_enabled = 0,
			mid_pcie_level_enabled = 0,
			count = 0;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = fiji_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)smu_data->activity_target[i],
				&levels[i]);
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now.*/
	levels[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	if (pcie_table != NULL) {
		/* map each SCLK level to a PCIe level, clamped to the last entry */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		/* no PCIe table: derive the lowest, mid and highest enabled
		 * PCIe DPM levels from the pcie_dpm_enable_mask bits
		 */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		/* count of disabled levels between lowest+1 and highest */
		while ((count < hightest_pcie_level_enabled) &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
867
868
869/**
870 * MCLK Frequency Ratio
871 * SEQ_CG_RESP Bit[31:24] - 0x0
872 * Bit[27:24] \96 DDR3 Frequency ratio
873 * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
874 * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
875 * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
876 * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
877 * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
878 * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
879 * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
880 * 400 < 0x7 <= 450MHz, 800 < 0xF
881 */
static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
{
	uint32_t ratio;

	/*
	 * Map the memory clock (in 10 kHz units, per the table in the
	 * comment above) onto the 4-bit SEQ_CG_RESP frequency-ratio code.
	 * Code 0x0 covers everything up to and including 100 MHz.
	 */
	if (mem_clock <= 10000)
		return 0x0;

	/* Each following code spans a 50 MHz band; 0xf is open-ended. */
	ratio = ((mem_clock - 10001) / 5000) + 1;

	return (ratio > 0xf) ? 0xf : (uint8_t)ratio;
}
917
918/**
919* Populates the SMC MCLK structure using the provided memory clock
920*
921* @param hwmgr the address of the hardware manager
922* @param clock the memory clock to use to populate the structure
* @param mclk the SMC MCLK structure to be populated
924*/
925static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
926 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
927{
928 struct pp_atomctrl_memory_clock_param mem_param;
929 int result;
930
931 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
932 PP_ASSERT_WITH_CODE((0 == result),
933 "Failed to get Memory PLL Dividers.",
934 );
935
936 /* Save the result data to outpupt memory level structure */
937 mclk->MclkFrequency = clock;
938 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
939 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
940
941 return result;
942}
943
/**
 * Populate one SMC memory (MCLK) DPM level for the given memory clock.
 *
 * Looks up the minimum VDDC/MVDD for @clock, fills the throttle/hysteresis
 * defaults, decides whether memory stutter mode may be enabled, and finally
 * computes the PLL divider parameters via fiji_calculate_mclk_params().
 * On success the multi-byte fields are converted to SMC byte order.
 */
static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	/* Stutter mode is only considered below this clock (10 kHz units assumed
	 * from the surrounding clock tables -- TODO confirm). */
	uint32_t mclk_stutter_mode_threshold = 60000;

	if (table_info->vdd_dep_on_mclk) {
		/* Minimum voltage for this memory clock from the dependency table. */
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				(uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;

	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* enable stutter mode if all the follow condition applied
	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
	 * &(data->DisplayTiming.numExistingDisplays));
	 */
	/* NOTE(review): display count is hard-coded to 1 here instead of being
	 * queried (see the commented-out PECI call above) -- verify intent. */
	data->display_timing.num_existing_displays = 1;

	/* Stutter only when below the threshold, UVD is idle, and the display
	 * pipe already has stutter enabled in hardware. */
	if (mclk_stutter_mode_threshold &&
	    (clock <= mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled) &&
	    (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
			STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
	if (!result) {
		/* Convert multi-byte fields to SMC (big-endian) byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
994
995/**
996* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
997*
998* @param hwmgr the address of the hardware manager
999*/
int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	/* populate MCLK dpm table to SMU7 */
	/* SMC SRAM address of the MemoryLevel array inside the DPM table. */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
	/* Always upload the full array, not just the populated count. */
	uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
			SMU73_MAX_LEVELS_MEMORY;
	struct SMU73_Discrete_MemoryLevel *levels =
			smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	/* Fill one SMC memory level per trimmed MCLK DPM level. */
	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero",
				return -EINVAL);
		result = fiji_populate_single_memory_level(hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
	}

	/* Only enable level 0 for now. */
	levels[0].EnabledForActivity = 1;

	/* in order to prevent MC activity from stutter mode to push DPM up.
	 * the UVD change complements this by putting the MCLK in
	 * a higher state by default such that we are not effected by
	 * up threshold or and MCLK DPM latency.
	 */
	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount =
			(uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* set highest level watermark to high */
	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
1051
1052
1053/**
1054* Populates the SMC MVDD structure using the provided memory clock.
1055*
1056* @param hwmgr the address of the hardware manager
1057* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
* @param smio_pat the SMIO pattern structure whose Voltage field is populated
1059*/
1060static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1061 uint32_t mclk, SMIO_Pattern *smio_pat)
1062{
1063 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1064 struct phm_ppt_v1_information *table_info =
1065 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1066 uint32_t i = 0;
1067
1068 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1069 /* find mvdd value which clock is more than request */
1070 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1071 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1072 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1073 break;
1074 }
1075 }
1076 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1077 "MVDD Voltage is outside the supported range.",
1078 return -EINVAL);
1079 } else
1080 return -EINVAL;
1081
1082 return 0;
1083}
1084
/**
 * Populate the SMC ACPI (lowest-power) SCLK and MCLK levels.
 *
 * Picks either the DPM0 values (with a voltage lookup) or the VBIOS
 * boot-up values depending on whether SCLK/MCLK DPM is enabled, programs
 * the SPLL into its powered-down/reset configuration, resolves MVDD for
 * the memory ACPI level, and converts all multi-byte fields to SMC byte
 * order.
 */
static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;

	/* The ACPI state never carries the DC (battery) flag. */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (!data->sclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0,
		 * already converted to SMC_UL */
		table->ACPILevel.SclkFrequency =
				data->dpm_table.sclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_sclk,
				table->ACPILevel.SclkFrequency,
				(uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDC voltage value " \
				"in Clock Dependency Table",
				);
	} else {
		/* SCLK DPM disabled: fall back to the VBIOS boot-up values. */
		table->ACPILevel.SclkFrequency =
				data->vbios_boot_state.sclk_bootup_value;
		table->ACPILevel.MinVoltage =
				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
	}

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
			table->ACPILevel.SclkFrequency, &dividers);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down, hold it in reset, and mux SCLK away from it
	 * for the low-power ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
			SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Convert every multi-byte ACPI-level field to SMC byte order. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	if (!data->mclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
		table->MemoryACPILevel.MclkFrequency =
				data->dpm_table.mclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk,
				table->MemoryACPILevel.MclkFrequency,
				(uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
				);
	} else {
		/* MCLK DPM disabled: fall back to the VBIOS boot-up values. */
		table->MemoryACPILevel.MclkFrequency =
				data->vbios_boot_state.mclk_bootup_value;
		table->MemoryACPILevel.MinVoltage =
				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
	}

	us_mvdd = 0;
	/* With no MVDD control (or MCLK DPM disabled) use the VBIOS boot-up
	 * MVDD; otherwise look it up for the DPM0 memory clock. */
	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!fiji_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
1206
1207static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1208 SMU73_Discrete_DpmTable *table)
1209{
1210 int result = -EINVAL;
1211 uint8_t count;
1212 struct pp_atomctrl_clock_dividers_vi dividers;
1213 struct phm_ppt_v1_information *table_info =
1214 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1215 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1216 table_info->mm_dep_table;
1217
1218 table->VceLevelCount = (uint8_t)(mm_table->count);
1219 table->VceBootLevel = 0;
1220
1221 for (count = 0; count < table->VceLevelCount; count++) {
1222 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1223 table->VceLevel[count].MinVoltage = 0;
1224 table->VceLevel[count].MinVoltage |=
1225 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1226 table->VceLevel[count].MinVoltage |=
1227 ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
1228 VOLTAGE_SCALE) << VDDCI_SHIFT;
1229 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1230
1231 /*retrieve divider value for VBIOS */
1232 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1233 table->VceLevel[count].Frequency, &dividers);
1234 PP_ASSERT_WITH_CODE((0 == result),
1235 "can not find divide id for VCE engine clock",
1236 return result);
1237
1238 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1239
1240 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1241 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1242 }
1243 return result;
1244}
1245
1246static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1247 SMU73_Discrete_DpmTable *table)
1248{
1249 int result = -EINVAL;
1250 uint8_t count;
1251 struct pp_atomctrl_clock_dividers_vi dividers;
1252 struct phm_ppt_v1_information *table_info =
1253 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1254 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1255 table_info->mm_dep_table;
1256
1257 table->AcpLevelCount = (uint8_t)(mm_table->count);
1258 table->AcpBootLevel = 0;
1259
1260 for (count = 0; count < table->AcpLevelCount; count++) {
1261 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
1262 table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1263 VOLTAGE_SCALE) << VDDC_SHIFT;
1264 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1265 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1266 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1267
1268 /* retrieve divider value for VBIOS */
1269 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1270 table->AcpLevel[count].Frequency, &dividers);
1271 PP_ASSERT_WITH_CODE((0 == result),
1272 "can not find divide id for engine clock", return result);
1273
1274 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1275
1276 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1277 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
1278 }
1279 return result;
1280}
1281
1282static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1283 SMU73_Discrete_DpmTable *table)
1284{
1285 int result = -EINVAL;
1286 uint8_t count;
1287 struct pp_atomctrl_clock_dividers_vi dividers;
1288 struct phm_ppt_v1_information *table_info =
1289 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1290 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1291 table_info->mm_dep_table;
1292
1293 table->SamuBootLevel = 0;
1294 table->SamuLevelCount = (uint8_t)(mm_table->count);
1295
1296 for (count = 0; count < table->SamuLevelCount; count++) {
1297 /* not sure whether we need evclk or not */
1298 table->SamuLevel[count].MinVoltage = 0;
1299 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1300 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1301 VOLTAGE_SCALE) << VDDC_SHIFT;
1302 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1303 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1304 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1305
1306 /* retrieve divider value for VBIOS */
1307 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1308 table->SamuLevel[count].Frequency, &dividers);
1309 PP_ASSERT_WITH_CODE((0 == result),
1310 "can not find divide id for samu clock", return result);
1311
1312 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1313
1314 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1315 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1316 }
1317 return result;
1318}
1319
1320static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1321 int32_t eng_clock, int32_t mem_clock,
1322 struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
1323{
1324 uint32_t dram_timing;
1325 uint32_t dram_timing2;
1326 uint32_t burstTime;
1327 ULONG state, trrds, trrdl;
1328 int result;
1329
1330 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1331 eng_clock, mem_clock);
1332 PP_ASSERT_WITH_CODE(result == 0,
1333 "Error calling VBIOS to set DRAM_TIMING.", return result);
1334
1335 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1336 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1337 burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
1338
1339 state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
1340 trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
1341 trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
1342
1343 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1344 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1345 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1346 arb_regs->TRRDS = (uint8_t)trrds;
1347 arb_regs->TRRDL = (uint8_t)trrdl;
1348
1349 return 0;
1350}
1351
1352static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1353{
1354 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1355 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1356 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
1357 uint32_t i, j;
1358 int result = 0;
1359
1360 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1361 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1362 result = fiji_populate_memory_timing_parameters(hwmgr,
1363 data->dpm_table.sclk_table.dpm_levels[i].value,
1364 data->dpm_table.mclk_table.dpm_levels[j].value,
1365 &arb_regs.entries[i][j]);
1366 if (result)
1367 break;
1368 }
1369 }
1370
1371 if (!result)
1372 result = smu7_copy_bytes_to_smc(
1373 hwmgr->smumgr,
1374 smu_data->smu7_data.arb_table_start,
1375 (uint8_t *)&arb_regs,
1376 sizeof(SMU73_Discrete_MCArbDramTimingTable),
1377 SMC_RAM_END);
1378 return result;
1379}
1380
1381static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1382 struct SMU73_Discrete_DpmTable *table)
1383{
1384 int result = -EINVAL;
1385 uint8_t count;
1386 struct pp_atomctrl_clock_dividers_vi dividers;
1387 struct phm_ppt_v1_information *table_info =
1388 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1389 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1390 table_info->mm_dep_table;
1391
1392 table->UvdLevelCount = (uint8_t)(mm_table->count);
1393 table->UvdBootLevel = 0;
1394
1395 for (count = 0; count < table->UvdLevelCount; count++) {
1396 table->UvdLevel[count].MinVoltage = 0;
1397 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1398 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1399 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1400 VOLTAGE_SCALE) << VDDC_SHIFT;
1401 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1402 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1403 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1404
1405 /* retrieve divider value for VBIOS */
1406 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1407 table->UvdLevel[count].VclkFrequency, &dividers);
1408 PP_ASSERT_WITH_CODE((0 == result),
1409 "can not find divide id for Vclk clock", return result);
1410
1411 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1412
1413 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1414 table->UvdLevel[count].DclkFrequency, &dividers);
1415 PP_ASSERT_WITH_CODE((0 == result),
1416 "can not find divide id for Dclk clock", return result);
1417
1418 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1419
1420 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1421 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1422 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1423
1424 }
1425 return result;
1426}
1427
/**
 * Populate the SMC boot levels and boot voltages from the VBIOS boot-up
 * state. Boot levels default to 0 if the lookups do not find a match.
 */
static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	/* NOTE(review): both phm_find_boot_level() results are assigned but
	 * never checked; on failure the levels simply stay 0 (set above).
	 * Confirm this silent fallback is intended. */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	/* Boot voltages go to the SMC in its byte order. */
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
1459
1460static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1461{
1462 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1463 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1464 struct phm_ppt_v1_information *table_info =
1465 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1466 uint8_t count, level;
1467
1468 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1469 for (level = 0; level < count; level++) {
1470 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1471 data->vbios_boot_state.sclk_bootup_value) {
1472 smu_data->smc_state_table.GraphicsBootLevel = level;
1473 break;
1474 }
1475 }
1476
1477 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1478 for (level = 0; level < count; level++) {
1479 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1480 data->vbios_boot_state.mclk_bootup_value) {
1481 smu_data->smc_state_table.MemoryBootLevel = level;
1482 break;
1483 }
1484 }
1485
1486 return 0;
1487}
1488
/**
 * Populate the clock-stretcher lookup and data tables in the cached SMC
 * state, and program the PWR_CKS_ENABLE / PWR_CKS_CNTL registers.
 *
 * The part is classified as slow-silicon (SS) or fast-silicon (FF) from
 * the ring-oscillator value read out of the efuses, and per-SCLK voltage
 * offsets are derived from empirical fit formulas.
 */
static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_EFUSE to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	/* RO value lives in the top byte of the first fuse word; the low
	 * nibble of the second word selects the scaling formula. */
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type 0 = fast silicon (FF), type 1 = slow silicon (SS) */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Empirical voltage-vs-clock fit with and without clock
		 * stretching; the difference becomes the per-level offset. */
		volt_without_cks = (uint32_t)((14041 *
			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
		volt_with_cks = (uint32_t)((13946 *
			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* Pulse a master reset with static enable on, stretch disabled. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		/* Unsupported stretch amount: drop the capability entirely. */
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* Clear the CKS control fields we are about to program. */
	value &= 0xFFC2FF87;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
	/* Top graphics level SCLK in MHz (state table value is SMC-endian
	 * and in 10 kHz units). */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (fiji_clock_stretch_amount_conversion
				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
				[stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		smu_data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
		smu_data->smc_state_table.ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 * Sclk's endianness has already been converted,
			 * and it's in 10Khz unit,
			 * as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >=
					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
				cks_setting |= 0x2;
				if (clock_freq <
						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
					cks_setting |= 0x1;
			}
			smu_data->smc_state_table.ClockStretcherDataTable.
					ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* Finally clear bit 0 of PWR_CKS_CNTL. */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
1645
1646/**
1647* Populates the SMC VRConfig field in DPM table.
1648*
1649* @param hwmgr the address of the hardware manager
1650* @param table the SMC DPM table structure to be populated
1651* @return always 0
1652*/
1653static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
1654 struct SMU73_Discrete_DpmTable *table)
1655{
1656 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1657 uint16_t config;
1658
1659 config = VR_MERGED_WITH_VDDC;
1660 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1661
1662 /* Set Vddc Voltage Controller */
1663 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1664 config = VR_SVI2_PLANE_1;
1665 table->VRConfig |= config;
1666 } else {
1667 PP_ASSERT_WITH_CODE(false,
1668 "VDDC should be on SVI2 control in merged mode!",
1669 );
1670 }
1671 /* Set Vddci Voltage Controller */
1672 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1673 config = VR_SVI2_PLANE_2; /* only in merged mode */
1674 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1675 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1676 config = VR_SMIO_PATTERN_1;
1677 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1678 } else {
1679 config = VR_STATIC_VOLTAGE;
1680 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1681 }
1682 /* Set Mvdd Voltage Controller */
1683 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1684 config = VR_SVI2_PLANE_2;
1685 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1686 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1687 config = VR_SMIO_PATTERN_2;
1688 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1689 } else {
1690 config = VR_STATIC_VOLTAGE;
1691 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1692 }
1693
1694 return 0;
1695}
1696
1697static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
1698{
1699 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
1700 uint32_t tmp;
1701 int result;
1702
1703 /* This is a read-modify-write on the first byte of the ARB table.
1704 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
1705 * is the field 'current'.
1706 * This solution is ugly, but we never write the whole table only
1707 * individual fields in it.
1708 * In reality this field should not be in that structure
1709 * but in a soft register.
1710 */
1711 result = smu7_read_smc_sram_dword(smumgr,
1712 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1713
1714 if (result)
1715 return result;
1716
1717 tmp &= 0x00FFFFFF;
1718 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1719
1720 return smu7_write_smc_sram_dword(smumgr,
1721 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1722}
1723
1724/**
1725* Initializes the SMC table and uploads it
1726*
1727* @param hwmgr the address of the powerplay hardware manager.
1728* @param pInput the pointer to input data (PowerState)
1729* @return always 0
1730*/
1731int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
1732{
1733 int result;
1734 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1735 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1736 struct phm_ppt_v1_information *table_info =
1737 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1738 struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1739 uint8_t i;
1740 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1741
1742 fiji_initialize_power_tune_defaults(hwmgr);
1743
1744 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1745 fiji_populate_smc_voltage_tables(hwmgr, table);
1746
1747 table->SystemFlags = 0;
1748
1749 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1750 PHM_PlatformCaps_AutomaticDCTransition))
1751 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1752
1753 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1754 PHM_PlatformCaps_StepVddc))
1755 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1756
1757 if (data->is_memory_gddr5)
1758 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1759
1760 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
1761 result = fiji_populate_ulv_state(hwmgr, table);
1762 PP_ASSERT_WITH_CODE(0 == result,
1763 "Failed to initialize ULV state!", return result);
1764 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1765 ixCG_ULV_PARAMETER, 0x40035);
1766 }
1767
1768 result = fiji_populate_smc_link_level(hwmgr, table);
1769 PP_ASSERT_WITH_CODE(0 == result,
1770 "Failed to initialize Link Level!", return result);
1771
1772 result = fiji_populate_all_graphic_levels(hwmgr);
1773 PP_ASSERT_WITH_CODE(0 == result,
1774 "Failed to initialize Graphics Level!", return result);
1775
1776 result = fiji_populate_all_memory_levels(hwmgr);
1777 PP_ASSERT_WITH_CODE(0 == result,
1778 "Failed to initialize Memory Level!", return result);
1779
1780 result = fiji_populate_smc_acpi_level(hwmgr, table);
1781 PP_ASSERT_WITH_CODE(0 == result,
1782 "Failed to initialize ACPI Level!", return result);
1783
1784 result = fiji_populate_smc_vce_level(hwmgr, table);
1785 PP_ASSERT_WITH_CODE(0 == result,
1786 "Failed to initialize VCE Level!", return result);
1787
1788 result = fiji_populate_smc_acp_level(hwmgr, table);
1789 PP_ASSERT_WITH_CODE(0 == result,
1790 "Failed to initialize ACP Level!", return result);
1791
1792 result = fiji_populate_smc_samu_level(hwmgr, table);
1793 PP_ASSERT_WITH_CODE(0 == result,
1794 "Failed to initialize SAMU Level!", return result);
1795
1796 /* Since only the initial state is completely set up at this point
1797 * (the other states are just copies of the boot state) we only
1798 * need to populate the ARB settings for the initial state.
1799 */
1800 result = fiji_program_memory_timing_parameters(hwmgr);
1801 PP_ASSERT_WITH_CODE(0 == result,
1802 "Failed to Write ARB settings for the initial state.", return result);
1803
1804 result = fiji_populate_smc_uvd_level(hwmgr, table);
1805 PP_ASSERT_WITH_CODE(0 == result,
1806 "Failed to initialize UVD Level!", return result);
1807
1808 result = fiji_populate_smc_boot_level(hwmgr, table);
1809 PP_ASSERT_WITH_CODE(0 == result,
1810 "Failed to initialize Boot Level!", return result);
1811
1812 result = fiji_populate_smc_initailial_state(hwmgr);
1813 PP_ASSERT_WITH_CODE(0 == result,
1814 "Failed to initialize Boot State!", return result);
1815
1816 result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
1817 PP_ASSERT_WITH_CODE(0 == result,
1818 "Failed to populate BAPM Parameters!", return result);
1819
1820 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1821 PHM_PlatformCaps_ClockStretcher)) {
1822 result = fiji_populate_clock_stretcher_data_table(hwmgr);
1823 PP_ASSERT_WITH_CODE(0 == result,
1824 "Failed to populate Clock Stretcher Data Table!",
1825 return result);
1826 }
1827
1828 table->GraphicsVoltageChangeEnable = 1;
1829 table->GraphicsThermThrottleEnable = 1;
1830 table->GraphicsInterval = 1;
1831 table->VoltageInterval = 1;
1832 table->ThermalInterval = 1;
1833 table->TemperatureLimitHigh =
1834 table_info->cac_dtp_table->usTargetOperatingTemp *
1835 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1836 table->TemperatureLimitLow =
1837 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1838 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1839 table->MemoryVoltageChangeEnable = 1;
1840 table->MemoryInterval = 1;
1841 table->VoltageResponseTime = 0;
1842 table->PhaseResponseTime = 0;
1843 table->MemoryThermThrottleEnable = 1;
1844 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
1845 table->PCIeGenInterval = 1;
1846 table->VRConfig = 0;
1847
1848 result = fiji_populate_vr_config(hwmgr, table);
1849 PP_ASSERT_WITH_CODE(0 == result,
1850 "Failed to populate VRConfig setting!", return result);
1851
1852 table->ThermGpio = 17;
1853 table->SclkStepSize = 0x4000;
1854
1855 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1856 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
1857 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1858 PHM_PlatformCaps_RegulatorHot);
1859 } else {
1860 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
1861 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1862 PHM_PlatformCaps_RegulatorHot);
1863 }
1864
1865 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
1866 &gpio_pin)) {
1867 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
1868 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1869 PHM_PlatformCaps_AutomaticDCTransition);
1870 } else {
1871 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
1872 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1873 PHM_PlatformCaps_AutomaticDCTransition);
1874 }
1875
1876 /* Thermal Output GPIO */
1877 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
1878 &gpio_pin)) {
1879 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1880 PHM_PlatformCaps_ThermalOutGPIO);
1881
1882 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
1883
1884 /* For porlarity read GPIOPAD_A with assigned Gpio pin
1885 * since VBIOS will program this register to set 'inactive state',
1886 * driver can then determine 'active state' from this and
1887 * program SMU with correct polarity
1888 */
1889 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
1890 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
1891 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
1892
1893 /* if required, combine VRHot/PCC with thermal out GPIO */
1894 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1895 PHM_PlatformCaps_RegulatorHot) &&
1896 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1897 PHM_PlatformCaps_CombinePCCWithThermalSignal))
1898 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
1899 } else {
1900 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1901 PHM_PlatformCaps_ThermalOutGPIO);
1902 table->ThermOutGpio = 17;
1903 table->ThermOutPolarity = 1;
1904 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
1905 }
1906
1907 for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
1908 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
1909
1910 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1911 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
1912 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
1913 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
1914 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1915 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1916 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1917 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1918 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1919
1920 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
1921 result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
1922 smu_data->smu7_data.dpm_table_start +
1923 offsetof(SMU73_Discrete_DpmTable, SystemFlags),
1924 (uint8_t *)&(table->SystemFlags),
1925 sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
1926 SMC_RAM_END);
1927 PP_ASSERT_WITH_CODE(0 == result,
1928 "Failed to upload dpm data to SMC memory!", return result);
1929
1930 result = fiji_init_arb_table_index(hwmgr->smumgr);
1931 PP_ASSERT_WITH_CODE(0 == result,
1932 "Failed to upload arb data to SMC memory!", return result);
1933
1934 result = fiji_populate_pm_fuses(hwmgr);
1935 PP_ASSERT_WITH_CODE(0 == result,
1936 "Failed to populate PM fuses to SMC memory!", return result);
1937 return 0;
1938}
1939
1940/**
1941* Set up the fan table to control the fan using the SMC.
1942* @param hwmgr the address of the powerplay hardware manager.
1943* @param pInput the pointer to input data
1944* @param pOutput the pointer to output data
1945* @param pStorage the pointer to temporary storage
1946* @param Result the last failure code
1947* @return result from set temperature range routine
1948*/
1949int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1950{
1951 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
1952
1953 SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1954 uint32_t duty100;
1955 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1956 uint16_t fdo_min, slope1, slope2;
1957 uint32_t reference_clock;
1958 int res;
1959 uint64_t tmp64;
1960
1961 if (smu_data->smu7_data.fan_table_start == 0) {
1962 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1963 PHM_PlatformCaps_MicrocodeFanControl);
1964 return 0;
1965 }
1966
1967 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1968 CG_FDO_CTRL1, FMAX_DUTY100);
1969
1970 if (duty100 == 0) {
1971 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1972 PHM_PlatformCaps_MicrocodeFanControl);
1973 return 0;
1974 }
1975
1976 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
1977 usPWMMin * duty100;
1978 do_div(tmp64, 10000);
1979 fdo_min = (uint16_t)tmp64;
1980
1981 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
1982 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
1983 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
1984 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
1985
1986 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
1987 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
1988 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
1989 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
1990
1991 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1992 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1993
1994 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
1995 thermal_controller.advanceFanControlParameters.usTMin) / 100);
1996 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
1997 thermal_controller.advanceFanControlParameters.usTMed) / 100);
1998 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
1999 thermal_controller.advanceFanControlParameters.usTMax) / 100);
2000
2001 fan_table.Slope1 = cpu_to_be16(slope1);
2002 fan_table.Slope2 = cpu_to_be16(slope2);
2003
2004 fan_table.FdoMin = cpu_to_be16(fdo_min);
2005
2006 fan_table.HystDown = cpu_to_be16(hwmgr->
2007 thermal_controller.advanceFanControlParameters.ucTHyst);
2008
2009 fan_table.HystUp = cpu_to_be16(1);
2010
2011 fan_table.HystSlope = cpu_to_be16(1);
2012
2013 fan_table.TempRespLim = cpu_to_be16(5);
2014
2015 reference_clock = smu7_get_xclk(hwmgr);
2016
2017 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2018 thermal_controller.advanceFanControlParameters.ulCycleDelay *
2019 reference_clock) / 1600);
2020
2021 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2022
2023 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2024 hwmgr->device, CGS_IND_REG__SMC,
2025 CG_MULT_THERMAL_CTRL, TEMP_SEL);
2026
2027 res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
2028 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
2029 SMC_RAM_END);
2030
2031 if (!res && hwmgr->thermal_controller.
2032 advanceFanControlParameters.ucMinimumPWMLimit)
2033 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2034 PPSMC_MSG_SetFanMinPwm,
2035 hwmgr->thermal_controller.
2036 advanceFanControlParameters.ucMinimumPWMLimit);
2037
2038 if (!res && hwmgr->thermal_controller.
2039 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
2040 res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2041 PPSMC_MSG_SetFanSclkTarget,
2042 hwmgr->thermal_controller.
2043 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2044
2045 if (res)
2046 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2047 PHM_PlatformCaps_MicrocodeFanControl);
2048
2049 return 0;
2050}
2051
2052int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2053{
2054 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2055
2056 if (data->need_update_smu7_dpm_table &
2057 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2058 return fiji_program_memory_timing_parameters(hwmgr);
2059
2060 return 0;
2061}
2062
2063int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2064{
2065 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2066 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
2067
2068 int result = 0;
2069 uint32_t low_sclk_interrupt_threshold = 0;
2070
2071 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2072 PHM_PlatformCaps_SclkThrottleLowNotification)
2073 && (hwmgr->gfx_arbiter.sclk_threshold !=
2074 data->low_sclk_interrupt_threshold)) {
2075 data->low_sclk_interrupt_threshold =
2076 hwmgr->gfx_arbiter.sclk_threshold;
2077 low_sclk_interrupt_threshold =
2078 data->low_sclk_interrupt_threshold;
2079
2080 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2081
2082 result = smu7_copy_bytes_to_smc(
2083 hwmgr->smumgr,
2084 smu_data->smu7_data.dpm_table_start +
2085 offsetof(SMU73_Discrete_DpmTable,
2086 LowSclkInterruptThreshold),
2087 (uint8_t *)&low_sclk_interrupt_threshold,
2088 sizeof(uint32_t),
2089 SMC_RAM_END);
2090 }
2091 result = fiji_program_mem_timing_parameters(hwmgr);
2092 PP_ASSERT_WITH_CODE((result == 0),
2093 "Failed to program memory timing parameters!",
2094 );
2095 return result;
2096}
2097
2098uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
2099{
2100 switch (type) {
2101 case SMU_SoftRegisters:
2102 switch (member) {
2103 case HandshakeDisables:
2104 return offsetof(SMU73_SoftRegisters, HandshakeDisables);
2105 case VoltageChangeTimeout:
2106 return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
2107 case AverageGraphicsActivity:
2108 return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
2109 case PreVBlankGap:
2110 return offsetof(SMU73_SoftRegisters, PreVBlankGap);
2111 case VBlankTimeout:
2112 return offsetof(SMU73_SoftRegisters, VBlankTimeout);
2113 case UcodeLoadStatus:
2114 return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
2115 }
2116 case SMU_Discrete_DpmTable:
2117 switch (member) {
2118 case UvdBootLevel:
2119 return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
2120 case VceBootLevel:
2121 return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2122 case SamuBootLevel:
2123 return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2124 case LowSclkInterruptThreshold:
2125 return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
2126 }
2127 }
2128 printk("cant't get the offset of type %x member %x \n", type, member);
2129 return 0;
2130}
2131
2132uint32_t fiji_get_mac_definition(uint32_t value)
2133{
2134 switch (value) {
2135 case SMU_MAX_LEVELS_GRAPHICS:
2136 return SMU73_MAX_LEVELS_GRAPHICS;
2137 case SMU_MAX_LEVELS_MEMORY:
2138 return SMU73_MAX_LEVELS_MEMORY;
2139 case SMU_MAX_LEVELS_LINK:
2140 return SMU73_MAX_LEVELS_LINK;
2141 case SMU_MAX_ENTRIES_SMIO:
2142 return SMU73_MAX_ENTRIES_SMIO;
2143 case SMU_MAX_LEVELS_VDDC:
2144 return SMU73_MAX_LEVELS_VDDC;
2145 case SMU_MAX_LEVELS_VDDGFX:
2146 return SMU73_MAX_LEVELS_VDDGFX;
2147 case SMU_MAX_LEVELS_VDDCI:
2148 return SMU73_MAX_LEVELS_VDDCI;
2149 case SMU_MAX_LEVELS_MVDD:
2150 return SMU73_MAX_LEVELS_MVDD;
2151 }
2152
2153 printk("cant't get the mac of %x \n", value);
2154 return 0;
2155}
2156
2157
/* Update the UVD boot level byte in SMC RAM and, when UVD DPM is disabled
 * or a stable pstate is requested, restrict the UVD DPM enabled mask to
 * that single level.  Always returns 0.
 */
static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* Boot from the highest MM dependency level if the table has any. */
	smu_data->smc_state_table.UvdBootLevel = 0;
	if (table_info->mm_dep_table->count > 0)
		smu_data->smc_state_table.UvdBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
						UvdBootLevel);
	/* Align the byte offset down to its containing 4-byte dword, then
	 * read-modify-write only the UvdBootLevel byte (bits 31:24).
	 */
	mm_boot_level_offset /= 4;
	mm_boot_level_offset *= 4;
	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset);
	mm_boot_level_value &= 0x00FFFFFF;
	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
	cgs_write_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState))
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_UVDDPM_SetEnabledMask,
				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
	return 0;
}
2189
2190static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2191{
2192 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
2193 uint32_t mm_boot_level_offset, mm_boot_level_value;
2194 struct phm_ppt_v1_information *table_info =
2195 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2196
2197 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2198 PHM_PlatformCaps_StablePState))
2199 smu_data->smc_state_table.VceBootLevel =
2200 (uint8_t) (table_info->mm_dep_table->count - 1);
2201 else
2202 smu_data->smc_state_table.VceBootLevel = 0;
2203
2204 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2205 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2206 mm_boot_level_offset /= 4;
2207 mm_boot_level_offset *= 4;
2208 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2209 CGS_IND_REG__SMC, mm_boot_level_offset);
2210 mm_boot_level_value &= 0xFF00FFFF;
2211 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2212 cgs_write_ind_register(hwmgr->device,
2213 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2214
2215 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2216 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2217 PPSMC_MSG_VCEDPM_SetEnabledMask,
2218 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2219 return 0;
2220}
2221
2222static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2223{
2224 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
2225 uint32_t mm_boot_level_offset, mm_boot_level_value;
2226
2227
2228 smu_data->smc_state_table.SamuBootLevel = 0;
2229 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2230 offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2231
2232 mm_boot_level_offset /= 4;
2233 mm_boot_level_offset *= 4;
2234 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2235 CGS_IND_REG__SMC, mm_boot_level_offset);
2236 mm_boot_level_value &= 0xFFFFFF00;
2237 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2238 cgs_write_ind_register(hwmgr->device,
2239 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2240
2241 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2242 PHM_PlatformCaps_StablePState))
2243 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2244 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2245 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2246 return 0;
2247}
2248
2249int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2250{
2251 switch (type) {
2252 case SMU_UVD_TABLE:
2253 fiji_update_uvd_smc_table(hwmgr);
2254 break;
2255 case SMU_VCE_TABLE:
2256 fiji_update_vce_smc_table(hwmgr);
2257 break;
2258 case SMU_SAMU_TABLE:
2259 fiji_update_samu_smc_table(hwmgr);
2260 break;
2261 default:
2262 break;
2263 }
2264 return 0;
2265}
2266
2267
2268/**
2269* Get the location of various tables inside the FW image.
2270*
2271* @param hwmgr the address of the powerplay hardware manager.
2272* @return always 0
2273*/
2274int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
2275{
2276 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2277 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
2278 uint32_t tmp;
2279 int result;
2280 bool error = false;
2281
2282 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2283 SMU7_FIRMWARE_HEADER_LOCATION +
2284 offsetof(SMU73_Firmware_Header, DpmTable),
2285 &tmp, SMC_RAM_END);
2286
2287 if (0 == result)
2288 smu_data->smu7_data.dpm_table_start = tmp;
2289
2290 error |= (0 != result);
2291
2292 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2293 SMU7_FIRMWARE_HEADER_LOCATION +
2294 offsetof(SMU73_Firmware_Header, SoftRegisters),
2295 &tmp, SMC_RAM_END);
2296
2297 if (!result) {
2298 data->soft_regs_start = tmp;
2299 smu_data->smu7_data.soft_regs_start = tmp;
2300 }
2301
2302 error |= (0 != result);
2303
2304 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2305 SMU7_FIRMWARE_HEADER_LOCATION +
2306 offsetof(SMU73_Firmware_Header, mcRegisterTable),
2307 &tmp, SMC_RAM_END);
2308
2309 if (!result)
2310 smu_data->smu7_data.mc_reg_table_start = tmp;
2311
2312 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2313 SMU7_FIRMWARE_HEADER_LOCATION +
2314 offsetof(SMU73_Firmware_Header, FanTable),
2315 &tmp, SMC_RAM_END);
2316
2317 if (!result)
2318 smu_data->smu7_data.fan_table_start = tmp;
2319
2320 error |= (0 != result);
2321
2322 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2323 SMU7_FIRMWARE_HEADER_LOCATION +
2324 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
2325 &tmp, SMC_RAM_END);
2326
2327 if (!result)
2328 smu_data->smu7_data.arb_table_start = tmp;
2329
2330 error |= (0 != result);
2331
2332 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2333 SMU7_FIRMWARE_HEADER_LOCATION +
2334 offsetof(SMU73_Firmware_Header, Version),
2335 &tmp, SMC_RAM_END);
2336
2337 if (!result)
2338 hwmgr->microcode_version_info.SMC = tmp;
2339
2340 error |= (0 != result);
2341
2342 return error ? -1 : 0;
2343}
2344
2345int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2346{
2347
2348 /* Program additional LP registers
2349 * that are no longer programmed by VBIOS
2350 */
2351 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
2352 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2353 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
2354 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2355 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
2356 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2357 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
2358 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2359 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
2360 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2361 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
2362 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2363 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
2364 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2365
2366 return 0;
2367}
2368
2369bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
2370{
2371 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2372 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2373 ? true : false;
2374}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
index 1cef03deeac3..d30d150f9ca6 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 Advanced Micro Devices, Inc. 2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,23 +20,32 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef FIJI_SMC_H
24#define FIJI_SMC_H
23 25
24#ifndef FIJI_SMUMGR_H 26#include "smumgr.h"
25#define FIJI_SMUMGR_H 27#include "smu73.h"
26 28
27#include "fiji_ppsmc.h" 29struct fiji_pt_defaults {
28 30 uint8_t SviLoadLineEn;
29int fiji_smu_init(struct amdgpu_device *adev); 31 uint8_t SviLoadLineVddC;
30int fiji_smu_fini(struct amdgpu_device *adev); 32 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
31int fiji_smu_start(struct amdgpu_device *adev); 33 uint8_t TDC_MAWt;
32 34 uint8_t TdcWaterfallCtl;
33struct fiji_smu_private_data 35 uint8_t DTEAmbientTempBase;
34{
35 uint8_t *header;
36 uint32_t smu_buffer_addr_high;
37 uint32_t smu_buffer_addr_low;
38 uint32_t header_addr_high;
39 uint32_t header_addr_low;
40}; 36};
41 37
38int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
39int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
40int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
41int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
42int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
43int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
44uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
45uint32_t fiji_get_mac_definition(uint32_t value);
46int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
47int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
48bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
49
42#endif 50#endif
51
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 8e52a2e82db5..02fe1df855a9 100644..100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -38,6 +38,7 @@
38#include "bif/bif_5_0_sh_mask.h" 38#include "bif/bif_5_0_sh_mask.h"
39#include "pp_debug.h" 39#include "pp_debug.h"
40#include "fiji_pwrvirus.h" 40#include "fiji_pwrvirus.h"
41#include "fiji_smc.h"
41 42
42#define AVFS_EN_MSB 1568 43#define AVFS_EN_MSB 1568
43#define AVFS_EN_LSB 1568 44#define AVFS_EN_LSB 1568
@@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
57 { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } 58 { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
58}; 59};
59 60
60static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
61{
62 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
63
64 switch (fw_type) {
65 case UCODE_ID_SMU:
66 result = CGS_UCODE_ID_SMU;
67 break;
68 case UCODE_ID_SDMA0:
69 result = CGS_UCODE_ID_SDMA0;
70 break;
71 case UCODE_ID_SDMA1:
72 result = CGS_UCODE_ID_SDMA1;
73 break;
74 case UCODE_ID_CP_CE:
75 result = CGS_UCODE_ID_CP_CE;
76 break;
77 case UCODE_ID_CP_PFP:
78 result = CGS_UCODE_ID_CP_PFP;
79 break;
80 case UCODE_ID_CP_ME:
81 result = CGS_UCODE_ID_CP_ME;
82 break;
83 case UCODE_ID_CP_MEC:
84 result = CGS_UCODE_ID_CP_MEC;
85 break;
86 case UCODE_ID_CP_MEC_JT1:
87 result = CGS_UCODE_ID_CP_MEC_JT1;
88 break;
89 case UCODE_ID_CP_MEC_JT2:
90 result = CGS_UCODE_ID_CP_MEC_JT2;
91 break;
92 case UCODE_ID_RLC_G:
93 result = CGS_UCODE_ID_RLC_G;
94 break;
95 default:
96 break;
97 }
98
99 return result;
100}
101/**
102* Set the address for reading/writing the SMC SRAM space.
103* @param smumgr the address of the powerplay hardware manager.
104* @param smc_addr the address in the SMC RAM to access.
105*/
106static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
107 uint32_t smc_addr, uint32_t limit)
108{
109 PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
110 "SMC address must be 4 byte aligned.", return -EINVAL;);
111 PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
112 "SMC address is beyond the SMC RAM area.", return -EINVAL;);
113
114 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
115 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
116
117 return 0;
118}
119
120/**
121* Copy bytes from an array into the SMC RAM space.
122*
123* @param smumgr the address of the powerplay SMU manager.
124* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
125* @param src the byte array to copy the bytes from.
126* @param byteCount the number of bytes to copy.
127*/
128int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
129 uint32_t smcStartAddress, const uint8_t *src,
130 uint32_t byteCount, uint32_t limit)
131{
132 int result;
133 uint32_t data, originalData;
134 uint32_t addr, extraShift;
135
136 PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
137 "SMC address must be 4 byte aligned.", return -EINVAL;);
138 PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
139 "SMC address is beyond the SMC RAM area.", return -EINVAL;);
140
141 addr = smcStartAddress;
142
143 while (byteCount >= 4) {
144 /* Bytes are written into the SMC addres space with the MSB first. */
145 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
146
147 result = fiji_set_smc_sram_address(smumgr, addr, limit);
148 if (result)
149 return result;
150
151 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
152
153 src += 4;
154 byteCount -= 4;
155 addr += 4;
156 }
157
158 if (byteCount) {
159 /* Now write the odd bytes left.
160 * Do a read modify write cycle.
161 */
162 data = 0;
163
164 result = fiji_set_smc_sram_address(smumgr, addr, limit);
165 if (result)
166 return result;
167
168 originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
169 extraShift = 8 * (4 - byteCount);
170
171 while (byteCount > 0) {
172 /* Bytes are written into the SMC addres
173 * space with the MSB first.
174 */
175 data = (0x100 * data) + *src++;
176 byteCount--;
177 }
178 data <<= extraShift;
179 data |= (originalData & ~((~0UL) << extraShift));
180
181 result = fiji_set_smc_sram_address(smumgr, addr, limit);
182 if (!result)
183 return result;
184
185 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
186 }
187 return 0;
188}
189
/* Plant a branch-to-self instruction at SMC address 0 so the SMC spins
 * safely until its real entry point is programmed.
 *
 * @param smumgr the address of the powerplay SMU manager.
 * @return 0 on success, negative errno from the SMC copy on failure.
 */
int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
{
	static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };

	/* Propagate copy failures instead of silently discarding them. */
	return fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
}
198
199/**
200* Return if the SMC is currently running.
201*
202* @param smumgr the address of the powerplay hardware manager.
203*/
204bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
205{
206 return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
207 CGS_IND_REG__SMC,
208 SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
209 && (0x20100 <= cgs_read_ind_register(smumgr->device,
210 CGS_IND_REG__SMC, ixSMC_PC_C)));
211}
212
213/**
214* Send a message to the SMC, and wait for its response.
215*
216* @param smumgr the address of the powerplay hardware manager.
217* @param msg the message to send.
218* @return The response that came from the SMC.
219*/
220int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
221{
222 if (!fiji_is_smc_ram_running(smumgr))
223 return -1;
224
225 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
226 printk(KERN_ERR "Failed to send Previous Message.");
227 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
228 }
229
230 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
231 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
232
233 return 0;
234}
235
236/**
237 * Send a message to the SMC with parameter
238 * @param smumgr: the address of the powerplay hardware manager.
239 * @param msg: the message to send.
240 * @param parameter: the parameter to send
241 * @return The response that came from the SMC.
242 */
243int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
244 uint16_t msg, uint32_t parameter)
245{
246 if (!fiji_is_smc_ram_running(smumgr))
247 return -1;
248
249 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
250 printk(KERN_ERR "Failed to send Previous Message.");
251 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
252 }
253
254 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
255 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
256 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
257
258 return 0;
259}
260
261
262/**
263* Send a message to the SMC with parameter, do not wait for response
264*
265* @param smumgr: the address of the powerplay hardware manager.
266* @param msg: the message to send.
267* @param parameter: the parameter to send
268* @return The response that came from the SMC.
269*/
270int fiji_send_msg_to_smc_with_parameter_without_waiting(
271 struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
272{
273 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
274 printk(KERN_ERR "Failed to send Previous Message.");
275 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
276 }
277 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
278 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
279
280 return 0;
281}
282
283/**
284* Uploads the SMU firmware from .hex file
285*
286* @param smumgr the address of the powerplay SMU manager.
287* @return 0 or -1.
288*/
289
290static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
291{
292 const uint8_t *src;
293 uint32_t byte_count;
294 uint32_t *data;
295 struct cgs_firmware_info info = {0};
296
297 cgs_get_firmware_info(smumgr->device,
298 fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
299
300 if (info.image_size & 3) {
301 printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
302 return -EINVAL;
303 }
304
305 if (info.image_size > FIJI_SMC_SIZE) {
306 printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
307 return -EINVAL;
308 }
309
310 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
311 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
312
313 byte_count = info.image_size;
314 src = (const uint8_t *)info.kptr;
315
316 data = (uint32_t *)src;
317 for (; byte_count >= 4; data++, byte_count -= 4)
318 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
319
320 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
321 return 0;
322}
323
324/**
325* Read a 32bit value from the SMC SRAM space.
326* ALL PARAMETERS ARE IN HOST BYTE ORDER.
327* @param smumgr the address of the powerplay hardware manager.
328* @param smc_addr the address in the SMC RAM to access.
329* @param value and output parameter for the data read from the SMC SRAM.
330*/
331int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
332 uint32_t *value, uint32_t limit)
333{
334 int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
335
336 if (result)
337 return result;
338
339 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
340 return 0;
341}
342
343/**
344* Write a 32bit value to the SMC SRAM space.
345* ALL PARAMETERS ARE IN HOST BYTE ORDER.
346* @param smumgr the address of the powerplay hardware manager.
347* @param smc_addr the address in the SMC RAM to access.
348* @param value to write to the SMC SRAM.
349*/
350int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
351 uint32_t value, uint32_t limit)
352{
353 int result;
354
355 result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
356
357 if (result)
358 return result;
359
360 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
361 return 0;
362}
363
364static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
365{
366 uint32_t result = 0;
367
368 switch (fw_type) {
369 case UCODE_ID_SDMA0:
370 result = UCODE_ID_SDMA0_MASK;
371 break;
372 case UCODE_ID_SDMA1:
373 result = UCODE_ID_SDMA1_MASK;
374 break;
375 case UCODE_ID_CP_CE:
376 result = UCODE_ID_CP_CE_MASK;
377 break;
378 case UCODE_ID_CP_PFP:
379 result = UCODE_ID_CP_PFP_MASK;
380 break;
381 case UCODE_ID_CP_ME:
382 result = UCODE_ID_CP_ME_MASK;
383 break;
384 case UCODE_ID_CP_MEC_JT1:
385 result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
386 break;
387 case UCODE_ID_CP_MEC_JT2:
388 result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
389 break;
390 case UCODE_ID_RLC_G:
391 result = UCODE_ID_RLC_G_MASK;
392 break;
393 default:
394 printk(KERN_ERR "UCode type is out of range!");
395 result = 0;
396 }
397
398 return result;
399}
400
401/* Populate one firmware image to the data structure */
402static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
403 uint32_t fw_type, struct SMU_Entry *entry)
404{
405 int result;
406 struct cgs_firmware_info info = {0};
407
408 result = cgs_get_firmware_info(
409 smumgr->device,
410 fiji_convert_fw_type_to_cgs(fw_type),
411 &info);
412
413 if (!result) {
414 entry->version = 0;
415 entry->id = (uint16_t)fw_type;
416 entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
417 entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
418 entry->meta_data_addr_high = 0;
419 entry->meta_data_addr_low = 0;
420 entry->data_size_byte = info.image_size;
421 entry->num_register_entries = 0;
422
423 if (fw_type == UCODE_ID_RLC_G)
424 entry->flags = 1;
425 else
426 entry->flags = 0;
427 }
428
429 return result;
430}
431
432static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
433{
434 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
435 uint32_t fw_to_load;
436 struct SMU_DRAMData_TOC *toc;
437
438 if (priv->soft_regs_start)
439 cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
440 priv->soft_regs_start +
441 offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
442 0x0);
443
444 toc = (struct SMU_DRAMData_TOC *)priv->header;
445 toc->num_entries = 0;
446 toc->structure_version = 1;
447
448 PP_ASSERT_WITH_CODE(
449 0 == fiji_populate_single_firmware_entry(smumgr,
450 UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
451 "Failed to Get Firmware Entry.\n" , return -1 );
452 PP_ASSERT_WITH_CODE(
453 0 == fiji_populate_single_firmware_entry(smumgr,
454 UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
455 "Failed to Get Firmware Entry.\n" , return -1 );
456 PP_ASSERT_WITH_CODE(
457 0 == fiji_populate_single_firmware_entry(smumgr,
458 UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
459 "Failed to Get Firmware Entry.\n" , return -1 );
460 PP_ASSERT_WITH_CODE(
461 0 == fiji_populate_single_firmware_entry(smumgr,
462 UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
463 "Failed to Get Firmware Entry.\n" , return -1 );
464 PP_ASSERT_WITH_CODE(
465 0 == fiji_populate_single_firmware_entry(smumgr,
466 UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
467 "Failed to Get Firmware Entry.\n" , return -1 );
468 PP_ASSERT_WITH_CODE(
469 0 == fiji_populate_single_firmware_entry(smumgr,
470 UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
471 "Failed to Get Firmware Entry.\n" , return -1 );
472 PP_ASSERT_WITH_CODE(
473 0 == fiji_populate_single_firmware_entry(smumgr,
474 UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
475 "Failed to Get Firmware Entry.\n" , return -1 );
476 PP_ASSERT_WITH_CODE(
477 0 == fiji_populate_single_firmware_entry(smumgr,
478 UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
479 "Failed to Get Firmware Entry.\n" , return -1 );
480 PP_ASSERT_WITH_CODE(
481 0 == fiji_populate_single_firmware_entry(smumgr,
482 UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
483 "Failed to Get Firmware Entry.\n" , return -1 );
484
485 fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
486 priv->header_buffer.mc_addr_high);
487 fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
488 priv->header_buffer.mc_addr_low);
489
490 fw_to_load = UCODE_ID_RLC_G_MASK
491 + UCODE_ID_SDMA0_MASK
492 + UCODE_ID_SDMA1_MASK
493 + UCODE_ID_CP_CE_MASK
494 + UCODE_ID_CP_ME_MASK
495 + UCODE_ID_CP_PFP_MASK
496 + UCODE_ID_CP_MEC_MASK
497 + UCODE_ID_CP_MEC_JT1_MASK
498 + UCODE_ID_CP_MEC_JT2_MASK;
499
500 if (fiji_send_msg_to_smc_with_parameter(smumgr,
501 PPSMC_MSG_LoadUcodes, fw_to_load))
502 printk(KERN_ERR "Fail to Request SMU Load uCode");
503
504 return 0;
505}
506
507
508/* Check if the FW has been loaded, SMU will not return
509 * if loading has not finished.
510 */
511static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
512 uint32_t fw_type)
513{
514 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
515 uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
516
517 /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
518 if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
519 priv->soft_regs_start +
520 offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
521 mask, mask)) {
522 printk(KERN_ERR "check firmware loading failed\n");
523 return -EINVAL;
524 }
525 return 0;
526}
527
528
529static int fiji_reload_firmware(struct pp_smumgr *smumgr)
530{
531 return smumgr->smumgr_funcs->start_smu(smumgr);
532}
533
534static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
535{
536 uint32_t value;
537
538 value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
539 if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
540 /* driver reads on SR-IOV enabled PF: 0x80000000
541 * driver reads on SR-IOV enabled VF: 0x80000001
542 * driver reads on SR-IOV disabled: 0x00000000
543 */
544 return true;
545 }
546 return false;
547}
548
549static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
550{
551 if (fiji_is_hw_virtualization_enabled(smumgr)) {
552 uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
553 if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
554 PPSMC_MSG_LoadUcodes, masks))
555 printk(KERN_ERR "Fail to Request SMU Load uCode");
556 }
557 /* For non-virtualization cases,
558 * SMU loads all FWs at once in fiji_request_smu_load_fw.
559 */
560 return 0;
561}
562
563static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) 61static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
564{ 62{
565 int result = 0; 63 int result = 0;
@@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
571 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 69 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
572 SMC_SYSCON_RESET_CNTL, rst_reg, 1); 70 SMC_SYSCON_RESET_CNTL, rst_reg, 1);
573 71
574 result = fiji_upload_smu_firmware_image(smumgr); 72 result = smu7_upload_smu_firmware_image(smumgr);
575 if (result) 73 if (result)
576 return result; 74 return result;
577 75
@@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
610 SMU_STATUS, SMU_DONE, 0); 108 SMU_STATUS, SMU_DONE, 0);
611 109
612 /* Check pass/failed indicator */ 110 /* Check pass/failed indicator */
613 if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 111 if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
614 SMU_STATUS, SMU_PASS)) { 112 SMU_STATUS, SMU_PASS) != 1) {
615 PP_ASSERT_WITH_CODE(false, 113 PP_ASSERT_WITH_CODE(false,
616 "SMU Firmware start failed!", return -1); 114 "SMU Firmware start failed!", return -1);
617 } 115 }
@@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
639 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 137 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
640 SMC_SYSCON_RESET_CNTL, rst_reg, 1); 138 SMC_SYSCON_RESET_CNTL, rst_reg, 1);
641 139
642 result = fiji_upload_smu_firmware_image(smumgr); 140 result = smu7_upload_smu_firmware_image(smumgr);
643 if (result) 141 if (result)
644 return result; 142 return result;
645 143
646 /* Set smc instruct start point at 0x0 */ 144 /* Set smc instruct start point at 0x0 */
647 fiji_program_jump_on_start(smumgr); 145 smu7_program_jump_on_start(smumgr);
648 146
649 /* Enable clock */ 147 /* Enable clock */
650 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 148 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
698 196
699 priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; 197 priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
700 if (priv->avfs.AvfsBtcParam) { 198 if (priv->avfs.AvfsBtcParam) {
701 if (!fiji_send_msg_to_smc_with_parameter(smumgr, 199 if (!smum_send_msg_to_smc_with_parameter(smumgr,
702 PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { 200 PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
703 if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { 201 if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
704 priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; 202 priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
705 result = 0; 203 result = 0;
706 } else { 204 } else {
707 printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" 205 printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
708 " to Enable AVFS Failed!"); 206 " to Enable AVFS Failed!");
709 fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); 207 smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
710 result = -1; 208 result = -1;
711 } 209 }
712 } else { 210 } else {
@@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
736 charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ 234 charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
737 inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ 235 inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
738 236
739 PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, 237 PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
740 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, 238 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
741 PmFuseTable), &table_start, 0x40000), 239 PmFuseTable), &table_start, 0x40000),
742 "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " 240 "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
748 inversion_voltage_addr = table_start + 246 inversion_voltage_addr = table_start +
749 offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); 247 offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
750 248
751 result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr, 249 result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
752 (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); 250 (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
753 PP_ASSERT_WITH_CODE(0 == result, 251 PP_ASSERT_WITH_CODE(0 == result,
754 "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " 252 "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
755 "be populated.", return -1;); 253 "be populated.", return -1;);
756 254
757 result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr, 255 result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
758 (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); 256 (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
759 PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " 257 PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
760 "charz_freq could not be populated.", return -1;); 258 "charz_freq could not be populated.", return -1;);
@@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
769 uint32_t level_addr, vr_config_addr; 267 uint32_t level_addr, vr_config_addr;
770 uint32_t level_size = sizeof(avfs_graphics_level); 268 uint32_t level_size = sizeof(avfs_graphics_level);
771 269
772 PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, 270 PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
773 SMU7_FIRMWARE_HEADER_LOCATION + 271 SMU7_FIRMWARE_HEADER_LOCATION +
774 offsetof(SMU73_Firmware_Header, DpmTable), 272 offsetof(SMU73_Firmware_Header, DpmTable),
775 &table_start, 0x40000), 273 &table_start, 0x40000),
@@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
784 vr_config_addr = table_start + 282 vr_config_addr = table_start +
785 offsetof(SMU73_Discrete_DpmTable, VRConfig); 283 offsetof(SMU73_Discrete_DpmTable, VRConfig);
786 284
787 PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr, 285 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
788 (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), 286 (uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
789 "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " 287 "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
790 "vr_config value over to SMC", 288 "vr_config value over to SMC",
@@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
792 290
793 level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); 291 level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
794 292
795 PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr, 293 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
796 (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), 294 (uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
797 "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", 295 "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
798 return -1;); 296 return -1;);
@@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
839 break; 337 break;
840 case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ 338 case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
841 priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; 339 priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
842 PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, 340 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
843 PPSMC_MSG_VftTableIsValid), 341 0x666),
844 "[AVFS][fiji_avfs_event_mgr] SMU did not respond " 342 "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
845 "correctly to VftTableIsValid Msg", 343 "correctly to VftTableIsValid Msg",
846 return -1;); 344 return -1;);
847 priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; 345 priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
848 PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, 346 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
849 PPSMC_MSG_EnableAvfs), 347 PPSMC_MSG_EnableAvfs),
850 "[AVFS][fiji_avfs_event_mgr] SMU did not respond " 348 "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
851 "correctly to EnableAvfs Message Msg", 349 "correctly to EnableAvfs Message Msg",
@@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
898 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); 396 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
899 397
900 /* Only start SMC if SMC RAM is not running */ 398 /* Only start SMC if SMC RAM is not running */
901 if (!fiji_is_smc_ram_running(smumgr)) { 399 if (!smu7_is_smc_ram_running(smumgr)) {
902 fiji_avfs_event_mgr(smumgr, false); 400 fiji_avfs_event_mgr(smumgr, false);
903 401
904 /* Check if SMU is running in protected mode */ 402 /* Check if SMU is running in protected mode */
@@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
929 /* Setup SoftRegsStart here for register lookup in case 427 /* Setup SoftRegsStart here for register lookup in case
930 * DummyBackEnd is used and ProcessFirmwareHeader is not executed 428 * DummyBackEnd is used and ProcessFirmwareHeader is not executed
931 */ 429 */
932 fiji_read_smc_sram_dword(smumgr, 430 smu7_read_smc_sram_dword(smumgr,
933 SMU7_FIRMWARE_HEADER_LOCATION + 431 SMU7_FIRMWARE_HEADER_LOCATION +
934 offsetof(SMU73_Firmware_Header, SoftRegisters), 432 offsetof(SMU73_Firmware_Header, SoftRegisters),
935 &(priv->soft_regs_start), 0x40000); 433 &(priv->smu7_data.soft_regs_start), 0x40000);
936 434
937 result = fiji_request_smu_load_fw(smumgr); 435 result = smu7_request_smu_load_fw(smumgr);
938 436
939 return result; 437 return result;
940} 438}
@@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
963static int fiji_smu_init(struct pp_smumgr *smumgr) 461static int fiji_smu_init(struct pp_smumgr *smumgr)
964{ 462{
965 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); 463 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
966 uint64_t mc_addr; 464 int i;
967 465
968 priv->header_buffer.data_size = 466 if (smu7_init(smumgr))
969 ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; 467 return -EINVAL;
970 smu_allocate_memory(smumgr->device,
971 priv->header_buffer.data_size,
972 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
973 PAGE_SIZE,
974 &mc_addr,
975 &priv->header_buffer.kaddr,
976 &priv->header_buffer.handle);
977
978 priv->header = priv->header_buffer.kaddr;
979 priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
980 priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
981
982 PP_ASSERT_WITH_CODE((NULL != priv->header),
983 "Out of memory.",
984 kfree(smumgr->backend);
985 cgs_free_gpu_mem(smumgr->device,
986 (cgs_handle_t)priv->header_buffer.handle);
987 return -1);
988 468
989 priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; 469 priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
990 if (fiji_is_hw_avfs_present(smumgr)) 470 if (fiji_is_hw_avfs_present(smumgr))
@@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
999 else 479 else
1000 priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; 480 priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
1001 481
1002 priv->acpi_optimization = 1; 482 for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
483 priv->activity_target[i] = 30;
1003 484
1004 return 0; 485 return 0;
1005} 486}
1006 487
1007static int fiji_smu_fini(struct pp_smumgr *smumgr)
1008{
1009 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
1010
1011 smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
1012
1013 if (smumgr->backend) {
1014 kfree(smumgr->backend);
1015 smumgr->backend = NULL;
1016 }
1017
1018 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
1019 return 0;
1020}
1021 488
1022static const struct pp_smumgr_func fiji_smu_funcs = { 489static const struct pp_smumgr_func fiji_smu_funcs = {
1023 .smu_init = &fiji_smu_init, 490 .smu_init = &fiji_smu_init,
1024 .smu_fini = &fiji_smu_fini, 491 .smu_fini = &smu7_smu_fini,
1025 .start_smu = &fiji_start_smu, 492 .start_smu = &fiji_start_smu,
1026 .check_fw_load_finish = &fiji_check_fw_load_finish, 493 .check_fw_load_finish = &smu7_check_fw_load_finish,
1027 .request_smu_load_fw = &fiji_reload_firmware, 494 .request_smu_load_fw = &smu7_reload_firmware,
1028 .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load, 495 .request_smu_load_specific_fw = NULL,
1029 .send_msg_to_smc = &fiji_send_msg_to_smc, 496 .send_msg_to_smc = &smu7_send_msg_to_smc,
1030 .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, 497 .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
1031 .download_pptable_settings = NULL, 498 .download_pptable_settings = NULL,
1032 .upload_pptable_settings = NULL, 499 .upload_pptable_settings = NULL,
500 .update_smc_table = fiji_update_smc_table,
501 .get_offsetof = fiji_get_offsetof,
502 .process_firmware_header = fiji_process_firmware_header,
503 .init_smc_table = fiji_init_smc_table,
504 .update_sclk_threshold = fiji_update_sclk_threshold,
505 .thermal_setup_fan_table = fiji_thermal_setup_fan_table,
506 .populate_all_graphic_levels = fiji_populate_all_graphic_levels,
507 .populate_all_memory_levels = fiji_populate_all_memory_levels,
508 .get_mac_definition = fiji_get_mac_definition,
509 .initialize_mc_reg_table = fiji_initialize_mc_reg_table,
510 .is_dpm_running = fiji_is_dpm_running,
1033}; 511};
1034 512
1035int fiji_smum_init(struct pp_smumgr *smumgr) 513int fiji_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index b4eb483215b1..adcbdfb209be 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,37 +23,31 @@
23#ifndef _FIJI_SMUMANAGER_H_ 23#ifndef _FIJI_SMUMANAGER_H_
24#define _FIJI_SMUMANAGER_H_ 24#define _FIJI_SMUMANAGER_H_
25 25
26#include "smu73_discrete.h"
27#include <pp_endian.h>
28#include "smu7_smumgr.h"
29
30
26 31
27struct fiji_smu_avfs { 32struct fiji_smu_avfs {
28 enum AVFS_BTC_STATUS AvfsBtcStatus; 33 enum AVFS_BTC_STATUS AvfsBtcStatus;
29 uint32_t AvfsBtcParam; 34 uint32_t AvfsBtcParam;
30}; 35};
31 36
32struct fiji_buffer_entry {
33 uint32_t data_size;
34 uint32_t mc_addr_low;
35 uint32_t mc_addr_high;
36 void *kaddr;
37 unsigned long handle;
38};
39 37
40struct fiji_smumgr { 38struct fiji_smumgr {
41 uint8_t *header; 39 struct smu7_smumgr smu7_data;
42 uint8_t *mec_image; 40
43 uint32_t soft_regs_start;
44 struct fiji_smu_avfs avfs; 41 struct fiji_smu_avfs avfs;
45 uint32_t acpi_optimization; 42 struct SMU73_Discrete_DpmTable smc_state_table;
43 struct SMU73_Discrete_Ulv ulv_setting;
44 struct SMU73_Discrete_PmFuses power_tune_table;
45 const struct fiji_pt_defaults *power_tune_defaults;
46 uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
46 47
47 struct fiji_buffer_entry header_buffer;
48}; 48};
49 49
50int fiji_smum_init(struct pp_smumgr *smumgr); 50
51int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
52 uint32_t *value, uint32_t limit);
53int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
54 uint32_t value, uint32_t limit);
55int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
56 const uint8_t *src, uint32_t byteCount, uint32_t limit);
57 51
58#endif 52#endif
59 53
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
new file mode 100644
index 000000000000..40f18685a7f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -0,0 +1,2576 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23
24#include "iceland_smc.h"
25#include "smu7_dyn_defaults.h"
26
27#include "smu7_hwmgr.h"
28#include "hardwaremanager.h"
29#include "ppatomctrl.h"
30#include "pp_debug.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "pppcielanes.h"
34#include "pp_endian.h"
35#include "smu7_ppsmc.h"
36
37#include "smu71_discrete.h"
38
39#include "smu/smu_7_1_1_d.h"
40#include "smu/smu_7_1_1_sh_mask.h"
41
42#include "gmc/gmc_8_1_d.h"
43#include "gmc/gmc_8_1_sh_mask.h"
44
45#include "bif/bif_5_0_d.h"
46#include "bif/bif_5_0_sh_mask.h"
47
48#include "dce/dce_10_0_d.h"
49#include "dce/dce_10_0_sh_mask.h"
50#include "processpptables.h"
51
52#include "iceland_smumgr.h"
53
54#define VOLTAGE_SCALE 4
55#define POWERTUNE_DEFAULT_SET_MAX 1
56#define VOLTAGE_VID_OFFSET_SCALE1 625
57#define VOLTAGE_VID_OFFSET_SCALE2 100
58#define MC_CG_ARB_FREQ_F1 0x0b
59#define VDDC_VDDCI_DELTA 200
60
61#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
62#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
63#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
64#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
65
/*
 * Per-SKU power-tune default tables.  One of these is bound to
 * smu_data->power_tune_defaults by iceland_initialize_power_tune_defaults()
 * according to the PCI device ID.  Field order documented inline; the two
 * arrays are the BAPMTI_R / BAPMTI_RC coefficient tables consumed by the
 * PmFuses populate helpers.
 */
static struct iceland_pt_defaults defaults_iceland = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

/* 35W - XT, XTL */
static struct iceland_pt_defaults defaults_icelandxt = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};

/* 25W - PRO, LE */
static struct iceland_pt_defaults defaults_icelandpro = {
	/*
	 * sviLoadLIneEn, SviLoadLineVddC,
	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
	 * BAPM_TEMP_GRADIENT
	 */
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
	{ 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
101
/*
 * Bind the board-appropriate power-tune defaults table based on the PCI
 * device ID queried through the CGS layer: 0x6900/0x6903 (XT/XTL) get the
 * 35W table, 0x6901/0x6902 (PRO/LE) the 25W table, anything else falls
 * back to the generic Iceland table with a warning.
 */
static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	struct cgs_system_info sys_info = {0};
	uint32_t dev_id;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	switch (dev_id) {
	case DEVICE_ID_VI_ICELAND_M_6900:
	case DEVICE_ID_VI_ICELAND_M_6903:
		smu_data->power_tune_defaults = &defaults_icelandxt;
		break;

	case DEVICE_ID_VI_ICELAND_M_6901:
	case DEVICE_ID_VI_ICELAND_M_6902:
		smu_data->power_tune_defaults = &defaults_icelandpro;
		break;
	default:
		/* Unknown SKU: safe generic defaults rather than failing init. */
		smu_data->power_tune_defaults = &defaults_iceland;
		pr_warning("Unknown V.I. Device ID.\n");
		break;
	}
	return;
}
130
/*
 * Program the SVI load-line fuses (DW6 of the PmFuses block) from the
 * per-SKU power-tune defaults.  Trim and offset are fixed values, not
 * taken from the defaults table.
 */
static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
143
/*
 * Program the TDC package-power-limit fuses (DW7 of the PmFuses block).
 * The package limit comes from the platform CAC/DTP table; throttle
 * release percentage and MAWt come from the per-SKU defaults.
 */
static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;

	/* usTDC scaled by 256 — presumably 8.8 fixed point; confirm vs SMU71 spec. */
	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}
159
/*
 * Program DW8 (TdcWaterfallCtl) of the PmFuses block.  The dword is read
 * back from SMC RAM first, but only as a reachability check — 'temp' is
 * otherwise unused; on success the TdcWaterfallCtl byte is taken from the
 * per-SKU defaults.
 */
static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
			fuse_table_offset +
			offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		/* NOTE(review): message mentions DW6/SviLoadLineEn but this
		 * reads TdcWaterfallCtl (DW8) — looks copy-pasted. */
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}
178
/* LPML temperature-scaler fuses are not programmed on Iceland; stub. */
static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	return 0;
}
183
/*
 * Zero the GnbLPML fuses (DW13-DW16); the feature is currently unused.
 * NOTE(review): only the first 8 entries are cleared — assumes GnbLPML
 * has (at least) 8 relevant entries for Iceland; confirm against
 * SMU71_Discrete_PmFuses.
 */
static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
	int i;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);

	/* Currently not used. Set all to zero. */
	for (i = 0; i < 8; i++)
		smu_data->power_tune_table.GnbLPML[i] = 0;

	return 0;
}
195
/* GnbLPML min/max VID derivation is not needed on Iceland; stub. */
static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
200
201static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
202{
203 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
204 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
205 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
206 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
207
208 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
209 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
210
211 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
212 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
213 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
214 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
215
216 return 0;
217}
218
/*
 * Fill the BapmVddCVidHiSidd/LoSidd fuse arrays (DW0-DW3) from the CAC
 * leakage table, converting each Vddc1/Vddc2 pair to VID encoding.
 * EVV must be supported — Iceland always uses EVV-derived leakage — and
 * the leakage table must have at most 8 entries and match the SCLK/VDDC
 * dependency table in length.
 */
static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
		for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
		}
	} else {
		PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
	}

	return 0;
}
244
245static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
246{
247 int i;
248 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
249 uint8_t *vid = smu_data->power_tune_table.VddCVid;
250 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
251
252 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
253 "There should never be more than 8 entries for VddcVid!!!",
254 return -EINVAL);
255
256 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
257 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
258 }
259
260 return 0;
261}
262
263
264
/*
 * Build the complete SMU71_Discrete_PmFuses image dword by dword (DW0
 * through DW18) and download it to SMC RAM at the offset read from the
 * firmware header.  A no-op when the PowerContainment platform cap is
 * disabled.  Order matters: each helper writes a fixed slice of the
 * host-side power_tune_table, which is copied out in one shot at the end.
 */
static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
				SMU71_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU71_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW0 - DW3 */
		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate bapm vddc vid Failed!",
					return -EINVAL);

		/* DW4 - DW5 */
		if (iceland_populate_vddc_vid(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate vddc vid Failed!",
					return -EINVAL);

		/* DW6 */
		if (iceland_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (iceland_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		/* DW9-DW12 */
		if (0 != iceland_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		/* DW13-DW16 */
		if (iceland_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		/* DW17 */
		if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		/* DW18 */
		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
					return -EINVAL);

		/* Download the finished fuse image to SMC RAM. */
		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
341
342static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
343 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
344 uint32_t clock, uint32_t *vol)
345{
346 uint32_t i = 0;
347
348 /* clock - voltage dependency table is empty table */
349 if (allowed_clock_voltage_table->count == 0)
350 return -EINVAL;
351
352 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
353 /* find first sclk bigger than request */
354 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
355 *vol = allowed_clock_voltage_table->entries[i].v;
356 return 0;
357 }
358 }
359
360 /* sclk is bigger than max sclk in the dependence table */
361 *vol = allowed_clock_voltage_table->entries[i - 1].v;
362
363 return 0;
364}
365
/*
 * Derive the Hi/Lo standard-voltage (SIDD) values for a voltage-table
 * entry from the CAC leakage table.  Defaults *hi and *lo to the scaled
 * raw voltage, then does a two-pass search over the SCLK/VDDC dependency
 * table: first for an exact voltage match, then (if none) for the first
 * entry at or above the requested voltage.  Indexes past the end of the
 * leakage table clamp to its last entry.  Always returns 0 unless the
 * dependency table is missing.
 */
static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
	pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
	uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	/* SCLK/VDDC Dependency Table has to exist. */
	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warning("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	/*
	 * Since voltage in the sclk/vddc dependency table is not
	 * necessarily in ascending order because of ELB voltage
	 * patching, loop through entire list to find exact voltage.
	 */
	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	/*
	 * If voltage is not found in the first pass, loop again to
	 * find the best match, equal or higher value.
	 */
	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}
431
432static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
433 pp_atomctrl_voltage_table_entry *tab,
434 SMU71_Discrete_VoltageLevel *smc_voltage_tab)
435{
436 int result;
437
438 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
439 &smc_voltage_tab->StdVoltageHiSidd,
440 &smc_voltage_tab->StdVoltageLoSidd);
441 if (0 != result) {
442 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
443 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
444 }
445
446 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
447 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
448 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
449
450 return 0;
451}
452
/*
 * Populate the SMC VDDC level table from the driver's VDDC voltage table.
 * For GPIO-controlled voltage the per-entry SMIO mask is OR'd in; for
 * SVID2 control Smio is cleared.  The level count is byte-swapped for
 * the SMC at the end.
 */
static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU71_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = iceland_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);

		/* GPIO voltage control */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
			table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
			table->VddcLevel[count].Smio = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}
478
/*
 * Populate the SMC VDDCI level table from the driver's VDDCI voltage
 * table.  GPIO-controlled entries get their SMIO mask OR'd in; the
 * `Smio |= 0` in the else branch is a deliberate no-op kept for symmetry.
 */
static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU71_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = iceland_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio |= 0;
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}
503
504static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
505 SMU71_Discrete_DpmTable *table)
506{
507 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
508 uint32_t count;
509 int result;
510
511 table->MvddLevelCount = data->mvdd_voltage_table.count;
512
513 for (count = 0; count < table->VddciLevelCount; count++) {
514 result = iceland_populate_smc_voltage_table(hwmgr,
515 &(data->mvdd_voltage_table.entries[count]),
516 &table->MvddLevel[count]);
517 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
518 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
519 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
520 else
521 table->MvddLevel[count].Smio |= 0;
522 }
523
524 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
525
526 return 0;
527}
528
529
/*
 * Populate all three SMC voltage tables (VDDC, VDDCI, MVDD) into the
 * discrete DPM table; fails fast on the first populator that errors.
 */
static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	int result;

	result = iceland_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = iceland_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}
549
/*
 * Fill the SMC ULV (ultra-low voltage) state.  The ULV voltage comes from
 * the powerplay tables; a value of 0 disables ULV support entirely.  The
 * offset from the minimum SCLK/VDDC voltage is programmed either as a raw
 * offset (SMIO/GPIO path) or as a VID-scaled offset (SVID2 path), clamped
 * to 0 when the ULV voltage exceeds the minimum table voltage.
 * VddcOffsetVid is a single byte, hence no endian conversion for it.
 */
static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU71_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}
593
594static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
595 SMU71_Discrete_Ulv *ulv_level)
596{
597 return iceland_populate_ulv_level(hwmgr, ulv_level);
598}
599
/*
 * Populate the SMC PCIe link-level table from the driver's PCIe speed
 * DPM table, with fixed up/down thresholds, and record the link-level
 * count plus the PCIe DPM enable mask on the hwmgr backend.
 * NOTE(review): the loop uses `i <=` so it writes count+1 entries —
 * consistent with the comment that index `count` is reserved for the
 * PCIE boot level, but it also reads dpm_levels[count]; confirm that
 * slot is always valid.
 */
static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	uint32_t i;

	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity =
			1;
		table->LinkLevel[i].SPC =
			(uint8_t)(data->pcie_spc_cap & 0xff);
		table->LinkLevel[i].DownThreshold =
			PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold =
			PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
630
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * Programs the SPLL function-control and spread-spectrum register images
 * for one graphics level: reference/post dividers from the VBIOS divider
 * query, the feedback divider with fractional accumulation enabled, and
 * (when the EngineSpreadSpectrumSupport cap is set and the VBIOS has SS
 * info for the VCO frequency) the CLKS/CLKV spread-spectrum parameters.
 *
 * @param hwmgr         the address of the hardware manager
 * @param engine_clock  the engine clock to use to populate the structure
 * @param sclk          the SMC SCLK structure to be populated
 */
static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	/* NOTE(review): the comment above says 14+12 bits but the mask below
	 * keeps 26 bits (0x3FFFFFF) — confirm against the SPLL_FB_DIV field width. */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	sclk->SclkFrequency        = engine_clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}
715
716static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
717 const struct phm_phase_shedding_limits_table *pl,
718 uint32_t sclk, uint32_t *p_shed)
719{
720 unsigned int i;
721
722 /* use the minimum phase shedding */
723 *p_shed = 1;
724
725 for (i = 0; i < pl->count; i++) {
726 if (sclk < pl->entries[i].Sclk) {
727 *p_shed = i;
728 break;
729 }
730 }
731 return 0;
732}
733
734/**
735 * Populates single SMC SCLK structure using the provided engine clock
736 *
737 * @param hwmgr the address of the hardware manager
738 * @param engine_clock the engine clock to use to populate the structure
739 * @param sclk the SMC SCLK structure to be populated
740 */
741static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
742 uint32_t engine_clock,
743 uint16_t sclk_activity_level_threshold,
744 SMU71_Discrete_GraphicsLevel *graphic_level)
745{
746 int result;
747 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
748
749 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
750
751 /* populate graphics levels*/
752 result = iceland_get_dependecy_volt_by_clk(hwmgr,
753 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
754 &graphic_level->MinVddc);
755 PP_ASSERT_WITH_CODE((0 == result),
756 "can not find VDDC voltage value for VDDC \
757 engine clock dependency table", return result);
758
759 /* SCLK frequency in units of 10KHz*/
760 graphic_level->SclkFrequency = engine_clock;
761 graphic_level->MinVddcPhases = 1;
762
763 if (data->vddc_phase_shed_control)
764 iceland_populate_phase_value_based_on_sclk(hwmgr,
765 hwmgr->dyn_state.vddc_phase_shed_limits_table,
766 engine_clock,
767 &graphic_level->MinVddcPhases);
768
769 /* Indicates maximum activity level for this performance level. 50% for now*/
770 graphic_level->ActivityLevel = sclk_activity_level_threshold;
771
772 graphic_level->CcPwrDynRm = 0;
773 graphic_level->CcPwrDynRm1 = 0;
774 /* this level can be used if activity is high enough.*/
775 graphic_level->EnabledForActivity = 0;
776 /* this level can be used for throttling.*/
777 graphic_level->EnabledForThrottle = 1;
778 graphic_level->UpHyst = 0;
779 graphic_level->DownHyst = 100;
780 graphic_level->VoltageDownHyst = 0;
781 graphic_level->PowerThrottle = 0;
782
783 data->display_timing.min_clock_in_sr =
784 hwmgr->display_config.min_core_set_clock_in_sr;
785
786 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
787 PHM_PlatformCaps_SclkDeepSleep))
788 graphic_level->DeepSleepDivId =
789 smu7_get_sleep_divider_id_from_clock(engine_clock,
790 data->display_timing.min_clock_in_sr);
791
792 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
793 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
794
795 if (0 == result) {
796 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
797 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
798 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
799 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
800 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
801 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
802 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
803 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
804 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
805 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
806 }
807
808 return result;
809}
810
/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * Builds every graphics level from the SCLK DPM table, applies the usual
 * fixups (only level 0 enabled for activity, deep-sleep divider only on
 * levels 0-1, highest level gets the HIGH display watermark), derives the
 * lowest/mid/highest enabled PCIe levels from the PCIe enable mask, then
 * downloads the whole level array to SMC RAM in one copy.
 *
 * @param hwmgr the address of the hardware manager
 */
int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	/* (sic) "adress" — local name kept as-is */
	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
				offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
						SMU71_MAX_LEVELS_GRAPHICS;

	SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;

	uint32_t i;
	uint8_t highest_pcie_level_enabled = 0;
	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
	uint8_t count = 0;
	int result = 0;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = iceland_populate_single_graphic_level(hwmgr,
					dpm_table->sclk_table.dpm_levels[i].value,
					(uint16_t)smu_data->activity_target[i],
					&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result != 0)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	/* highest set bit (minus trailing gap) in the PCIe enable mask */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << (highest_pcie_level_enabled + 1))) != 0) {
		highest_pcie_level_enabled++;
	}

	/* lowest set bit */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << lowest_pcie_level_enabled)) == 0) {
		lowest_pcie_level_enabled++;
	}

	/* next enabled level above the lowest, capped at the highest */
	while ((count < highest_pcie_level_enabled) &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
		count++;
	}

	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
		(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;


	/* set pcieDpmLevel to highest_pcie_level_enabled*/
	for (i = 2; i < dpm_table->sclk_table.count; i++) {
		smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
	}

	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

	/* set pcieDpmLevel to mid_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
				(uint8_t *)levels, (uint32_t)level_array_size,
				SMC_RAM_END);

	return result;
}
901
902/**
903 * Populates the SMC MCLK structure using the provided memory clock
904 *
905 * @param hwmgr the address of the hardware manager
906 * @param memory_clock the memory clock to use to populate the structure
907 * @param sclk the SMC SCLK structure to be populated
908 */
909static int iceland_calculate_mclk_params(
910 struct pp_hwmgr *hwmgr,
911 uint32_t memory_clock,
912 SMU71_Discrete_MemoryLevel *mclk,
913 bool strobe_mode,
914 bool dllStateOn
915 )
916{
917 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
918
919 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
920 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
921 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
922 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
923 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
924 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
925 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
926 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
927 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
928
929 pp_atomctrl_memory_clock_param mpll_param;
930 int result;
931
932 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
933 memory_clock, &mpll_param, strobe_mode);
934 PP_ASSERT_WITH_CODE(0 == result,
935 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
936
937 /* MPLL_FUNC_CNTL setup*/
938 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
939
940 /* MPLL_FUNC_CNTL_1 setup*/
941 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
942 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
943 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
944 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
945 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
946 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
947
948 /* MPLL_AD_FUNC_CNTL setup*/
949 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
950 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
951
952 if (data->is_memory_gddr5) {
953 /* MPLL_DQ_FUNC_CNTL setup*/
954 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
955 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
956 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
957 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
958 }
959
960 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
961 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
962 /*
963 ************************************
964 Fref = Reference Frequency
965 NF = Feedback divider ratio
966 NR = Reference divider ratio
967 Fnom = Nominal VCO output frequency = Fref * NF / NR
968 Fs = Spreading Rate
969 D = Percentage down-spread / 2
970 Fint = Reference input frequency to PFD = Fref / NR
971 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
972 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
973 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
974 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
975 *************************************
976 */
977 pp_atomctrl_internal_ss_info ss_info;
978 uint32_t freq_nom;
979 uint32_t tmp;
980 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
981
982 /* for GDDR5 for all modes and DDR3 */
983 if (1 == mpll_param.qdr)
984 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
985 else
986 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
987
988 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
989 tmp = (freq_nom / reference_clock);
990 tmp = tmp * tmp;
991
992 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
993 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
994 /* ss.Info.speed_spectrum_rate -- in unit of khz */
995 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
996 /* = reference_clock * 5 / speed_spectrum_rate */
997 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
998
999 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1000 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1001 uint32_t clkv =
1002 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1003 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1004
1005 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1006 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1007 }
1008 }
1009
1010 /* MCLK_PWRMGT_CNTL setup */
1011 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1012 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1013 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1014 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1015 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1016 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1017
1018
1019 /* Save the result data to outpupt memory level structure */
1020 mclk->MclkFrequency = memory_clock;
1021 mclk->MpllFuncCntl = mpll_func_cntl;
1022 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1023 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1024 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1025 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1026 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1027 mclk->DllCntl = dll_cntl;
1028 mclk->MpllSs1 = mpll_ss1;
1029 mclk->MpllSs2 = mpll_ss2;
1030
1031 return 0;
1032}
1033
/*
 * Map a memory clock (10 kHz units) onto the 4-bit MC frequency-ratio
 * index.  Strobe mode uses a finer-grained table (2500-unit steps) than
 * normal mode (5000-unit steps); results are clamped to [0x00, 0x0f].
 */
static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t low, high, base, step;

	if (strobe_mode) {
		low = 12500;
		high = 47500;
		base = 10000;
		step = 2500;
	} else {
		low = 65000;
		high = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < low)
		return 0x00;
	if (memory_clock > high)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
1059
/*
 * DDR3 variant of the MC frequency-ratio lookup: 5000-unit steps with a
 * +1 bias above the 10000 floor, clamped to [0, 0x0f].
 */
static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
1074
1075static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1076 uint32_t memory_clock, uint32_t *p_shed)
1077{
1078 unsigned int i;
1079
1080 *p_shed = 1;
1081
1082 for (i = 0; i < pl->count; i++) {
1083 if (memory_clock < pl->entries[i].Mclk) {
1084 *p_shed = i;
1085 break;
1086 }
1087 }
1088
1089 return 0;
1090}
1091
/*
 * Fill one SMC memory level for the given memory clock: look up minimum
 * VDDC/VDDCI from the dependency tables, decide strobe/EDC/DLL settings,
 * compute the MPLL parameters and convert all fields to SMC byte order.
 */
static int iceland_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	/* Fixed thresholds (10 kHz units) driving the EDC/strobe decisions. */
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
		/* No separate VDDCI rail: track VDDC. */
		memory_level->MinVddci = memory_level->MinVddc;
	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = iceland_get_dependecy_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* stutter mode not support on iceland */

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			/* Pick the DLL state from the MC_SEQ fuse registers
			 * depending on whether the strobe ratio reaches the
			 * fused threshold in MC_SEQ_MISC7[19:16]. */
			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = iceland_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		/* Convert everything the SMC reads to its expected byte order. */
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
1211
1212/**
1213 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
1214 *
1215 * @param hwmgr the address of the hardware manager
1216 */
1217
1218int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1219{
1220 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1221 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
1222 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1223 int result;
1224
1225 /* populate MCLK dpm table to SMU7 */
1226 uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
1227 uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
1228 SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
1229 uint32_t i;
1230
1231 memset(levels, 0x00, level_array_size);
1232
1233 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1234 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1235 "can not populate memory level as memory clock is zero", return -EINVAL);
1236 result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
1237 &(smu_data->smc_state_table.MemoryLevel[i]));
1238 if (0 != result) {
1239 return result;
1240 }
1241 }
1242
1243 /* Only enable level 0 for now.*/
1244 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1245
1246 /*
1247 * in order to prevent MC activity from stutter mode to push DPM up.
1248 * the UVD change complements this by putting the MCLK in a higher state
1249 * by default such that we are not effected by up threshold or and MCLK DPM latency.
1250 */
1251 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1252 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1253
1254 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1255 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1256 /* set highest level watermark to high*/
1257 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1258
1259 /* level count will send to smc once at init smc table and never change*/
1260 result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
1261 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
1262 SMC_RAM_END);
1263
1264 return result;
1265}
1266
1267static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1268 SMU71_Discrete_VoltageLevel *voltage)
1269{
1270 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1271
1272 uint32_t i = 0;
1273
1274 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1275 /* find mvdd value which clock is more than request */
1276 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1277 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1278 /* Always round to higher voltage. */
1279 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1280 break;
1281 }
1282 }
1283
1284 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1285 "MVDD Voltage is outside the supported range.", return -EINVAL);
1286
1287 } else {
1288 return -EINVAL;
1289 }
1290
1291 return 0;
1292}
1293
/*
 * Fill in the ACPI (lowest-power) SCLK and MCLK levels of the SMC DPM
 * table: minimum voltages, SPLL powered down/reset, memory DLLs disabled,
 * and all fields converted to SMC byte order.
 */
static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
					SMU71_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	/* NOTE(review): always 0 here, so MinVddcPhases below is always 1 --
	 * confirm phase shedding is intentionally disabled for ACPI state. */
	uint32_t vddc_phase_shed_control = 0;

	SMU71_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset while in ACPI state. */
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
1435
/* UVD DPM levels are not programmed on Iceland; intentional no-op stub. */
static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1441
/* VCE DPM levels are not programmed on Iceland; intentional no-op stub. */
static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1447
/* ACP DPM levels are not programmed on Iceland; intentional no-op stub. */
static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1453
/* SAMU DPM levels are not programmed on Iceland; intentional no-op stub. */
static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
	SMU71_Discrete_DpmTable *table)
{
	return 0;
}
1459
/*
 * Have the VBIOS program DRAM timings for the given engine/memory clock
 * pair, then read the resulting MC arbiter registers back and store them
 * (in SMC byte order) into one MC ARB table entry.
 */
static int iceland_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back the registers the VBIOS just programmed. */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1488
1489/**
1490 * Setup parameters for the MC ARB.
1491 *
1492 * @param hwmgr the address of the powerplay hardware manager.
1493 * @return always 0
1494 * This function is to be called from the SetPowerState table.
1495 */
1496static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1497{
1498 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1499 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
1500 int result = 0;
1501 SMU71_Discrete_MCArbDramTimingTable arb_regs;
1502 uint32_t i, j;
1503
1504 memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
1505
1506 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1507 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1508 result = iceland_populate_memory_timing_parameters
1509 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1510 data->dpm_table.mclk_table.dpm_levels[j].value,
1511 &arb_regs.entries[i][j]);
1512
1513 if (0 != result) {
1514 break;
1515 }
1516 }
1517 }
1518
1519 if (0 == result) {
1520 result = smu7_copy_bytes_to_smc(
1521 hwmgr->smumgr,
1522 smu_data->smu7_data.arb_table_start,
1523 (uint8_t *)&arb_regs,
1524 sizeof(SMU71_Discrete_MCArbDramTimingTable),
1525 SMC_RAM_END
1526 );
1527 }
1528
1529 return result;
1530}
1531
1532static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1533 SMU71_Discrete_DpmTable *table)
1534{
1535 int result = 0;
1536 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1537 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
1538 table->GraphicsBootLevel = 0;
1539 table->MemoryBootLevel = 0;
1540
1541 /* find boot level from dpm table*/
1542 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1543 data->vbios_boot_state.sclk_bootup_value,
1544 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1545
1546 if (0 != result) {
1547 smu_data->smc_state_table.GraphicsBootLevel = 0;
1548 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
1549 in dependency table. Using Graphics DPM level 0!");
1550 result = 0;
1551 }
1552
1553 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1554 data->vbios_boot_state.mclk_bootup_value,
1555 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1556
1557 if (0 != result) {
1558 smu_data->smc_state_table.MemoryBootLevel = 0;
1559 printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
1560 in dependency table. Using Memory DPM level 0!");
1561 result = 0;
1562 }
1563
1564 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1565 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1566 table->BootVddci = table->BootVddc;
1567 else
1568 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1569
1570 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1571
1572 return result;
1573}
1574
1575static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
1576 SMU71_Discrete_MCRegisters *mc_reg_table)
1577{
1578 const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
1579
1580 uint32_t i, j;
1581
1582 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1583 if (smu_data->mc_reg_table.validflag & 1<<j) {
1584 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1585 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1586 mc_reg_table->address[i].s0 =
1587 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1588 mc_reg_table->address[i].s1 =
1589 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1590 i++;
1591 }
1592 }
1593
1594 mc_reg_table->last = (uint8_t)i;
1595
1596 return 0;
1597}
1598
1599/*convert register values from driver to SMC format */
1600static void iceland_convert_mc_registers(
1601 const struct iceland_mc_reg_entry *entry,
1602 SMU71_Discrete_MCRegisterSet *data,
1603 uint32_t num_entries, uint32_t valid_flag)
1604{
1605 uint32_t i, j;
1606
1607 for (i = 0, j = 0; j < num_entries; j++) {
1608 if (valid_flag & 1<<j) {
1609 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1610 i++;
1611 }
1612 }
1613}
1614
1615static int iceland_convert_mc_reg_table_entry_to_smc(
1616 struct pp_smumgr *smumgr,
1617 const uint32_t memory_clock,
1618 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
1619 )
1620{
1621 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
1622 uint32_t i = 0;
1623
1624 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1625 if (memory_clock <=
1626 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1627 break;
1628 }
1629 }
1630
1631 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1632 --i;
1633
1634 iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1635 mc_reg_table_data, smu_data->mc_reg_table.last,
1636 smu_data->mc_reg_table.validflag);
1637
1638 return 0;
1639}
1640
1641static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1642 SMU71_Discrete_MCRegisters *mc_regs)
1643{
1644 int result = 0;
1645 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1646 int res;
1647 uint32_t i;
1648
1649 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1650 res = iceland_convert_mc_reg_table_entry_to_smc(
1651 hwmgr->smumgr,
1652 data->dpm_table.mclk_table.dpm_levels[i].value,
1653 &mc_regs->data[i]
1654 );
1655
1656 if (0 != res)
1657 result = res;
1658 }
1659
1660 return result;
1661}
1662
/*
 * Re-upload the per-MCLK-level MC register sets to SMC RAM after an
 * overdrive MCLK change.  No-op unless DPMTABLE_OD_UPDATE_MCLK is set.
 */
static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t address;
	int32_t result;

	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;


	memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));

	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));

	if (result != 0)
		return result;


	/* Only the per-level data[] sets are refreshed here; the address
	 * list written at init does not change. */
	address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);

	return  smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
				 (uint8_t *)&smu_data->mc_regs.data[0],
				sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
				SMC_RAM_END);
}
1690
/*
 * Build the full MC register table (address list plus one register set
 * per MCLK DPM level) and upload it to SMC RAM once at init time.
 */
static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
	result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
}
1709
1710static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1711{
1712 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1713 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
1714 uint8_t count, level;
1715
1716 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1717
1718 for (level = 0; level < count; level++) {
1719 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1720 >= data->vbios_boot_state.sclk_bootup_value) {
1721 smu_data->smc_state_table.GraphicsBootLevel = level;
1722 break;
1723 }
1724 }
1725
1726 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1727
1728 for (level = 0; level < count; level++) {
1729 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1730 >= data->vbios_boot_state.mclk_bootup_value) {
1731 smu_data->smc_state_table.MemoryBootLevel = level;
1732 break;
1733 }
1734 }
1735
1736 return 0;
1737}
1738
/*
 * Fill the BAPM (power containment) parameters of the SMC DPM table from the
 * CAC/TDP table, the PPM table and the per-ASIC power-tune defaults.
 * All multi-byte fields are converted to SMC (big-endian) byte order before
 * upload; do not reorder the CONVERT_* calls relative to the assignments.
 */
static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
	struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	uint16_t *def1, *def2;
	int i, j, k;


	/*
	 * TDP number of fraction bits are changed from 8 to 7 for Iceland
	 * as requested by SMC team
	 */

	/* usTDP is in watts; * 256 converts to the SMC fixed-point format. */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));


	dpm_table->DTETjOffset = 0;

	/* temperature_high is in units of PP_TEMPERATURE_UNITS_PER_CENTIGRADES. */
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	/* The following are for new Iceland Multi-input fan/thermal control */
	if (NULL != ppm) {
		/* dgpu_tdp is in mW (hence / 1000); * 256 again for fixed point. */
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		/* No PPM table: disable the package power/temperature limits. */
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
	/*
	 * def1/def2 walk the flat per-ASIC default tables in
	 * [iteration][source][sink] order; the defaults arrays are assumed to
	 * hold exactly SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES *
	 * SMU71_DTE_SINKS entries -- TODO confirm against the defaults struct.
	 */
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU71_DTE_SOURCES; j++) {
			for (k = 0; k < SMU71_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
1796
1797static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1798 SMU71_Discrete_DpmTable *tab)
1799{
1800 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1801
1802 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1803 tab->SVI2Enable |= VDDC_ON_SVI2;
1804
1805 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1806 tab->SVI2Enable |= VDDCI_ON_SVI2;
1807 else
1808 tab->MergedVddci = 1;
1809
1810 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1811 tab->SVI2Enable |= MVDD_ON_SVI2;
1812
1813 PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1814 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1815
1816 return 0;
1817}
1818
1819/**
1820 * Initializes the SMC table and uploads it
1821 *
1822 * @param hwmgr the address of the powerplay hardware manager.
1823 * @param pInput the pointer to input data (PowerState)
1824 * @return always 0
1825 */
1826int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
1827{
1828 int result;
1829 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1830 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
1831 SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1832
1833
1834 iceland_initialize_power_tune_defaults(hwmgr);
1835 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1836
1837 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
1838 iceland_populate_smc_voltage_tables(hwmgr, table);
1839 }
1840
1841 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1842 PHM_PlatformCaps_AutomaticDCTransition))
1843 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1844
1845
1846 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1847 PHM_PlatformCaps_StepVddc))
1848 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1849
1850 if (data->is_memory_gddr5)
1851 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1852
1853
1854 if (data->ulv_supported) {
1855 result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
1856 PP_ASSERT_WITH_CODE(0 == result,
1857 "Failed to initialize ULV state!", return result;);
1858
1859 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1860 ixCG_ULV_PARAMETER, 0x40035);
1861 }
1862
1863 result = iceland_populate_smc_link_level(hwmgr, table);
1864 PP_ASSERT_WITH_CODE(0 == result,
1865 "Failed to initialize Link Level!", return result;);
1866
1867 result = iceland_populate_all_graphic_levels(hwmgr);
1868 PP_ASSERT_WITH_CODE(0 == result,
1869 "Failed to initialize Graphics Level!", return result;);
1870
1871 result = iceland_populate_all_memory_levels(hwmgr);
1872 PP_ASSERT_WITH_CODE(0 == result,
1873 "Failed to initialize Memory Level!", return result;);
1874
1875 result = iceland_populate_smc_acpi_level(hwmgr, table);
1876 PP_ASSERT_WITH_CODE(0 == result,
1877 "Failed to initialize ACPI Level!", return result;);
1878
1879 result = iceland_populate_smc_vce_level(hwmgr, table);
1880 PP_ASSERT_WITH_CODE(0 == result,
1881 "Failed to initialize VCE Level!", return result;);
1882
1883 result = iceland_populate_smc_acp_level(hwmgr, table);
1884 PP_ASSERT_WITH_CODE(0 == result,
1885 "Failed to initialize ACP Level!", return result;);
1886
1887 result = iceland_populate_smc_samu_level(hwmgr, table);
1888 PP_ASSERT_WITH_CODE(0 == result,
1889 "Failed to initialize SAMU Level!", return result;);
1890
1891 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
1892 /* need to populate the ARB settings for the initial state. */
1893 result = iceland_program_memory_timing_parameters(hwmgr);
1894 PP_ASSERT_WITH_CODE(0 == result,
1895 "Failed to Write ARB settings for the initial state.", return result;);
1896
1897 result = iceland_populate_smc_uvd_level(hwmgr, table);
1898 PP_ASSERT_WITH_CODE(0 == result,
1899 "Failed to initialize UVD Level!", return result;);
1900
1901 table->GraphicsBootLevel = 0;
1902 table->MemoryBootLevel = 0;
1903
1904 result = iceland_populate_smc_boot_level(hwmgr, table);
1905 PP_ASSERT_WITH_CODE(0 == result,
1906 "Failed to initialize Boot Level!", return result;);
1907
1908 result = iceland_populate_smc_initial_state(hwmgr);
1909 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
1910
1911 result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
1912 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
1913
1914 table->GraphicsVoltageChangeEnable = 1;
1915 table->GraphicsThermThrottleEnable = 1;
1916 table->GraphicsInterval = 1;
1917 table->VoltageInterval = 1;
1918 table->ThermalInterval = 1;
1919
1920 table->TemperatureLimitHigh =
1921 (data->thermal_temp_setting.temperature_high *
1922 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1923 table->TemperatureLimitLow =
1924 (data->thermal_temp_setting.temperature_low *
1925 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1926
1927 table->MemoryVoltageChangeEnable = 1;
1928 table->MemoryInterval = 1;
1929 table->VoltageResponseTime = 0;
1930 table->PhaseResponseTime = 0;
1931 table->MemoryThermThrottleEnable = 1;
1932 table->PCIeBootLinkLevel = 0;
1933 table->PCIeGenInterval = 1;
1934
1935 result = iceland_populate_smc_svi2_config(hwmgr, table);
1936 PP_ASSERT_WITH_CODE(0 == result,
1937 "Failed to populate SVI2 setting!", return result);
1938
1939 table->ThermGpio = 17;
1940 table->SclkStepSize = 0x4000;
1941
1942 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1943 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
1944 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
1945 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
1946 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
1947 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1948 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1949 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1950 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1951 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1952
1953 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
1954 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
1955 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
1956
1957 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
1958 result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
1959 offsetof(SMU71_Discrete_DpmTable, SystemFlags),
1960 (uint8_t *)&(table->SystemFlags),
1961 sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
1962 SMC_RAM_END);
1963
1964 PP_ASSERT_WITH_CODE(0 == result,
1965 "Failed to upload dpm data to SMC memory!", return result;);
1966
1967 /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
1968 result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
1969 smu_data->smu7_data.ulv_setting_starts,
1970 (uint8_t *)&(smu_data->ulv_setting),
1971 sizeof(SMU71_Discrete_Ulv),
1972 SMC_RAM_END);
1973
1974
1975 result = iceland_populate_initial_mc_reg_table(hwmgr);
1976 PP_ASSERT_WITH_CODE((0 == result),
1977 "Failed to populate initialize MC Reg table!", return result);
1978
1979 result = iceland_populate_pm_fuses(hwmgr);
1980 PP_ASSERT_WITH_CODE(0 == result,
1981 "Failed to populate PM fuses to SMC memory!", return result);
1982
1983 return 0;
1984}
1985
1986/**
1987* Set up the fan table to control the fan using the SMC.
1988* @param hwmgr the address of the powerplay hardware manager.
1989* @param pInput the pointer to input data
1990* @param pOutput the pointer to output data
1991* @param pStorage the pointer to temporary storage
1992* @param Result the last failure code
1993* @return result from set temperature range routine
1994*/
1995int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1996{
1997 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
1998 SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1999 uint32_t duty100;
2000 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2001 uint16_t fdo_min, slope1, slope2;
2002 uint32_t reference_clock;
2003 int res;
2004 uint64_t tmp64;
2005
2006 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2007 return 0;
2008
2009 if (0 == smu7_data->fan_table_start) {
2010 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2011 return 0;
2012 }
2013
2014 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2015
2016 if (0 == duty100) {
2017 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2018 return 0;
2019 }
2020
2021 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2022 do_div(tmp64, 10000);
2023 fdo_min = (uint16_t)tmp64;
2024
2025 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2026 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2027
2028 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2029 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2030
2031 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2032 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2033
2034 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2035 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2036 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2037
2038 fan_table.Slope1 = cpu_to_be16(slope1);
2039 fan_table.Slope2 = cpu_to_be16(slope2);
2040
2041 fan_table.FdoMin = cpu_to_be16(fdo_min);
2042
2043 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2044
2045 fan_table.HystUp = cpu_to_be16(1);
2046
2047 fan_table.HystSlope = cpu_to_be16(1);
2048
2049 fan_table.TempRespLim = cpu_to_be16(5);
2050
2051 reference_clock = smu7_get_xclk(hwmgr);
2052
2053 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2054
2055 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2056
2057 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2058
2059 /* fan_table.FanControl_GL_Flag = 1; */
2060
2061 res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2062
2063 return 0;
2064}
2065
2066
2067static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2068{
2069 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2070
2071 if (data->need_update_smu7_dpm_table &
2072 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2073 return iceland_program_memory_timing_parameters(hwmgr);
2074
2075 return 0;
2076}
2077
2078int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2079{
2080 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2081 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
2082
2083 int result = 0;
2084 uint32_t low_sclk_interrupt_threshold = 0;
2085
2086 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2087 PHM_PlatformCaps_SclkThrottleLowNotification)
2088 && (hwmgr->gfx_arbiter.sclk_threshold !=
2089 data->low_sclk_interrupt_threshold)) {
2090 data->low_sclk_interrupt_threshold =
2091 hwmgr->gfx_arbiter.sclk_threshold;
2092 low_sclk_interrupt_threshold =
2093 data->low_sclk_interrupt_threshold;
2094
2095 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2096
2097 result = smu7_copy_bytes_to_smc(
2098 hwmgr->smumgr,
2099 smu_data->smu7_data.dpm_table_start +
2100 offsetof(SMU71_Discrete_DpmTable,
2101 LowSclkInterruptThreshold),
2102 (uint8_t *)&low_sclk_interrupt_threshold,
2103 sizeof(uint32_t),
2104 SMC_RAM_END);
2105 }
2106
2107 result = iceland_update_and_upload_mc_reg_table(hwmgr);
2108
2109 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2110
2111 result = iceland_program_mem_timing_parameters(hwmgr);
2112 PP_ASSERT_WITH_CODE((result == 0),
2113 "Failed to program memory timing parameters!",
2114 );
2115
2116 return result;
2117}
2118
2119uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
2120{
2121 switch (type) {
2122 case SMU_SoftRegisters:
2123 switch (member) {
2124 case HandshakeDisables:
2125 return offsetof(SMU71_SoftRegisters, HandshakeDisables);
2126 case VoltageChangeTimeout:
2127 return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
2128 case AverageGraphicsActivity:
2129 return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
2130 case PreVBlankGap:
2131 return offsetof(SMU71_SoftRegisters, PreVBlankGap);
2132 case VBlankTimeout:
2133 return offsetof(SMU71_SoftRegisters, VBlankTimeout);
2134 case UcodeLoadStatus:
2135 return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
2136 }
2137 case SMU_Discrete_DpmTable:
2138 switch (member) {
2139 case LowSclkInterruptThreshold:
2140 return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
2141 }
2142 }
2143 printk("cant't get the offset of type %x member %x \n", type, member);
2144 return 0;
2145}
2146
2147uint32_t iceland_get_mac_definition(uint32_t value)
2148{
2149 switch (value) {
2150 case SMU_MAX_LEVELS_GRAPHICS:
2151 return SMU71_MAX_LEVELS_GRAPHICS;
2152 case SMU_MAX_LEVELS_MEMORY:
2153 return SMU71_MAX_LEVELS_MEMORY;
2154 case SMU_MAX_LEVELS_LINK:
2155 return SMU71_MAX_LEVELS_LINK;
2156 case SMU_MAX_ENTRIES_SMIO:
2157 return SMU71_MAX_ENTRIES_SMIO;
2158 case SMU_MAX_LEVELS_VDDC:
2159 return SMU71_MAX_LEVELS_VDDC;
2160 case SMU_MAX_LEVELS_VDDCI:
2161 return SMU71_MAX_LEVELS_VDDCI;
2162 case SMU_MAX_LEVELS_MVDD:
2163 return SMU71_MAX_LEVELS_MVDD;
2164 }
2165
2166 printk("cant't get the mac of %x \n", value);
2167 return 0;
2168}
2169
2170/**
2171 * Get the location of various tables inside the FW image.
2172 *
2173 * @param hwmgr the address of the powerplay hardware manager.
2174 * @return always 0
2175 */
2176int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
2177{
2178 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2179 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
2180
2181 uint32_t tmp;
2182 int result;
2183 bool error = false;
2184
2185 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2186 SMU71_FIRMWARE_HEADER_LOCATION +
2187 offsetof(SMU71_Firmware_Header, DpmTable),
2188 &tmp, SMC_RAM_END);
2189
2190 if (0 == result) {
2191 smu7_data->dpm_table_start = tmp;
2192 }
2193
2194 error |= (0 != result);
2195
2196 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2197 SMU71_FIRMWARE_HEADER_LOCATION +
2198 offsetof(SMU71_Firmware_Header, SoftRegisters),
2199 &tmp, SMC_RAM_END);
2200
2201 if (0 == result) {
2202 data->soft_regs_start = tmp;
2203 smu7_data->soft_regs_start = tmp;
2204 }
2205
2206 error |= (0 != result);
2207
2208
2209 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2210 SMU71_FIRMWARE_HEADER_LOCATION +
2211 offsetof(SMU71_Firmware_Header, mcRegisterTable),
2212 &tmp, SMC_RAM_END);
2213
2214 if (0 == result) {
2215 smu7_data->mc_reg_table_start = tmp;
2216 }
2217
2218 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2219 SMU71_FIRMWARE_HEADER_LOCATION +
2220 offsetof(SMU71_Firmware_Header, FanTable),
2221 &tmp, SMC_RAM_END);
2222
2223 if (0 == result) {
2224 smu7_data->fan_table_start = tmp;
2225 }
2226
2227 error |= (0 != result);
2228
2229 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2230 SMU71_FIRMWARE_HEADER_LOCATION +
2231 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
2232 &tmp, SMC_RAM_END);
2233
2234 if (0 == result) {
2235 smu7_data->arb_table_start = tmp;
2236 }
2237
2238 error |= (0 != result);
2239
2240
2241 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2242 SMU71_FIRMWARE_HEADER_LOCATION +
2243 offsetof(SMU71_Firmware_Header, Version),
2244 &tmp, SMC_RAM_END);
2245
2246 if (0 == result) {
2247 hwmgr->microcode_version_info.SMC = tmp;
2248 }
2249
2250 error |= (0 != result);
2251
2252 result = smu7_read_smc_sram_dword(hwmgr->smumgr,
2253 SMU71_FIRMWARE_HEADER_LOCATION +
2254 offsetof(SMU71_Firmware_Header, UlvSettings),
2255 &tmp, SMC_RAM_END);
2256
2257 if (0 == result) {
2258 smu7_data->ulv_setting_starts = tmp;
2259 }
2260
2261 error |= (0 != result);
2262
2263 return error ? 1 : 0;
2264}
2265
2266/*---------------------------MC----------------------------*/
2267
2268static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2269{
2270 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2271}
2272
2273static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2274{
2275 bool result = true;
2276
2277 switch (in_reg) {
2278 case mmMC_SEQ_RAS_TIMING:
2279 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2280 break;
2281
2282 case mmMC_SEQ_DLL_STBY:
2283 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2284 break;
2285
2286 case mmMC_SEQ_G5PDX_CMD0:
2287 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2288 break;
2289
2290 case mmMC_SEQ_G5PDX_CMD1:
2291 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2292 break;
2293
2294 case mmMC_SEQ_G5PDX_CTRL:
2295 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2296 break;
2297
2298 case mmMC_SEQ_CAS_TIMING:
2299 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2300 break;
2301
2302 case mmMC_SEQ_MISC_TIMING:
2303 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2304 break;
2305
2306 case mmMC_SEQ_MISC_TIMING2:
2307 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2308 break;
2309
2310 case mmMC_SEQ_PMG_DVS_CMD:
2311 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2312 break;
2313
2314 case mmMC_SEQ_PMG_DVS_CTL:
2315 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2316 break;
2317
2318 case mmMC_SEQ_RD_CTL_D0:
2319 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2320 break;
2321
2322 case mmMC_SEQ_RD_CTL_D1:
2323 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2324 break;
2325
2326 case mmMC_SEQ_WR_CTL_D0:
2327 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2328 break;
2329
2330 case mmMC_SEQ_WR_CTL_D1:
2331 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2332 break;
2333
2334 case mmMC_PMG_CMD_EMRS:
2335 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2336 break;
2337
2338 case mmMC_PMG_CMD_MRS:
2339 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2340 break;
2341
2342 case mmMC_PMG_CMD_MRS1:
2343 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2344 break;
2345
2346 case mmMC_SEQ_PMG_TIMING:
2347 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2348 break;
2349
2350 case mmMC_PMG_CMD_MRS2:
2351 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2352 break;
2353
2354 case mmMC_SEQ_WR_CTL_2:
2355 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2356 break;
2357
2358 default:
2359 result = false;
2360 break;
2361 }
2362
2363 return result;
2364}
2365
2366static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
2367{
2368 uint32_t i;
2369 uint16_t address;
2370
2371 for (i = 0; i < table->last; i++) {
2372 table->mc_reg_address[i].s0 =
2373 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2374 ? address : table->mc_reg_address[i].s1;
2375 }
2376 return 0;
2377}
2378
2379static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2380 struct iceland_mc_reg_table *ni_table)
2381{
2382 uint8_t i, j;
2383
2384 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2385 "Invalid VramInfo table.", return -EINVAL);
2386 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2387 "Invalid VramInfo table.", return -EINVAL);
2388
2389 for (i = 0; i < table->last; i++) {
2390 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2391 }
2392 ni_table->last = table->last;
2393
2394 for (i = 0; i < table->num_entries; i++) {
2395 ni_table->mc_reg_table_entry[i].mclk_max =
2396 table->mc_reg_table_entry[i].mclk_max;
2397 for (j = 0; j < table->last; j++) {
2398 ni_table->mc_reg_table_entry[i].mc_data[j] =
2399 table->mc_reg_table_entry[i].mc_data[j];
2400 }
2401 }
2402
2403 ni_table->num_entries = table->num_entries;
2404
2405 return 0;
2406}
2407
2408/**
2409 * VBIOS omits some information to reduce size, we need to recover them here.
2410 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
2411 * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
2412 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
2413 * 3. need to set these data for each clock range
2414 *
2415 * @param hwmgr the address of the powerplay hardware manager.
2416 * @param table the address of MCRegTable
2417 * @return always 0
2418 */
2419static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2420 struct iceland_mc_reg_table *table)
2421{
2422 uint8_t i, j, k;
2423 uint32_t temp_reg;
2424 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2425
2426 for (i = 0, j = table->last; i < table->last; i++) {
2427 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2428 "Invalid VramInfo table.", return -EINVAL);
2429
2430 switch (table->mc_reg_address[i].s1) {
2431
2432 case mmMC_SEQ_MISC1:
2433 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2434 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2435 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2436 for (k = 0; k < table->num_entries; k++) {
2437 table->mc_reg_table_entry[k].mc_data[j] =
2438 ((temp_reg & 0xffff0000)) |
2439 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2440 }
2441 j++;
2442 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2443 "Invalid VramInfo table.", return -EINVAL);
2444
2445 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2446 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2447 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2448 for (k = 0; k < table->num_entries; k++) {
2449 table->mc_reg_table_entry[k].mc_data[j] =
2450 (temp_reg & 0xffff0000) |
2451 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2452
2453 if (!data->is_memory_gddr5) {
2454 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2455 }
2456 }
2457 j++;
2458 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2459 "Invalid VramInfo table.", return -EINVAL);
2460
2461 if (!data->is_memory_gddr5) {
2462 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2463 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2464 for (k = 0; k < table->num_entries; k++) {
2465 table->mc_reg_table_entry[k].mc_data[j] =
2466 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2467 }
2468 j++;
2469 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2470 "Invalid VramInfo table.", return -EINVAL);
2471 }
2472
2473 break;
2474
2475 case mmMC_SEQ_RESERVE_M:
2476 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2477 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2478 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2479 for (k = 0; k < table->num_entries; k++) {
2480 table->mc_reg_table_entry[k].mc_data[j] =
2481 (temp_reg & 0xffff0000) |
2482 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2483 }
2484 j++;
2485 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2486 "Invalid VramInfo table.", return -EINVAL);
2487 break;
2488
2489 default:
2490 break;
2491 }
2492
2493 }
2494
2495 table->last = j;
2496
2497 return 0;
2498}
2499
2500static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
2501{
2502 uint8_t i, j;
2503 for (i = 0; i < table->last; i++) {
2504 for (j = 1; j < table->num_entries; j++) {
2505 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2506 table->mc_reg_table_entry[j].mc_data[i]) {
2507 table->validflag |= (1<<i);
2508 break;
2509 }
2510 }
2511 }
2512
2513 return 0;
2514}
2515
2516int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2517{
2518 int result;
2519 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
2520 pp_atomctrl_mc_reg_table *table;
2521 struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2522 uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
2523
2524 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2525
2526 if (NULL == table)
2527 return -ENOMEM;
2528
2529 /* Program additional LP registers that are no longer programmed by VBIOS */
2530 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2531 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2532 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2533 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2534 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2535 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2536 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2537 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2538 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2539 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2540 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2541 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2542 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2543 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2544 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2545 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2546 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2547 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2548 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2549 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2550
2551 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2552
2553 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2554
2555 if (0 == result)
2556 result = iceland_copy_vbios_smc_reg_table(table, ni_table);
2557
2558 if (0 == result) {
2559 iceland_set_s0_mc_reg_index(ni_table);
2560 result = iceland_set_mc_special_registers(hwmgr, ni_table);
2561 }
2562
2563 if (0 == result)
2564 iceland_set_valid_flag(ni_table);
2565
2566 kfree(table);
2567
2568 return result;
2569}
2570
2571bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
2572{
2573 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2574 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2575 ? true : false;
2576}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
index 8bc38cb17b7f..13c8dbbccaf2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
@@ -20,17 +20,21 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef _ICELAND_SMC_H
24#define _ICELAND_SMC_H
23 25
24#ifndef _TONGA_CLOCK_POWER_GATING_H_ 26#include "smumgr.h"
25#define _TONGA_CLOCK_POWER_GATING_H_
26 27
27#include "tonga_hwmgr.h"
28#include "pp_asicblocks.h"
29 28
30extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); 29int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
31extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); 30int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
32extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); 31int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
33extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); 32int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
34extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); 33int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
35extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); 34uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
36#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ 35uint32_t iceland_get_mac_definition(uint32_t value);
36int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
37int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
38bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
39#endif
40
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index f50658332d9d..eeafefc4acba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -35,120 +35,10 @@
35#include "smu/smu_7_1_1_d.h" 35#include "smu/smu_7_1_1_d.h"
36#include "smu/smu_7_1_1_sh_mask.h" 36#include "smu/smu_7_1_1_sh_mask.h"
37#include "cgs_common.h" 37#include "cgs_common.h"
38#include "iceland_smc.h"
38 39
39#define ICELAND_SMC_SIZE 0x20000 40#define ICELAND_SMC_SIZE 0x20000
40#define BUFFER_SIZE 80000
41#define MAX_STRING_SIZE 15
42#define BUFFER_SIZETWO 131072 /*128 *1024*/
43 41
44/**
45 * Set the address for reading/writing the SMC SRAM space.
46 * @param smumgr the address of the powerplay hardware manager.
47 * @param smcAddress the address in the SMC RAM to access.
48 */
49static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr,
50 uint32_t smcAddress, uint32_t limit)
51{
52 if (smumgr == NULL || smumgr->device == NULL)
53 return -EINVAL;
54 PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
55 "SMC address must be 4 byte aligned.",
56 return -1;);
57
58 PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
59 "SMC address is beyond the SMC RAM area.",
60 return -1;);
61
62 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
63 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
64
65 return 0;
66}
67
68/**
69 * Copy bytes from an array into the SMC RAM space.
70 *
71 * @param smumgr the address of the powerplay SMU manager.
72 * @param smcStartAddress the start address in the SMC RAM to copy bytes to.
73 * @param src the byte array to copy the bytes from.
74 * @param byteCount the number of bytes to copy.
75 */
76int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
77 uint32_t smcStartAddress, const uint8_t *src,
78 uint32_t byteCount, uint32_t limit)
79{
80 uint32_t addr;
81 uint32_t data, orig_data;
82 int result = 0;
83 uint32_t extra_shift;
84
85 if (smumgr == NULL || smumgr->device == NULL)
86 return -EINVAL;
87 PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
88 "SMC address must be 4 byte aligned.",
89 return 0;);
90
91 PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
92 "SMC address is beyond the SMC RAM area.",
93 return 0;);
94
95 addr = smcStartAddress;
96
97 while (byteCount >= 4) {
98 /*
99 * Bytes are written into the
100 * SMC address space with the MSB first
101 */
102 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
103
104 result = iceland_set_smc_sram_address(smumgr, addr, limit);
105
106 if (result)
107 goto out;
108
109 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
110
111 src += 4;
112 byteCount -= 4;
113 addr += 4;
114 }
115
116 if (0 != byteCount) {
117 /* Now write odd bytes left, do a read modify write cycle */
118 data = 0;
119
120 result = iceland_set_smc_sram_address(smumgr, addr, limit);
121 if (result)
122 goto out;
123
124 orig_data = cgs_read_register(smumgr->device,
125 mmSMC_IND_DATA_0);
126 extra_shift = 8 * (4 - byteCount);
127
128 while (byteCount > 0) {
129 data = (data << 8) + *src++;
130 byteCount--;
131 }
132
133 data <<= extra_shift;
134 data |= (orig_data & ~((~0UL) << extra_shift));
135
136 result = iceland_set_smc_sram_address(smumgr, addr, limit);
137 if (result)
138 goto out;
139
140 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
141 }
142
143out:
144 return result;
145}
146
147/**
148 * Deassert the reset'pin' (set it to high).
149 *
150 * @param smumgr the address of the powerplay hardware manager.
151 */
152static int iceland_start_smc(struct pp_smumgr *smumgr) 42static int iceland_start_smc(struct pp_smumgr *smumgr)
153{ 43{
154 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 44 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -157,284 +47,15 @@ static int iceland_start_smc(struct pp_smumgr *smumgr)
157 return 0; 47 return 0;
158} 48}
159 49
160static void iceland_pp_reset_smc(struct pp_smumgr *smumgr) 50static void iceland_reset_smc(struct pp_smumgr *smumgr)
161{ 51{
162 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 52 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
163 SMC_SYSCON_RESET_CNTL, 53 SMC_SYSCON_RESET_CNTL,
164 rst_reg, 1); 54 rst_reg, 1);
165} 55}
166 56
167int iceland_program_jump_on_start(struct pp_smumgr *smumgr)
168{
169 static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
170
171 iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
172 57
173 return 0; 58static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
174}
175
176/**
177 * Return if the SMC is currently running.
178 *
179 * @param smumgr the address of the powerplay hardware manager.
180 */
181bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr)
182{
183 uint32_t val1, val2;
184
185 val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
186 SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
187 val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
188 ixSMC_PC_C);
189
190 return ((0 == val1) && (0x20100 <= val2));
191}
192
193/**
194 * Send a message to the SMC, and wait for its response.
195 *
196 * @param smumgr the address of the powerplay hardware manager.
197 * @param msg the message to send.
198 * @return The response that came from the SMC.
199 */
200static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
201{
202 if (smumgr == NULL || smumgr->device == NULL)
203 return -EINVAL;
204
205 if (!iceland_is_smc_ram_running(smumgr))
206 return -EINVAL;
207
208 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
209 PP_ASSERT_WITH_CODE(
210 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
211 "Failed to send Previous Message.",
212 );
213
214 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
215
216 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
217 PP_ASSERT_WITH_CODE(
218 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
219 "Failed to send Message.",
220 );
221
222 return 0;
223}
224
225/**
226 * Send a message to the SMC with parameter
227 *
228 * @param smumgr: the address of the powerplay hardware manager.
229 * @param msg: the message to send.
230 * @param parameter: the parameter to send
231 * @return The response that came from the SMC.
232 */
233static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
234 uint16_t msg, uint32_t parameter)
235{
236 if (smumgr == NULL || smumgr->device == NULL)
237 return -EINVAL;
238
239 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
240
241 return iceland_send_msg_to_smc(smumgr, msg);
242}
243
244/*
245 * Read a 32bit value from the SMC SRAM space.
246 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
247 * @param smumgr the address of the powerplay hardware manager.
248 * @param smcAddress the address in the SMC RAM to access.
249 * @param value and output parameter for the data read from the SMC SRAM.
250 */
251int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
252 uint32_t smcAddress, uint32_t *value,
253 uint32_t limit)
254{
255 int result;
256
257 result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
258
259 if (0 != result)
260 return result;
261
262 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
263
264 return 0;
265}
266
267/*
268 * Write a 32bit value to the SMC SRAM space.
269 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
270 * @param smumgr the address of the powerplay hardware manager.
271 * @param smcAddress the address in the SMC RAM to access.
272 * @param value to write to the SMC SRAM.
273 */
274int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr,
275 uint32_t smcAddress, uint32_t value,
276 uint32_t limit)
277{
278 int result;
279
280 result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
281
282 if (0 != result)
283 return result;
284
285 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
286
287 return 0;
288}
289
290static int iceland_smu_fini(struct pp_smumgr *smumgr)
291{
292 struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend);
293
294 smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
295
296 if (smumgr->backend != NULL) {
297 kfree(smumgr->backend);
298 smumgr->backend = NULL;
299 }
300
301 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
302 return 0;
303}
304
305static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type)
306{
307 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
308
309 switch (fw_type) {
310 case UCODE_ID_SMU:
311 result = CGS_UCODE_ID_SMU;
312 break;
313 case UCODE_ID_SDMA0:
314 result = CGS_UCODE_ID_SDMA0;
315 break;
316 case UCODE_ID_SDMA1:
317 result = CGS_UCODE_ID_SDMA1;
318 break;
319 case UCODE_ID_CP_CE:
320 result = CGS_UCODE_ID_CP_CE;
321 break;
322 case UCODE_ID_CP_PFP:
323 result = CGS_UCODE_ID_CP_PFP;
324 break;
325 case UCODE_ID_CP_ME:
326 result = CGS_UCODE_ID_CP_ME;
327 break;
328 case UCODE_ID_CP_MEC:
329 result = CGS_UCODE_ID_CP_MEC;
330 break;
331 case UCODE_ID_CP_MEC_JT1:
332 result = CGS_UCODE_ID_CP_MEC_JT1;
333 break;
334 case UCODE_ID_CP_MEC_JT2:
335 result = CGS_UCODE_ID_CP_MEC_JT2;
336 break;
337 case UCODE_ID_RLC_G:
338 result = CGS_UCODE_ID_RLC_G;
339 break;
340 default:
341 break;
342 }
343
344 return result;
345}
346
347/**
348 * Convert the PPIRI firmware type to SMU type mask.
349 * For MEC, we need to check all MEC related type
350 */
351static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType)
352{
353 uint16_t result = 0;
354
355 switch (firmwareType) {
356 case UCODE_ID_SDMA0:
357 result = UCODE_ID_SDMA0_MASK;
358 break;
359 case UCODE_ID_SDMA1:
360 result = UCODE_ID_SDMA1_MASK;
361 break;
362 case UCODE_ID_CP_CE:
363 result = UCODE_ID_CP_CE_MASK;
364 break;
365 case UCODE_ID_CP_PFP:
366 result = UCODE_ID_CP_PFP_MASK;
367 break;
368 case UCODE_ID_CP_ME:
369 result = UCODE_ID_CP_ME_MASK;
370 break;
371 case UCODE_ID_CP_MEC:
372 case UCODE_ID_CP_MEC_JT1:
373 case UCODE_ID_CP_MEC_JT2:
374 result = UCODE_ID_CP_MEC_MASK;
375 break;
376 case UCODE_ID_RLC_G:
377 result = UCODE_ID_RLC_G_MASK;
378 break;
379 default:
380 break;
381 }
382
383 return result;
384}
385
386/**
387 * Check if the FW has been loaded,
388 * SMU will not return if loading has not finished.
389*/
390static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
391{
392 uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType);
393
394 if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
395 SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) {
396 pr_err("[ powerplay ] check firmware loading failed\n");
397 return -EINVAL;
398 }
399
400 return 0;
401}
402
403/* Populate one firmware image to the data structure */
404static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr,
405 uint16_t firmware_type,
406 struct SMU_Entry *pentry)
407{
408 int result;
409 struct cgs_firmware_info info = {0};
410
411 result = cgs_get_firmware_info(
412 smumgr->device,
413 iceland_convert_fw_type_to_cgs(firmware_type),
414 &info);
415
416 if (result == 0) {
417 pentry->version = 0;
418 pentry->id = (uint16_t)firmware_type;
419 pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
420 pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
421 pentry->meta_data_addr_high = 0;
422 pentry->meta_data_addr_low = 0;
423 pentry->data_size_byte = info.image_size;
424 pentry->num_register_entries = 0;
425
426 if (firmware_type == UCODE_ID_RLC_G)
427 pentry->flags = 1;
428 else
429 pentry->flags = 0;
430 } else {
431 return result;
432 }
433
434 return result;
435}
436
437static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr)
438{ 59{
439 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 60 SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
440 SMC_SYSCON_CLOCK_CNTL_0, 61 SMC_SYSCON_CLOCK_CNTL_0,
@@ -448,10 +69,10 @@ static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
448 ck_disable, 0); 69 ck_disable, 0);
449} 70}
450 71
451int iceland_smu_start_smc(struct pp_smumgr *smumgr) 72static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
452{ 73{
453 /* set smc instruct start point at 0x0 */ 74 /* set smc instruct start point at 0x0 */
454 iceland_program_jump_on_start(smumgr); 75 smu7_program_jump_on_start(smumgr);
455 76
456 /* enable smc clock */ 77 /* enable smc clock */
457 iceland_start_smc_clock(smumgr); 78 iceland_start_smc_clock(smumgr);
@@ -465,17 +86,37 @@ int iceland_smu_start_smc(struct pp_smumgr *smumgr)
465 return 0; 86 return 0;
466} 87}
467 88
468/** 89
469 * Upload the SMC firmware to the SMC microcontroller. 90static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
470 * 91 uint32_t length, const uint8_t *src,
471 * @param smumgr the address of the powerplay hardware manager. 92 uint32_t limit, uint32_t start_addr)
472 * @param pFirmware the data structure containing the various sections of the firmware.
473 */
474int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
475{ 93{
476 const uint8_t *src; 94 uint32_t byte_count = length;
477 uint32_t byte_count, val;
478 uint32_t data; 95 uint32_t data;
96
97 PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
98
99 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
100 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
101
102 while (byte_count >= 4) {
103 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
104 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
105 src += 4;
106 byte_count -= 4;
107 }
108
109 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
110
111 PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
112
113 return 0;
114}
115
116
117static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
118{
119 uint32_t val;
479 struct cgs_firmware_info info = {0}; 120 struct cgs_firmware_info info = {0};
480 121
481 if (smumgr == NULL || smumgr->device == NULL) 122 if (smumgr == NULL || smumgr->device == NULL)
@@ -483,7 +124,7 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
483 124
484 /* load SMC firmware */ 125 /* load SMC firmware */
485 cgs_get_firmware_info(smumgr->device, 126 cgs_get_firmware_info(smumgr->device,
486 iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); 127 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
487 128
488 if (info.image_size & 3) { 129 if (info.image_size & 3) {
489 pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); 130 pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n");
@@ -506,122 +147,17 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
506 ixSMC_SYSCON_MISC_CNTL, val | 1); 147 ixSMC_SYSCON_MISC_CNTL, val | 1);
507 148
508 /* stop smc clock */ 149 /* stop smc clock */
509 iceland_pp_stop_smc_clock(smumgr); 150 iceland_stop_smc_clock(smumgr);
510 151
511 /* reset smc */ 152 /* reset smc */
512 iceland_pp_reset_smc(smumgr); 153 iceland_reset_smc(smumgr);
513 154 iceland_upload_smc_firmware_data(smumgr, info.image_size,
514 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 155 (uint8_t *)info.kptr, ICELAND_SMC_SIZE,
515 info.ucode_start_address); 156 info.ucode_start_address);
516
517 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
518 AUTO_INCREMENT_IND_0, 1);
519
520 byte_count = info.image_size;
521 src = (const uint8_t *)info.kptr;
522
523 while (byte_count >= 4) {
524 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
525 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
526 src += 4;
527 byte_count -= 4;
528 }
529
530 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
531 AUTO_INCREMENT_IND_0, 0);
532 157
533 return 0; 158 return 0;
534} 159}
535 160
536static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr)
537{
538 struct iceland_smumgr *iceland_smu =
539 (struct iceland_smumgr *)(smumgr->backend);
540 uint16_t fw_to_load;
541 int result = 0;
542 struct SMU_DRAMData_TOC *toc;
543
544 toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader;
545 toc->num_entries = 0;
546 toc->structure_version = 1;
547
548 PP_ASSERT_WITH_CODE(
549 0 == iceland_populate_single_firmware_entry(smumgr,
550 UCODE_ID_RLC_G,
551 &toc->entry[toc->num_entries++]),
552 "Failed to Get Firmware Entry.\n",
553 return -1);
554 PP_ASSERT_WITH_CODE(
555 0 == iceland_populate_single_firmware_entry(smumgr,
556 UCODE_ID_CP_CE,
557 &toc->entry[toc->num_entries++]),
558 "Failed to Get Firmware Entry.\n",
559 return -1);
560 PP_ASSERT_WITH_CODE(
561 0 == iceland_populate_single_firmware_entry
562 (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
563 "Failed to Get Firmware Entry.\n", return -1);
564 PP_ASSERT_WITH_CODE(
565 0 == iceland_populate_single_firmware_entry
566 (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
567 "Failed to Get Firmware Entry.\n", return -1);
568 PP_ASSERT_WITH_CODE(
569 0 == iceland_populate_single_firmware_entry
570 (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
571 "Failed to Get Firmware Entry.\n", return -1);
572 PP_ASSERT_WITH_CODE(
573 0 == iceland_populate_single_firmware_entry
574 (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
575 "Failed to Get Firmware Entry.\n", return -1);
576 PP_ASSERT_WITH_CODE(
577 0 == iceland_populate_single_firmware_entry
578 (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
579 "Failed to Get Firmware Entry.\n", return -1);
580 PP_ASSERT_WITH_CODE(
581 0 == iceland_populate_single_firmware_entry
582 (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
583 "Failed to Get Firmware Entry.\n", return -1);
584 PP_ASSERT_WITH_CODE(
585 0 == iceland_populate_single_firmware_entry
586 (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
587 "Failed to Get Firmware Entry.\n", return -1);
588
589 if (!iceland_is_smc_ram_running(smumgr)) {
590 result = iceland_smu_upload_firmware_image(smumgr);
591 if (result)
592 return result;
593
594 result = iceland_smu_start_smc(smumgr);
595 if (result)
596 return result;
597 }
598
599 iceland_send_msg_to_smc_with_parameter(smumgr,
600 PPSMC_MSG_DRV_DRAM_ADDR_HI,
601 iceland_smu->header_buffer.mc_addr_high);
602
603 iceland_send_msg_to_smc_with_parameter(smumgr,
604 PPSMC_MSG_DRV_DRAM_ADDR_LO,
605 iceland_smu->header_buffer.mc_addr_low);
606
607 fw_to_load = UCODE_ID_RLC_G_MASK
608 + UCODE_ID_SDMA0_MASK
609 + UCODE_ID_SDMA1_MASK
610 + UCODE_ID_CP_CE_MASK
611 + UCODE_ID_CP_ME_MASK
612 + UCODE_ID_CP_PFP_MASK
613 + UCODE_ID_CP_MEC_MASK
614 + UCODE_ID_CP_MEC_JT1_MASK
615 + UCODE_ID_CP_MEC_JT2_MASK;
616
617 PP_ASSERT_WITH_CODE(
618 0 == iceland_send_msg_to_smc_with_parameter(
619 smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
620 "Fail to Request SMU Load uCode", return 0);
621
622 return result;
623}
624
625static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, 161static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
626 uint32_t firmwareType) 162 uint32_t firmwareType)
627{ 163{
@@ -635,12 +171,22 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
635 result = iceland_smu_upload_firmware_image(smumgr); 171 result = iceland_smu_upload_firmware_image(smumgr);
636 if (result) 172 if (result)
637 return result; 173 return result;
638
639 result = iceland_smu_start_smc(smumgr); 174 result = iceland_smu_start_smc(smumgr);
640 if (result) 175 if (result)
641 return result; 176 return result;
642 177
643 result = iceland_request_smu_reload_fw(smumgr); 178 if (!smu7_is_smc_ram_running(smumgr)) {
179 printk("smu not running, upload firmware again \n");
180 result = iceland_smu_upload_firmware_image(smumgr);
181 if (result)
182 return result;
183
184 result = iceland_smu_start_smc(smumgr);
185 if (result)
186 return result;
187 }
188
189 result = smu7_request_smu_load_fw(smumgr);
644 190
645 return result; 191 return result;
646} 192}
@@ -654,47 +200,38 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
654 */ 200 */
655static int iceland_smu_init(struct pp_smumgr *smumgr) 201static int iceland_smu_init(struct pp_smumgr *smumgr)
656{ 202{
657 struct iceland_smumgr *iceland_smu; 203 int i;
658 uint64_t mc_addr = 0; 204 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
659 205 if (smu7_init(smumgr))
660 /* Allocate memory for backend private data */ 206 return -EINVAL;
661 iceland_smu = (struct iceland_smumgr *)(smumgr->backend); 207
662 iceland_smu->header_buffer.data_size = 208 for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
663 ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; 209 smu_data->activity_target[i] = 30;
664
665 smu_allocate_memory(smumgr->device,
666 iceland_smu->header_buffer.data_size,
667 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
668 PAGE_SIZE,
669 &mc_addr,
670 &iceland_smu->header_buffer.kaddr,
671 &iceland_smu->header_buffer.handle);
672
673 iceland_smu->pHeader = iceland_smu->header_buffer.kaddr;
674 iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
675 iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
676
677 PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader),
678 "Out of memory.",
679 kfree(smumgr->backend);
680 cgs_free_gpu_mem(smumgr->device,
681 (cgs_handle_t)iceland_smu->header_buffer.handle);
682 return -1);
683 210
684 return 0; 211 return 0;
685} 212}
686 213
687static const struct pp_smumgr_func iceland_smu_funcs = { 214static const struct pp_smumgr_func iceland_smu_funcs = {
688 .smu_init = &iceland_smu_init, 215 .smu_init = &iceland_smu_init,
689 .smu_fini = &iceland_smu_fini, 216 .smu_fini = &smu7_smu_fini,
690 .start_smu = &iceland_start_smu, 217 .start_smu = &iceland_start_smu,
691 .check_fw_load_finish = &iceland_check_fw_load_finish, 218 .check_fw_load_finish = &smu7_check_fw_load_finish,
692 .request_smu_load_fw = &iceland_request_smu_reload_fw, 219 .request_smu_load_fw = &smu7_reload_firmware,
693 .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, 220 .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
694 .send_msg_to_smc = &iceland_send_msg_to_smc, 221 .send_msg_to_smc = &smu7_send_msg_to_smc,
695 .send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, 222 .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
696 .download_pptable_settings = NULL, 223 .download_pptable_settings = NULL,
697 .upload_pptable_settings = NULL, 224 .upload_pptable_settings = NULL,
225 .get_offsetof = iceland_get_offsetof,
226 .process_firmware_header = iceland_process_firmware_header,
227 .init_smc_table = iceland_init_smc_table,
228 .update_sclk_threshold = iceland_update_sclk_threshold,
229 .thermal_setup_fan_table = iceland_thermal_setup_fan_table,
230 .populate_all_graphic_levels = iceland_populate_all_graphic_levels,
231 .populate_all_memory_levels = iceland_populate_all_memory_levels,
232 .get_mac_definition = iceland_get_mac_definition,
233 .initialize_mc_reg_table = iceland_initialize_mc_reg_table,
234 .is_dpm_running = iceland_is_dpm_running,
698}; 235};
699 236
700int iceland_smum_init(struct pp_smumgr *smumgr) 237int iceland_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 62009a7ae827..cfadfeeea039 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -26,39 +26,46 @@
26#ifndef _ICELAND_SMUMGR_H_ 26#ifndef _ICELAND_SMUMGR_H_
27#define _ICELAND_SMUMGR_H_ 27#define _ICELAND_SMUMGR_H_
28 28
29struct iceland_buffer_entry {
30 uint32_t data_size;
31 uint32_t mc_addr_low;
32 uint32_t mc_addr_high;
33 void *kaddr;
34 unsigned long handle;
35};
36 29
37/* Iceland only has header_buffer, don't have smu buffer. */ 30#include "smu7_smumgr.h"
38struct iceland_smumgr { 31#include "pp_endian.h"
39 uint8_t *pHeader; 32#include "smu71_discrete.h"
40 uint8_t *pMecImage;
41 uint32_t ulSoftRegsStart;
42 33
43 struct iceland_buffer_entry header_buffer; 34struct iceland_pt_defaults {
35 uint8_t svi_load_line_en;
36 uint8_t svi_load_line_vddc;
37 uint8_t tdc_vddc_throttle_release_limit_perc;
38 uint8_t tdc_mawt;
39 uint8_t tdc_waterfall_ctl;
40 uint8_t dte_ambient_temp_base;
41 uint32_t display_cac;
42 uint32_t bamp_temp_gradient;
43 uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
44 uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
44}; 45};
45 46
46extern int iceland_smum_init(struct pp_smumgr *smumgr); 47struct iceland_mc_reg_entry {
47extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, 48 uint32_t mclk_max;
48 uint32_t smcStartAddress, 49 uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
49 const uint8_t *src, 50};
50 uint32_t byteCount, uint32_t limit);
51
52extern int iceland_smu_start_smc(struct pp_smumgr *smumgr);
53 51
54extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, 52struct iceland_mc_reg_table {
55 uint32_t smcAddress, 53 uint8_t last; /* number of registers*/
56 uint32_t *value, uint32_t limit); 54 uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
57extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, 55 uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
58 uint32_t smcAddress, 56 struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
59 uint32_t value, uint32_t limit); 57 SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
58};
60 59
61extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr); 60struct iceland_smumgr {
62extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr); 61 struct smu7_smumgr smu7_data;
62 struct SMU71_Discrete_DpmTable smc_state_table;
63 struct SMU71_Discrete_PmFuses power_tune_table;
64 struct SMU71_Discrete_Ulv ulv_setting;
65 struct iceland_pt_defaults *power_tune_defaults;
66 SMU71_Discrete_MCRegisters mc_regs;
67 struct iceland_mc_reg_table mc_reg_table;
68 uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
69};
63 70
64#endif 71#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
new file mode 100644
index 000000000000..4ccc0b72324d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -0,0 +1,2287 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "polaris10_smc.h"
25#include "smu7_dyn_defaults.h"
26
27#include "smu7_hwmgr.h"
28#include "hardwaremanager.h"
29#include "ppatomctrl.h"
30#include "pp_debug.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "polaris10_smumgr.h"
34#include "pppcielanes.h"
35
36#include "smu_ucode_xfer_vi.h"
37#include "smu74_discrete.h"
38#include "smu/smu_7_1_3_d.h"
39#include "smu/smu_7_1_3_sh_mask.h"
40#include "gmc/gmc_8_1_d.h"
41#include "gmc/gmc_8_1_sh_mask.h"
42#include "oss/oss_3_0_d.h"
43#include "gca/gfx_8_0_d.h"
44#include "bif/bif_5_0_d.h"
45#include "bif/bif_5_0_sh_mask.h"
46#include "dce/dce_10_0_d.h"
47#include "dce/dce_10_0_sh_mask.h"
48#include "polaris10_pwrvirus.h"
49#include "smu7_ppsmc.h"
50#include "smu7_smumgr.h"
51
52#define POLARIS10_SMC_SIZE 0x20000
53#define VOLTAGE_VID_OFFSET_SCALE1 625
54#define VOLTAGE_VID_OFFSET_SCALE2 100
55#define POWERTUNE_DEFAULT_SET_MAX 1
56#define VDDC_VDDCI_DELTA 200
57#define MC_CG_ARB_FREQ_F1 0x0b
58
59static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
60 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
61 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
62 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
65};
66
67static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
68 {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
69 {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
70 {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
71 {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
72 {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
73 {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
74 {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
75 {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
76
77static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
78 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
79 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
80{
81 uint32_t i;
82 uint16_t vddci;
83 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
84
85 *voltage = *mvdd = 0;
86
87 /* clock - voltage dependency table is empty table */
88 if (dep_table->count == 0)
89 return -EINVAL;
90
91 for (i = 0; i < dep_table->count; i++) {
92 /* find first sclk bigger than request */
93 if (dep_table->entries[i].clk >= clock) {
94 *voltage |= (dep_table->entries[i].vddc *
95 VOLTAGE_SCALE) << VDDC_SHIFT;
96 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
97 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
98 VOLTAGE_SCALE) << VDDCI_SHIFT;
99 else if (dep_table->entries[i].vddci)
100 *voltage |= (dep_table->entries[i].vddci *
101 VOLTAGE_SCALE) << VDDCI_SHIFT;
102 else {
103 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
104 (dep_table->entries[i].vddc -
105 (uint16_t)VDDC_VDDCI_DELTA));
106 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
107 }
108
109 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
110 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
111 VOLTAGE_SCALE;
112 else if (dep_table->entries[i].mvdd)
113 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
114 VOLTAGE_SCALE;
115
116 *voltage |= 1 << PHASES_SHIFT;
117 return 0;
118 }
119 }
120
121 /* sclk is bigger than max sclk in the dependence table */
122 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
123
124 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
125 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
126 VOLTAGE_SCALE) << VDDCI_SHIFT;
127 else if (dep_table->entries[i-1].vddci) {
128 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
129 (dep_table->entries[i].vddc -
130 (uint16_t)VDDC_VDDCI_DELTA));
131 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
132 }
133
134 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
135 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
136 else if (dep_table->entries[i].mvdd)
137 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
138
139 return 0;
140}
141
/* Convert a percentage-based fan gain into 4.12 fixed point
 * (100% maps to 4096). Truncates toward zero. */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)(((uint32_t)raw_setting * 4096) / 100);
}
148
/* Populate the BAPM (power-containment) thermal parameters in the SMC DPM
 * table: TDP targets, temperature limits, fan gains, and the flattened
 * per-iteration/source/sink BAPMTI_R / BAPMTI_RC defaults.
 * Returns 0; the out-of-range temperature assertion does not abort. */
static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);

	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table =
			&hwmgr->thermal_controller.advanceFanControlParameters;
	int i, j, k;
	const uint16_t *pdef1;
	const uint16_t *pdef2;

	/* TDP scaled by 128 — presumably the SMC expects 9.7 fixed point;
	 * TODO confirm against the SMU74 firmware interface. */
	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
	table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));

	/* Temp must fit in 8 bits since it is scaled by 256 below; the empty
	 * third argument means this assertion only logs, it does not return. */
	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range!",
			);

	/* Temperature limits are carried as 8.8 fixed point (x256). */
	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));

	/* The defaults are flat arrays walked in iteration/source/sink order
	 * to fill the 3-D SMC tables. */
	pdef1 = defaults->BAPMTI_R;
	pdef2 = defaults->BAPMTI_RC;

	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
			for (k = 0; k < SMU74_DTE_SINKS; k++) {
				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
196
197static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
198{
199 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
200 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
201
202 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
203 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
204 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
205 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
206
207 return 0;
208}
209
210static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
211{
212 uint16_t tdc_limit;
213 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
214 struct phm_ppt_v1_information *table_info =
215 (struct phm_ppt_v1_information *)(hwmgr->pptable);
216 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
217
218 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
219 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
220 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
221 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
222 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
223 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
224
225 return 0;
226}
227
/* Populate PM-fuse DWORD8: read the current fuse word back from SMC RAM,
 * overwrite the TdcWaterfallCtl byte with the ASIC default, and keep the
 * SMC-provided LPML temperature min/max and reserved bytes.
 * Returns 0 on success, -EINVAL if the SMC SRAM read fails. */
static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
			fuse_table_offset +
			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		/* NOTE(review): the message mentions DW6/SviLoadLineEn but the
		 * read above targets TdcWaterfallCtl — looks like a stale
		 * string copied from another populate helper. */
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else {
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
		/* Byte layout of the fuse word: [23:16] min, [15:8] max,
		 * [7:0] reserved. */
		smu_data->power_tune_table.LPMLTemperatureMin =
				(uint8_t)((temp >> 16) & 0xff);
		smu_data->power_tune_table.LPMLTemperatureMax =
				(uint8_t)((temp >> 8) & 0xff);
		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
	}
	return 0;
}
251
252static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
253{
254 int i;
255 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
256
257 /* Currently not used. Set all to zero. */
258 for (i = 0; i < 16; i++)
259 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
260
261 return 0;
262}
263
264static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
265{
266 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
267
268/* TO DO move to hwmgr */
269 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
270 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
271 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
272 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
273
274 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
275 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
276 return 0;
277}
278
279static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
280{
281 int i;
282 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
283
284 /* Currently not used. Set all to zero. */
285 for (i = 0; i < 16; i++)
286 smu_data->power_tune_table.GnbLPML[i] = 0;
287
288 return 0;
289}
290
/* Placeholder: GnbLPML min/max VID derivation is not implemented for
 * Polaris10; kept as a no-op so the PM-fuse population sequence in
 * polaris10_populate_pm_fuses() stays uniform across ASICs. */
static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
295
296static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
297{
298 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
299 struct phm_ppt_v1_information *table_info =
300 (struct phm_ppt_v1_information *)(hwmgr->pptable);
301 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
302 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
303 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
304
305 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
306 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
307
308 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
309 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
310 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
311 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
312
313 return 0;
314}
315
/* Populate and download the PM fuse table when power containment is
 * enabled: locate the fuse table in SMC RAM via the firmware header,
 * fill each fuse group via the helpers above, then copy the assembled
 * table to the SMC. Returns 0 on success or when power containment is
 * disabled, -EINVAL on any step failure. */
static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU74_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		/* DW6 */
		if (polaris10_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);
		/* DW7 */
		if (polaris10_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);
		/* DW8 */
		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		if (0 != polaris10_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		if (polaris10_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		if (polaris10_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Min and Max Vid Failed!",
					return -EINVAL);

		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		/* NOTE(review): "- 92" presumably excludes trailing fields the
		 * driver must not overwrite — TODO confirm against the
		 * SMU74_Discrete_PmFuses layout. */
		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				(sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
380
381/**
382 * Mvdd table preparation for SMC.
383 *
384 * @param *hwmgr The address of the hardware manager.
385 * @param *table The SMC DPM table structure to be populated.
386 * @return 0
387 */
388static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
389 SMU74_Discrete_DpmTable *table)
390{
391 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
392 uint32_t count, level;
393
394 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
395 count = data->mvdd_voltage_table.count;
396 if (count > SMU_MAX_SMIO_LEVELS)
397 count = SMU_MAX_SMIO_LEVELS;
398 for (level = 0; level < count; level++) {
399 table->SmioTable2.Pattern[level].Voltage =
400 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
401 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
402 table->SmioTable2.Pattern[level].Smio =
403 (uint8_t) level;
404 table->Smio[level] |=
405 data->mvdd_voltage_table.entries[level].smio_low;
406 }
407 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
408
409 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
410 }
411
412 return 0;
413}
414
415static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
416 struct SMU74_Discrete_DpmTable *table)
417{
418 uint32_t count, level;
419 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
420
421 count = data->vddci_voltage_table.count;
422
423 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
424 if (count > SMU_MAX_SMIO_LEVELS)
425 count = SMU_MAX_SMIO_LEVELS;
426 for (level = 0; level < count; ++level) {
427 table->SmioTable1.Pattern[level].Voltage =
428 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
429 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
430
431 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
432 }
433 }
434
435 table->SmioMask1 = data->vddci_voltage_table.mask_low;
436
437 return 0;
438}
439
440/**
441* Preparation of vddc and vddgfx CAC tables for SMC.
442*
443* @param hwmgr the address of the hardware manager
444* @param table the SMC DPM table structure to be populated
445* @return always 0
446*/
447static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
448 struct SMU74_Discrete_DpmTable *table)
449{
450 uint32_t count;
451 uint8_t index;
452 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
453 struct phm_ppt_v1_information *table_info =
454 (struct phm_ppt_v1_information *)(hwmgr->pptable);
455 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
456 table_info->vddc_lookup_table;
457 /* tables is already swapped, so in order to use the value from it,
458 * we need to swap it back.
459 * We are populating vddc CAC data to BapmVddc table
460 * in split and merged mode
461 */
462 for (count = 0; count < lookup_table->count; count++) {
463 index = phm_get_voltage_index(lookup_table,
464 data->vddc_voltage_table.entries[count].value);
465 table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
466 table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
467 table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
468 }
469
470 return 0;
471}
472
/**
* Preparation of voltage tables for SMC.
*
* Delegates to the VDDCI, MVDD, and CAC populators in that order;
* each helper always returns 0, so their results are not checked.
*
* @param hwmgr the address of the hardware manager
* @param table the SMC DPM table structure to be populated
* @return always 0
*/

static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}
490
491static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
492 struct SMU74_Discrete_Ulv *state)
493{
494 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
495 struct phm_ppt_v1_information *table_info =
496 (struct phm_ppt_v1_information *)(hwmgr->pptable);
497
498 state->CcPwrDynRm = 0;
499 state->CcPwrDynRm1 = 0;
500
501 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
502 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
503 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
504
505 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
506
507 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
508 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
509 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
510
511 return 0;
512}
513
/* ULV occupies a single slot in the DPM table; delegate to the level
 * populator for table->Ulv. Returns the populator's result (always 0). */
static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}
519
/* Build the PCIe link DPM levels from the pcie speed table and record
 * the enabled-level mask. Always returns 0. */
static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level — hence "<=", one extra entry. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		/* param1 presumably carries the lane count to encode —
		 * TODO confirm against the pcie table builder. */
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	/* Level count excludes the reserved boot-level slot. */
	smu_data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;

/* To Do move to hwmgr */
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
550
551
/* Fill the SCLK FCW range table in the SMC DPM table, preferring values
 * from the VBIOS; if the VBIOS query fails, fall back to the built-in
 * Range_Table and also cache the per-range transition frequencies used
 * later by polaris10_calculate_sclk_params() for PLL-range lookup. */
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
				  SMU74_Discrete_DpmTable *table)
{
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
	uint32_t i, ref_clk;

	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

	ref_clk = smu7_get_xclk(hwmgr);

	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
		/* VBIOS path: copy the table as-is (byte-swapping the
		 * 16-bit fields for the SMC). */
		for (i = 0; i < NUM_SCLK_RANGE; i++) {
			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
			/* NOTE(review): "usRcw_trans_lower" looks like a typo'd
			 * field name (cf. usFcw_trans_upper) in the atomctrl
			 * struct declared elsewhere — flagged, not fixable here. */
			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
		}
		return;
	}

	/* Fallback path: derive the table from the hardcoded Range_Table
	 * and record transition frequencies scaled by the reference clock. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
		smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
	}
}
595
/**
* Calculates the SCLK dividers using the provided engine clock
*
* Prefers the PLL dividers reported by the VBIOS (atomctrl); if that
* query fails, computes the FCW integer/fractional parts manually from
* the reference clock and the previously populated SclkFcwRangeTable,
* and hardcodes PCC (10%) and spread-spectrum (2%) targets.
* Returns 0 on both paths (the VBIOS path returns its result, which is
* 0 whenever that branch is taken).
*
* @param hwmgr the address of the hardware manager
* @param clock the engine clock to use to populate the structure
* @param sclk the SMC SCLK structure to be populated
*/
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, SMU_SclkSetting *sclk_setting)
{
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
	const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_clock_dividers_ai dividers;
	uint32_t ref_clock;
	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
	uint8_t i;
	int result;
	uint64_t temp;

	sclk_setting->SclkFrequency = clock;
	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
	if (result == 0) {
		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
		sclk_setting->PllRange = dividers.ucSclkPllRange;
		sclk_setting->Sclk_slew_rate = 0x400;
		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
		sclk_setting->Pcc_down_slew_rate = 0xffff;
		sclk_setting->SSc_En = dividers.ucSscEnable;
		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
		return result;
	}

	/* Fallback: compute the dividers manually. */
	ref_clock = smu7_get_xclk(hwmgr);

	/* Pick the PLL range whose transition window contains the clock. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		if (clock > smu_data->range_table[i].trans_lower_frequency
		&& clock <= smu_data->range_table[i].trans_upper_frequency) {
			sclk_setting->PllRange = i;
			break;
		}
	}

	/* FCW = clock << postdiv / ref_clock; the fractional part is the
	 * remainder in 16.16 fixed point (do_div is the kernel's 64-bit
	 * division helper). */
	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
	temp <<= 0x10;
	do_div(temp, ref_clock);
	sclk_setting->Fcw_frac = temp & 0xffff;

	pcc_target_percent = 10; /* Hardcode 10% for now. */
	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);

	ss_target_percent = 2; /* Hardcode 2% for now. */
	sclk_setting->SSc_En = 0;
	if (ss_target_percent) {
		sclk_setting->SSc_En = 1;
		ss_target_freq = clock - (clock * ss_target_percent / 100);
		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
		temp <<= 0x10;
		do_div(temp, ref_clock);
		sclk_setting->Fcw1_frac = temp & 0xffff;
	}

	return 0;
}
668
/**
* Populates single SMC SCLK structure using the provided engine clock
*
* Computes the SCLK PLL settings, resolves the dependent voltages, and
* fills hysteresis/activity defaults; all multi-byte fields are
* byte-swapped for the SMC at the end.
*
* @param hwmgr the address of the hardware manager
* @param clock the engine clock to use to populate the structure
* @param sclk the SMC SCLK structure to be populated
*/

static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, uint16_t sclk_al_threshold,
		struct SMU74_Discrete_GraphicsLevel *level)
{
	int result;
	/* PP_Clocks minClocks; */
	uint32_t mvdd;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMU_SclkSetting curr_sclk_setting = { 0 };

	/* result is overwritten below; polaris10_calculate_sclk_params()
	 * returns 0 on its manual fallback path, so no error is lost. */
	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);

	/* populate graphics levels */
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk, clock,
			&level->MinVoltage, &mvdd);

	PP_ASSERT_WITH_CODE((0 == result),
			"can not find VDDC voltage value for "
			"VDDC engine clock dependency table",
			return result);
	level->ActivityLevel = sclk_al_threshold;

	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	level->EnabledForThrottle = 1;
	level->UpHyst = 10;
	level->DownHyst = 0;
	level->VoltageDownHyst = 0;
	level->PowerThrottle = 0;
	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
								hwmgr->display_config.min_core_set_clock_in_sr);

	/* Default to slow, highest DPM level will be
	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
	 */
	if (data->update_up_hyst)
		level->UpHyst = (uint8_t)data->up_hyst;
	if (data->update_down_hyst)
		level->DownHyst = (uint8_t)data->down_hyst;

	level->SclkSetting = curr_sclk_setting;

	/* Byte-swap every multi-byte field for the SMC. */
	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
	return 0;
}
742
/**
* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
*
* Builds every graphics DPM level, assigns a PCIe DPM level to each
* (either directly from the PPTable pcie table, or derived from the
* pcie enable mask), then downloads the whole level array to SMC RAM.
*
* @param hwmgr the address of the hardware manager
*/
int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* Destination of the level array inside SMC RAM. */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
			SMU74_MAX_LEVELS_GRAPHICS;
	struct SMU74_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
		lowest_pcie_level_enabled = 0,
		mid_pcie_level_enabled = 0,
		count = 0;

	polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));

	for (i = 0; i < dpm_table->sclk_table.count; i++) {

		result = polaris10_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)smu_data->activity_target[i],
				&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport))
		smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
	smu_data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);


	if (pcie_table != NULL) {
		/* Clamp each sclk level's pcie level to the last pcie entry. */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		/* No pcie table: derive lowest/mid/highest enabled pcie
		 * levels from the enable mask bits. */
		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		while ((count < hightest_pcie_level_enabled) &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
842
843
844static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
845 uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
846{
847 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
848 struct phm_ppt_v1_information *table_info =
849 (struct phm_ppt_v1_information *)(hwmgr->pptable);
850 int result = 0;
851 struct cgs_display_info info = {0, 0, NULL};
852 uint32_t mclk_stutter_mode_threshold = 40000;
853
854 cgs_get_active_displays_info(hwmgr->device, &info);
855
856 if (table_info->vdd_dep_on_mclk) {
857 result = polaris10_get_dependency_volt_by_clk(hwmgr,
858 table_info->vdd_dep_on_mclk, clock,
859 &mem_level->MinVoltage, &mem_level->MinMvdd);
860 PP_ASSERT_WITH_CODE((0 == result),
861 "can not find MinVddc voltage value from memory "
862 "VDDC voltage dependency table", return result);
863 }
864
865 mem_level->MclkFrequency = clock;
866 mem_level->EnabledForThrottle = 1;
867 mem_level->EnabledForActivity = 0;
868 mem_level->UpHyst = 0;
869 mem_level->DownHyst = 100;
870 mem_level->VoltageDownHyst = 0;
871 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
872 mem_level->StutterEnable = false;
873 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
874
875 data->display_timing.num_existing_displays = info.display_count;
876
877 if (mclk_stutter_mode_threshold &&
878 (clock <= mclk_stutter_mode_threshold) &&
879 (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
880 STUTTER_ENABLE) & 0x1))
881 mem_level->StutterEnable = true;
882
883 if (!result) {
884 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
885 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
886 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
887 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
888 }
889 return result;
890}
891
892/**
893* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
894*
895* @param hwmgr the address of the hardware manager
896*/
897int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
898{
899 struct pp_smumgr *smumgr = hwmgr->smumgr;
900 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
901 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
902 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
903 int result;
904 /* populate MCLK dpm table to SMU7 */
905 uint32_t array = smu_data->smu7_data.dpm_table_start +
906 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
907 uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
908 SMU74_MAX_LEVELS_MEMORY;
909 struct SMU74_Discrete_MemoryLevel *levels =
910 smu_data->smc_state_table.MemoryLevel;
911 uint32_t i;
912
913 for (i = 0; i < dpm_table->mclk_table.count; i++) {
914 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
915 "can not populate memory level as memory clock is zero",
916 return -EINVAL);
917 result = polaris10_populate_single_memory_level(hwmgr,
918 dpm_table->mclk_table.dpm_levels[i].value,
919 &levels[i]);
920 if (i == dpm_table->mclk_table.count - 1) {
921 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
922 levels[i].EnabledForActivity = 1;
923 }
924 if (result)
925 return result;
926 }
927
928 /* In order to prevent MC activity from stutter mode to push DPM up,
929 * the UVD change complements this by putting the MCLK in
930 * a higher state by default such that we are not affected by
931 * up threshold or and MCLK DPM latency.
932 */
933 levels[0].ActivityLevel = 0x1f;
934 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
935
936 smu_data->smc_state_table.MemoryDpmLevelCount =
937 (uint8_t)dpm_table->mclk_table.count;
938 hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
939 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
940
941 /* level count will send to smc once at init smc table and never change */
942 result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
943 (uint32_t)array_size, SMC_RAM_END);
944
945 return result;
946}
947
948/**
949* Populates the SMC MVDD structure using the provided memory clock.
950*
951* @param hwmgr the address of the hardware manager
952* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
953* @param voltage the SMC VOLTAGE structure to be populated
954*/
955static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
956 uint32_t mclk, SMIO_Pattern *smio_pat)
957{
958 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
959 struct phm_ppt_v1_information *table_info =
960 (struct phm_ppt_v1_information *)(hwmgr->pptable);
961 uint32_t i = 0;
962
963 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
964 /* find mvdd value which clock is more than request */
965 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
966 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
967 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
968 break;
969 }
970 }
971 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
972 "MVDD Voltage is outside the supported range.",
973 return -EINVAL);
974 } else
975 return -EINVAL;
976
977 return 0;
978}
979
/* Program the ACPI (idle/minimum) SCLK and MCLK levels of the SMC DPM
 * table from the VBIOS boot-up clocks and voltages.
 */
static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU74_Discrete_DpmTable *table)
{
	int result = 0;
	uint32_t sclk_frequency;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;

	/* The ACPI level must not carry the DC (battery) flag. */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Get MinVoltage and Frequency from DPM0,
	 * already converted to SMC_UL */
	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk,
			sclk_frequency,
			&table->ACPILevel.MinVoltage, &mvdd);
	/* Failure here is only logged (empty action arg); result is
	 * overwritten by the next call below. */
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDC voltage value "
			"in Clock Dependency Table",
			);

	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);

	table->ACPILevel.DeepSleepDivId = 0;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Byte-swap all host-endian SCLK-side fields for the SMC. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);


	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_mclk,
			table->MemoryACPILevel.MclkFrequency,
			&table->MemoryACPILevel.MinVoltage, &mvdd);
	/* As above: logged only, not propagated. */
	PP_ASSERT_WITH_CODE((0 == result),
			"Cannot find ACPI VDDCI voltage value "
			"in Clock Dependency Table",
			);

	/* NOTE(review): us_mvdd is computed below but never consumed —
	 * MinMvdd is taken from the populate_mvdd_value(hwmgr, 0, ...)
	 * call instead.  Dead store or missing use; confirm intent. */
	us_mvdd = 0;
	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!polaris10_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	table->MemoryACPILevel.StutterEnable = false;

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
1072
1073static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1074 SMU74_Discrete_DpmTable *table)
1075{
1076 int result = -EINVAL;
1077 uint8_t count;
1078 struct pp_atomctrl_clock_dividers_vi dividers;
1079 struct phm_ppt_v1_information *table_info =
1080 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1081 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1082 table_info->mm_dep_table;
1083 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1084 uint32_t vddci;
1085
1086 table->VceLevelCount = (uint8_t)(mm_table->count);
1087 table->VceBootLevel = 0;
1088
1089 for (count = 0; count < table->VceLevelCount; count++) {
1090 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1091 table->VceLevel[count].MinVoltage = 0;
1092 table->VceLevel[count].MinVoltage |=
1093 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1094
1095 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1096 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1097 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1098 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1099 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1100 else
1101 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1102
1103
1104 table->VceLevel[count].MinVoltage |=
1105 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1106 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1107
1108 /*retrieve divider value for VBIOS */
1109 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1110 table->VceLevel[count].Frequency, &dividers);
1111 PP_ASSERT_WITH_CODE((0 == result),
1112 "can not find divide id for VCE engine clock",
1113 return result);
1114
1115 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1116
1117 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1118 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1119 }
1120 return result;
1121}
1122
1123
1124static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1125 SMU74_Discrete_DpmTable *table)
1126{
1127 int result = -EINVAL;
1128 uint8_t count;
1129 struct pp_atomctrl_clock_dividers_vi dividers;
1130 struct phm_ppt_v1_information *table_info =
1131 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1132 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1133 table_info->mm_dep_table;
1134 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1135 uint32_t vddci;
1136
1137 table->SamuBootLevel = 0;
1138 table->SamuLevelCount = (uint8_t)(mm_table->count);
1139
1140 for (count = 0; count < table->SamuLevelCount; count++) {
1141 /* not sure whether we need evclk or not */
1142 table->SamuLevel[count].MinVoltage = 0;
1143 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1144 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1145 VOLTAGE_SCALE) << VDDC_SHIFT;
1146
1147 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1148 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1149 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1150 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1151 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1152 else
1153 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1154
1155 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1156 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1157
1158 /* retrieve divider value for VBIOS */
1159 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1160 table->SamuLevel[count].Frequency, &dividers);
1161 PP_ASSERT_WITH_CODE((0 == result),
1162 "can not find divide id for samu clock", return result);
1163
1164 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1165
1166 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1167 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1168 }
1169 return result;
1170}
1171
1172static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1173 int32_t eng_clock, int32_t mem_clock,
1174 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1175{
1176 uint32_t dram_timing;
1177 uint32_t dram_timing2;
1178 uint32_t burst_time;
1179 int result;
1180
1181 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1182 eng_clock, mem_clock);
1183 PP_ASSERT_WITH_CODE(result == 0,
1184 "Error calling VBIOS to set DRAM_TIMING.", return result);
1185
1186 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1187 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1188 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1189
1190
1191 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1192 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1193 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1194
1195 return 0;
1196}
1197
1198static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1199{
1200 struct pp_smumgr *smumgr = hwmgr->smumgr;
1201 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1202 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1203 struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1204 uint32_t i, j;
1205 int result = 0;
1206
1207 for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1208 for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1209 result = polaris10_populate_memory_timing_parameters(hwmgr,
1210 hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1211 hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1212 &arb_regs.entries[i][j]);
1213 if (result == 0)
1214 result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
1215 if (result != 0)
1216 return result;
1217 }
1218 }
1219
1220 result = smu7_copy_bytes_to_smc(
1221 hwmgr->smumgr,
1222 smu_data->smu7_data.arb_table_start,
1223 (uint8_t *)&arb_regs,
1224 sizeof(SMU74_Discrete_MCArbDramTimingTable),
1225 SMC_RAM_END);
1226 return result;
1227}
1228
1229static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1230 struct SMU74_Discrete_DpmTable *table)
1231{
1232 int result = -EINVAL;
1233 uint8_t count;
1234 struct pp_atomctrl_clock_dividers_vi dividers;
1235 struct phm_ppt_v1_information *table_info =
1236 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1237 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1238 table_info->mm_dep_table;
1239 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1240 uint32_t vddci;
1241
1242 table->UvdLevelCount = (uint8_t)(mm_table->count);
1243 table->UvdBootLevel = 0;
1244
1245 for (count = 0; count < table->UvdLevelCount; count++) {
1246 table->UvdLevel[count].MinVoltage = 0;
1247 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1248 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1249 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1250 VOLTAGE_SCALE) << VDDC_SHIFT;
1251
1252 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1253 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1254 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1255 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1256 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1257 else
1258 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1259
1260 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1261 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1262
1263 /* retrieve divider value for VBIOS */
1264 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1265 table->UvdLevel[count].VclkFrequency, &dividers);
1266 PP_ASSERT_WITH_CODE((0 == result),
1267 "can not find divide id for Vclk clock", return result);
1268
1269 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1270
1271 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1272 table->UvdLevel[count].DclkFrequency, &dividers);
1273 PP_ASSERT_WITH_CODE((0 == result),
1274 "can not find divide id for Dclk clock", return result);
1275
1276 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1277
1278 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1279 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1280 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1281 }
1282
1283 return result;
1284}
1285
/* Fill the SMC boot levels and boot voltages from the VBIOS boot-up
 * state.  Always returns 0.
 */
static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Default to level 0 in case the lookups below do not match. */
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	/* find boot level from dpm table */
	/* NOTE(review): both phm_find_boot_level() results are assigned to
	 * `result` but never checked, and the function returns 0
	 * unconditionally — the boot levels silently stay 0 on lookup
	 * failure.  Confirm this best-effort behavior is intended. */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(table->MemoryBootLevel));

	/* Boot voltages, converted to the SMC's 0.25 mV-style units. */
	table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
			VOLTAGE_SCALE;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
			VOLTAGE_SCALE;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
			VOLTAGE_SCALE;

	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);

	return 0;
}
1317
1318static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1319{
1320 struct pp_smumgr *smumgr = hwmgr->smumgr;
1321 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1322 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1323 struct phm_ppt_v1_information *table_info =
1324 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1325 uint8_t count, level;
1326
1327 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1328
1329 for (level = 0; level < count; level++) {
1330 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1331 hw_data->vbios_boot_state.sclk_bootup_value) {
1332 smu_data->smc_state_table.GraphicsBootLevel = level;
1333 break;
1334 }
1335 }
1336
1337 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1338 for (level = 0; level < count; level++) {
1339 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1340 hw_data->vbios_boot_state.mclk_bootup_value) {
1341 smu_data->smc_state_table.MemoryBootLevel = level;
1342 break;
1343 }
1344 }
1345
1346 return 0;
1347}
1348
1349
/* Populate the clock-stretcher (CKS) related fields of the SMC state
 * table: per-level CKS enables, voltage offsets derived from the fused
 * RO (ring-oscillator) value, the LDO reference select, and the global
 * CKS enable bit in PWR_CKS_CNTL.
 */
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);

	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	/* RO fuse lives in byte 3 of efuse word 67. */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (67 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;

	/* Per-chip RO range; the 8-bit fuse value is mapped linearly
	 * onto [min, max].  Presumably values from characterization —
	 * confirm against the fuse spec before changing. */
	if (hwmgr->chip_id == CHIP_POLARIS10) {
		min = 1000;
		max = 2300;
	} else {
		min = 1100;
		max = 2100;
	}

	ro = efuse * (max - min) / 255 + min;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Curve-fit voltage models (with and without CKS) as a
		 * function of sclk and RO; constants are chip-specific
		 * fitted coefficients — do not alter without the source
		 * characterization data. */
		if (hwmgr->chip_id == CHIP_POLARIS10) {
			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
		} else {
			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
		}

		/* Offset in 6.25 mV steps, rounded up ((x*100 + 624)/625).
		 * Note: volt_offset keeps its previous value when
		 * volt_without_cks < volt_with_cks. */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);

		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* Fall back to LDO reference select 6 when the pptable gives 0. */
	smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		/* Unsupported stretch amount: disable the feature. */
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	/* Clear bit 0 of PWR_CKS_CNTL (read-modify-write). */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
1425
1426/**
1427* Populates the SMC VRConfig field in DPM table.
1428*
1429* @param hwmgr the address of the hardware manager
1430* @param table the SMC DPM table structure to be populated
1431* @return always 0
1432*/
1433static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1434 struct SMU74_Discrete_DpmTable *table)
1435{
1436 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1437 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1438 uint16_t config;
1439
1440 config = VR_MERGED_WITH_VDDC;
1441 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1442
1443 /* Set Vddc Voltage Controller */
1444 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1445 config = VR_SVI2_PLANE_1;
1446 table->VRConfig |= config;
1447 } else {
1448 PP_ASSERT_WITH_CODE(false,
1449 "VDDC should be on SVI2 control in merged mode!",
1450 );
1451 }
1452 /* Set Vddci Voltage Controller */
1453 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1454 config = VR_SVI2_PLANE_2; /* only in merged mode */
1455 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1456 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1457 config = VR_SMIO_PATTERN_1;
1458 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1459 } else {
1460 config = VR_STATIC_VOLTAGE;
1461 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1462 }
1463 /* Set Mvdd Voltage Controller */
1464 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1465 config = VR_SVI2_PLANE_2;
1466 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1467 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
1468 offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1469 } else {
1470 config = VR_STATIC_VOLTAGE;
1471 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1472 }
1473
1474 return 0;
1475}
1476
1477
1478static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1479{
1480 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1481 struct pp_smumgr *smumgr = hwmgr->smumgr;
1482 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1483
1484 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1485 int result = 0;
1486 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1487 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1488 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1489 uint32_t tmp, i;
1490
1491 struct phm_ppt_v1_information *table_info =
1492 (struct phm_ppt_v1_information *)hwmgr->pptable;
1493 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1494 table_info->vdd_dep_on_sclk;
1495
1496
1497 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1498 return result;
1499
1500 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1501
1502 if (0 == result) {
1503 table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1504 table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1505 table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1506 table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1507 table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1508 table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1509 table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1510 table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1511 table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1512 table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1513 table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
1514 table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1515 table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1516 table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1517 table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1518 table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
1519 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1520 AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1521 AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1522 AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1523 AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1524 AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1525 AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1526 AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1527
1528 for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1529 AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1530 AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1531 }
1532
1533 result = smu7_read_smc_sram_dword(smumgr,
1534 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1535 &tmp, SMC_RAM_END);
1536
1537 smu7_copy_bytes_to_smc(smumgr,
1538 tmp,
1539 (uint8_t *)&AVFS_meanNsigma,
1540 sizeof(AVFS_meanNsigma_t),
1541 SMC_RAM_END);
1542
1543 result = smu7_read_smc_sram_dword(smumgr,
1544 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1545 &tmp, SMC_RAM_END);
1546 smu7_copy_bytes_to_smc(smumgr,
1547 tmp,
1548 (uint8_t *)&AVFS_SclkOffset,
1549 sizeof(AVFS_Sclk_Offset_t),
1550 SMC_RAM_END);
1551
1552 data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1553 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1554 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1555 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1556 data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
1557 }
1558 return result;
1559}
1560
1561
1562/**
1563* Initialize the ARB DRAM timing table's index field.
1564*
1565* @param hwmgr the address of the powerplay hardware manager.
1566* @return always 0
1567*/
1568static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
1569{
1570 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1571 uint32_t tmp;
1572 int result;
1573
1574 /* This is a read-modify-write on the first byte of the ARB table.
1575 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
1576 * is the field 'current'.
1577 * This solution is ugly, but we never write the whole table only
1578 * individual fields in it.
1579 * In reality this field should not be in that structure
1580 * but in a soft register.
1581 */
1582 result = smu7_read_smc_sram_dword(smumgr,
1583 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1584
1585 if (result)
1586 return result;
1587
1588 tmp &= 0x00FFFFFF;
1589 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1590
1591 return smu7_write_smc_sram_dword(smumgr,
1592 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1593}
1594
1595static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1596{
1597 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1598 struct phm_ppt_v1_information *table_info =
1599 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1600
1601 if (table_info &&
1602 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
1603 table_info->cac_dtp_table->usPowerTuneDataSetID)
1604 smu_data->power_tune_defaults =
1605 &polaris10_power_tune_data_set_array
1606 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
1607 else
1608 smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
1609
1610}
1611
/**
* Initializes the SMC DPM table and uploads it to SMC SRAM.
*
* Populates every per-block level table (link, graphics, memory, ACPI,
* VCE, SAMU, UVD), the boot state, BAPM/AVFS/clock-stretcher parameters,
* the GPIO assignments and the BIF_SCLK dividers, byte-swaps the table to
* SMC endianness, then copies it into SMC memory.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, otherwise the error code of the first failing step.
*/
int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	uint8_t i;
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	pp_atomctrl_clock_dividers_vi dividers;

	polaris10_initialize_power_tune_defaults(hwmgr);

	if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
		polaris10_populate_smc_voltage_tables(hwmgr, table);

	/* System flags: AC/DC GPIO switching, stepped VDDC, memory type. */
	table->SystemFlags = 0;
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (hw_data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	/* ULV state only when supported and a voltage offset is configured. */
	if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
		result = polaris10_populate_ulv_state(hwmgr, table);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to initialize ULV state!", return result);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
	}

	result = polaris10_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Link Level!", return result);

	result = polaris10_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Graphics Level!", return result);

	result = polaris10_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Memory Level!", return result);

	result = polaris10_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACPI Level!", return result);

	result = polaris10_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize VCE Level!", return result);

	result = polaris10_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point
	 * (the other states are just copies of the boot state) we only
	 * need to populate the ARB settings for the initial state.
	 */
	result = polaris10_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to Write ARB settings for the initial state.", return result);

	result = polaris10_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize UVD Level!", return result);

	result = polaris10_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot Level!", return result);

	result = polaris10_populate_smc_initailial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot State!", return result);

	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate BAPM Parameters!", return result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ClockStretcher)) {
		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to populate Clock Stretcher Data Table!",
				return result);
	}

	result = polaris10_populate_avfs_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);

	/* Fixed/default table parameters.
	 * NOTE(review): TemperatureLimit* values are target temperature
	 * scaled by SMU7_Q88_FORMAT_CONVERSION_UNIT — presumably Q8.8
	 * fixed point; confirm against the SMU firmware spec.
	 */
	table->CurrSclkPllRange = 0xff;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh =
			table_info->cac_dtp_table->usTargetOperatingTemp *
			SMU7_Q88_FORMAT_CONVERSION_UNIT;
	table->TemperatureLimitLow =
			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
			SMU7_Q88_FORMAT_CONVERSION_UNIT;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;
	table->PCIeGenInterval = 1;
	table->VRConfig = 0;

	result = polaris10_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* VRHot GPIO: clear the RegulatorHot cap when no pin is assigned. */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
	} else {
		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO: the cap mirrors whether a pin is assigned. */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
			&gpio_pin)) {
		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	}

	/* Thermal Output GPIO */
	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
			&gpio_pin)) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);

		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;

		/* For polarity read GPIOPAD_A with assigned Gpio pin
		 * since VBIOS will program this register to set 'inactive state',
		 * driver can then determine 'active state' from this and
		 * program SMU with correct polarity
		 */
		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;

		/* if required, combine VRHot/PCC with thermal out GPIO */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
	} else {
		table->ThermOutGpio = 17;
		table->ThermOutPolarity = 1;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
	}

	/* Populate BIF_SCLK levels into SMC DPM table.
	 * Index 0 feeds the ULV state; indices 1..count feed LinkLevel[0..count-1].
	 * NOTE(review): the inclusive bound reads count+1 entries of
	 * bif_sclk_table — verify the table is sized for that (it is filled
	 * with 'count' entries in polaris10_update_bif_smc_table).
	 */
	for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);

		if (i == 0)
			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
		else
			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
	}

	/* Byte-swap everything the SMC reads to its (big-endian) layout. */
	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);

	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
			smu_data->smu7_data.dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
			SMC_RAM_END);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result);

	result = polaris10_init_arb_table_index(hwmgr->smumgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload arb data to SMC memory!", return result);

	result = polaris10_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate PM fuses to SMC memory!", return result);
	return 0;
}
1833
1834static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
1835{
1836 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1837
1838 if (data->need_update_smu7_dpm_table &
1839 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
1840 return polaris10_program_memory_timing_parameters(hwmgr);
1841
1842 return 0;
1843}
1844
1845int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
1846{
1847 int ret;
1848 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
1849 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1850 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1851
1852 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1853 return 0;
1854
1855 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1856 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
1857
1858 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
1859 0 : -1;
1860
1861 if (!ret)
1862 /* If this param is not changed, this function could fire unnecessarily */
1863 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
1864
1865 return ret;
1866}
1867
/**
* Set up the fan table to control the fan using the SMC.
*
* Builds an SMU74_Discrete_FanTable from the thermal controller's
* advanced fan-control parameters and uploads it to SMC SRAM.  On any
* failure the MicrocodeFanControl capability is cleared so the driver
* falls back to other fan control.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0; errors are reported by clearing the
*         PHM_PlatformCaps_MicrocodeFanControl capability.
*/
int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* No fan table location from the firmware header: SMC fan control
	 * is unavailable.
	 */
	if (smu_data->smu7_data.fan_table_start == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Hardware maximum fan duty cycle; zero means no usable FDO. */
	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);

	if (duty100 == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Minimum duty = usPWMMin/10000 of duty100 (usPWMMin presumably in
	 * 0.01% units — confirm against the thermal controller spec).
	 * do_div is used because this is 64-bit math in the kernel.
	 */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
			usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Temperature and PWM deltas for the two linear fan-curve segments
	 * (min->med and med->high).
	 */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Segment slopes; the '50 + .../100' pattern rounds to nearest. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Temperatures are divided by 100 (rounded) before upload — the
	 * us* fields are presumably in centidegrees; confirm.
	 * All multi-byte table fields are stored big-endian for the SMC.
	 */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->
			thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = smu7_get_xclk(hwmgr);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
			thermal_controller.advanceFanControlParameters.ulCycleDelay *
			reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Temperature source selection mirrors the hardware's current one. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
			hwmgr->device, CGS_IND_REG__SMC,
			CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
			SMC_RAM_END);

	/* Optional minimum-PWM and acoustic-limit messages, only when the
	 * upload succeeded and the parameter is configured.
	 */
	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ucMinimumPWMLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanMinPwm,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucMinimumPWMLimit);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetFanSclkTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);

	/* Failure is not fatal: just disable microcode fan control. */
	if (res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);

	return 0;
}
1978
1979static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
1980{
1981 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
1982 uint32_t mm_boot_level_offset, mm_boot_level_value;
1983 struct phm_ppt_v1_information *table_info =
1984 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1985
1986 smu_data->smc_state_table.UvdBootLevel = 0;
1987 if (table_info->mm_dep_table->count > 0)
1988 smu_data->smc_state_table.UvdBootLevel =
1989 (uint8_t) (table_info->mm_dep_table->count - 1);
1990 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
1991 UvdBootLevel);
1992 mm_boot_level_offset /= 4;
1993 mm_boot_level_offset *= 4;
1994 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
1995 CGS_IND_REG__SMC, mm_boot_level_offset);
1996 mm_boot_level_value &= 0x00FFFFFF;
1997 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
1998 cgs_write_ind_register(hwmgr->device,
1999 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2000
2001 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2002 PHM_PlatformCaps_UVDDPM) ||
2003 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2004 PHM_PlatformCaps_StablePState))
2005 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2006 PPSMC_MSG_UVDDPM_SetEnabledMask,
2007 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2008 return 0;
2009}
2010
2011static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2012{
2013 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2014 uint32_t mm_boot_level_offset, mm_boot_level_value;
2015 struct phm_ppt_v1_information *table_info =
2016 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2017
2018 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2019 PHM_PlatformCaps_StablePState))
2020 smu_data->smc_state_table.VceBootLevel =
2021 (uint8_t) (table_info->mm_dep_table->count - 1);
2022 else
2023 smu_data->smc_state_table.VceBootLevel = 0;
2024
2025 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2026 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2027 mm_boot_level_offset /= 4;
2028 mm_boot_level_offset *= 4;
2029 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2030 CGS_IND_REG__SMC, mm_boot_level_offset);
2031 mm_boot_level_value &= 0xFF00FFFF;
2032 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2033 cgs_write_ind_register(hwmgr->device,
2034 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2035
2036 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2037 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2038 PPSMC_MSG_VCEDPM_SetEnabledMask,
2039 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2040 return 0;
2041}
2042
2043static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2044{
2045 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2046 uint32_t mm_boot_level_offset, mm_boot_level_value;
2047
2048
2049 smu_data->smc_state_table.SamuBootLevel = 0;
2050 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2051 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2052
2053 mm_boot_level_offset /= 4;
2054 mm_boot_level_offset *= 4;
2055 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2056 CGS_IND_REG__SMC, mm_boot_level_offset);
2057 mm_boot_level_value &= 0xFFFFFF00;
2058 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2059 cgs_write_ind_register(hwmgr->device,
2060 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2061
2062 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2063 PHM_PlatformCaps_StablePState))
2064 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2065 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2066 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2067 return 0;
2068}
2069
2070
2071static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
2072{
2073 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2074 struct phm_ppt_v1_information *table_info =
2075 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2076 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2077 int max_entry, i;
2078
2079 max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
2080 SMU74_MAX_LEVELS_LINK :
2081 pcie_table->count;
2082 /* Setup BIF_SCLK levels */
2083 for (i = 0; i < max_entry; i++)
2084 smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
2085 return 0;
2086}
2087
2088int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2089{
2090 switch (type) {
2091 case SMU_UVD_TABLE:
2092 polaris10_update_uvd_smc_table(hwmgr);
2093 break;
2094 case SMU_VCE_TABLE:
2095 polaris10_update_vce_smc_table(hwmgr);
2096 break;
2097 case SMU_SAMU_TABLE:
2098 polaris10_update_samu_smc_table(hwmgr);
2099 break;
2100 case SMU_BIF_TABLE:
2101 polaris10_update_bif_smc_table(hwmgr);
2102 default:
2103 break;
2104 }
2105 return 0;
2106}
2107
2108int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2109{
2110 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2111 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2112
2113 int result = 0;
2114 uint32_t low_sclk_interrupt_threshold = 0;
2115
2116 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2117 PHM_PlatformCaps_SclkThrottleLowNotification)
2118 && (hwmgr->gfx_arbiter.sclk_threshold !=
2119 data->low_sclk_interrupt_threshold)) {
2120 data->low_sclk_interrupt_threshold =
2121 hwmgr->gfx_arbiter.sclk_threshold;
2122 low_sclk_interrupt_threshold =
2123 data->low_sclk_interrupt_threshold;
2124
2125 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2126
2127 result = smu7_copy_bytes_to_smc(
2128 hwmgr->smumgr,
2129 smu_data->smu7_data.dpm_table_start +
2130 offsetof(SMU74_Discrete_DpmTable,
2131 LowSclkInterruptThreshold),
2132 (uint8_t *)&low_sclk_interrupt_threshold,
2133 sizeof(uint32_t),
2134 SMC_RAM_END);
2135 }
2136 PP_ASSERT_WITH_CODE((result == 0),
2137 "Failed to update SCLK threshold!", return result);
2138
2139 result = polaris10_program_mem_timing_parameters(hwmgr);
2140 PP_ASSERT_WITH_CODE((result == 0),
2141 "Failed to program memory timing parameters!",
2142 );
2143
2144 return result;
2145}
2146
2147uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2148{
2149 switch (type) {
2150 case SMU_SoftRegisters:
2151 switch (member) {
2152 case HandshakeDisables:
2153 return offsetof(SMU74_SoftRegisters, HandshakeDisables);
2154 case VoltageChangeTimeout:
2155 return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
2156 case AverageGraphicsActivity:
2157 return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
2158 case PreVBlankGap:
2159 return offsetof(SMU74_SoftRegisters, PreVBlankGap);
2160 case VBlankTimeout:
2161 return offsetof(SMU74_SoftRegisters, VBlankTimeout);
2162 case UcodeLoadStatus:
2163 return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
2164 }
2165 case SMU_Discrete_DpmTable:
2166 switch (member) {
2167 case UvdBootLevel:
2168 return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
2169 case VceBootLevel:
2170 return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2171 case SamuBootLevel:
2172 return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2173 case LowSclkInterruptThreshold:
2174 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2175 }
2176 }
2177 printk("cant't get the offset of type %x member %x \n", type, member);
2178 return 0;
2179}
2180
2181uint32_t polaris10_get_mac_definition(uint32_t value)
2182{
2183 switch (value) {
2184 case SMU_MAX_LEVELS_GRAPHICS:
2185 return SMU74_MAX_LEVELS_GRAPHICS;
2186 case SMU_MAX_LEVELS_MEMORY:
2187 return SMU74_MAX_LEVELS_MEMORY;
2188 case SMU_MAX_LEVELS_LINK:
2189 return SMU74_MAX_LEVELS_LINK;
2190 case SMU_MAX_ENTRIES_SMIO:
2191 return SMU74_MAX_ENTRIES_SMIO;
2192 case SMU_MAX_LEVELS_VDDC:
2193 return SMU74_MAX_LEVELS_VDDC;
2194 case SMU_MAX_LEVELS_VDDGFX:
2195 return SMU74_MAX_LEVELS_VDDGFX;
2196 case SMU_MAX_LEVELS_VDDCI:
2197 return SMU74_MAX_LEVELS_VDDCI;
2198 case SMU_MAX_LEVELS_MVDD:
2199 return SMU74_MAX_LEVELS_MVDD;
2200 case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
2201 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2202 }
2203
2204 printk("cant't get the mac of %x \n", value);
2205 return 0;
2206}
2207
/**
* Get the location of various tables inside the FW image.
*
* Reads the DpmTable, SoftRegisters, mcRegisterTable, FanTable,
* mcArbDramTimingTable and Version entries of the SMU74 firmware header
* from SMC SRAM and caches the start addresses in the smumgr backend.
*
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success, -1 when any required header read failed.
*/
int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
	uint32_t tmp;
	int result;
	bool error = false;	/* set when any required read fails */

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, DpmTable),
			&tmp, SMC_RAM_END);

	if (0 == result)
		smu_data->smu7_data.dpm_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, SoftRegisters),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.soft_regs_start = tmp;

	error |= (0 != result);

	/* NOTE(review): unlike the other reads, the mcRegisterTable result
	 * is not folded into 'error' — presumably this table is optional;
	 * confirm before "fixing".
	 */
	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcRegisterTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.mc_reg_table_start = tmp;

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, FanTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.fan_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.arb_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, Version),
			&tmp, SMC_RAM_END);

	if (!result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? -1 : 0;
}
2281
2282bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2283{
2284 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2285 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2286 ? true : false;
2287}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
index c031ff99fe3e..5ade3cea8bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 Advanced Micro Devices, Inc. 2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,23 +20,23 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef POLARIS10_SMC_H
24#define POLARIS10_SMC_H
23 25
24#ifndef TONGA_SMUMGR_H 26#include "smumgr.h"
25#define TONGA_SMUMGR_H
26 27
27#include "tonga_ppsmc.h"
28 28
29int tonga_smu_init(struct amdgpu_device *adev); 29int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
30int tonga_smu_fini(struct amdgpu_device *adev); 30int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
31int tonga_smu_start(struct amdgpu_device *adev); 31int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
32 32int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
33struct tonga_smu_private_data 33int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
34{ 34int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
35 uint8_t *header; 35int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
36 uint32_t smu_buffer_addr_high; 36uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
37 uint32_t smu_buffer_addr_low; 37uint32_t polaris10_get_mac_definition(uint32_t value);
38 uint32_t header_addr_high; 38int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
39 uint32_t header_addr_low; 39bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
40};
41 40
42#endif 41#endif
42
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 8047ad221e74..5c3598ab7dae 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -38,15 +38,11 @@
38#include "ppatomctrl.h" 38#include "ppatomctrl.h"
39#include "pp_debug.h" 39#include "pp_debug.h"
40#include "cgs_common.h" 40#include "cgs_common.h"
41#include "polaris10_smc.h"
42#include "smu7_ppsmc.h"
43#include "smu7_smumgr.h"
41 44
42#define POLARIS10_SMC_SIZE 0x20000 45#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
43
44/* Microcode file is stored in this buffer */
45#define BUFFER_SIZE 80000
46#define MAX_STRING_SIZE 15
47#define BUFFER_SIZETWO 131072 /* 128 *1024 */
48
49#define SMC_RAM_END 0x40000
50 46
51static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { 47static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
52 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ 48 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
@@ -61,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
61 { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } 57 { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
62}; 58};
63 59
64static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = 60static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
65 {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; 61 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
66
67/**
68* Set the address for reading/writing the SMC SRAM space.
69* @param smumgr the address of the powerplay hardware manager.
70* @param smcAddress the address in the SMC RAM to access.
71*/
72static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
73{
74 PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
75 PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
76
77 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
78 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
79
80 return 0;
81}
82
83/**
84* Copy bytes from SMC RAM space into driver memory.
85*
86* @param smumgr the address of the powerplay SMU manager.
87* @param smc_start_address the start address in the SMC RAM to copy bytes from
88* @param src the byte array to copy the bytes to.
89* @param byte_count the number of bytes to copy.
90*/
91int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
92{
93 uint32_t data;
94 uint32_t addr;
95 uint8_t *dest_byte;
96 uint8_t i, data_byte[4] = {0};
97 uint32_t *pdata = (uint32_t *)&data_byte;
98
99 PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
100 PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
101
102 addr = smc_start_address;
103
104 while (byte_count >= 4) {
105 polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
106
107 *dest = PP_SMC_TO_HOST_UL(data);
108
109 dest += 1;
110 byte_count -= 4;
111 addr += 4;
112 }
113
114 if (byte_count) {
115 polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
116 *pdata = PP_SMC_TO_HOST_UL(data);
117 /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
118 dest_byte = (uint8_t *)dest;
119 for (i = 0; i < byte_count; i++)
120 dest_byte[i] = data_byte[i];
121 }
122
123 return 0;
124}
125
126/**
127* Copy bytes from an array into the SMC RAM space.
128*
129* @param pSmuMgr the address of the powerplay SMU manager.
130* @param smc_start_address the start address in the SMC RAM to copy bytes to.
131* @param src the byte array to copy the bytes from.
132* @param byte_count the number of bytes to copy.
133*/
134int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
135 const uint8_t *src, uint32_t byte_count, uint32_t limit)
136{
137 int result;
138 uint32_t data = 0;
139 uint32_t original_data;
140 uint32_t addr = 0;
141 uint32_t extra_shift;
142
143 PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
144 PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
145
146 addr = smc_start_address;
147
148 while (byte_count >= 4) {
149 /* Bytes are written into the SMC addres space with the MSB first. */
150 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
151
152 result = polaris10_set_smc_sram_address(smumgr, addr, limit);
153
154 if (0 != result)
155 return result;
156
157 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
158
159 src += 4;
160 byte_count -= 4;
161 addr += 4;
162 }
163
164 if (0 != byte_count) {
165
166 data = 0;
167
168 result = polaris10_set_smc_sram_address(smumgr, addr, limit);
169
170 if (0 != result)
171 return result;
172
173
174 original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
175
176 extra_shift = 8 * (4 - byte_count);
177
178 while (byte_count > 0) {
179 /* Bytes are written into the SMC addres space with the MSB first. */
180 data = (0x100 * data) + *src++;
181 byte_count--;
182 }
183
184 data <<= extra_shift;
185
186 data |= (original_data & ~((~0UL) << extra_shift));
187
188 result = polaris10_set_smc_sram_address(smumgr, addr, limit);
189
190 if (0 != result)
191 return result;
192
193 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
194 }
195
196 return 0;
197}
198
199
200static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
201{
202 static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
203
204 polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
205
206 return 0;
207}
208
209/**
210* Return if the SMC is currently running.
211*
212* @param smumgr the address of the powerplay hardware manager.
213*/
214bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
215{
216 return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
217 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
218}
219
220static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
221{
222 uint32_t efuse;
223
224 efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
225 efuse &= 0x00000001;
226 if (efuse)
227 return true;
228
229 return false;
230}
231
232/**
233* Send a message to the SMC, and wait for its response.
234*
235* @param smumgr the address of the powerplay hardware manager.
236* @param msg the message to send.
237* @return The response that came from the SMC.
238*/
239int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
240{
241 int ret;
242
243 if (!polaris10_is_smc_ram_running(smumgr))
244 return -1;
245
246
247 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
248
249 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
250
251 if (ret != 1)
252 printk("\n failed to send pre message %x ret is %d \n", msg, ret);
253
254 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
255
256 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
257
258 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
259
260 if (ret != 1)
261 printk("\n failed to send message %x ret is %d \n", msg, ret);
262
263 return 0;
264}
265
266
267/**
268* Send a message to the SMC, and do not wait for its response.
269*
270* @param smumgr the address of the powerplay hardware manager.
271* @param msg the message to send.
272* @return Always return 0.
273*/
274int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
275{
276 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
277
278 return 0;
279}
280
281/**
282* Send a message to the SMC with parameter
283*
284* @param smumgr: the address of the powerplay hardware manager.
285* @param msg: the message to send.
286* @param parameter: the parameter to send
287* @return The response that came from the SMC.
288*/
289int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
290{
291 if (!polaris10_is_smc_ram_running(smumgr)) {
292 return -1;
293 }
294
295 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
296
297 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
298
299 return polaris10_send_msg_to_smc(smumgr, msg);
300}
301
302
303/**
304* Send a message to the SMC with parameter, do not wait for response
305*
306* @param smumgr: the address of the powerplay hardware manager.
307* @param msg: the message to send.
308* @param parameter: the parameter to send
309* @return The response that came from the SMC.
310*/
311int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
312{
313 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
314
315 return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
316}
317
318int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
319{
320 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
321
322 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
323
324 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
325
326 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
327 printk("Failed to send Message.\n");
328
329 return 0;
330}
331
332/**
333* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
334*
335* @param smumgr the address of the powerplay hardware manager.
336* @param msg the message to send.
337* @return The response that came from the SMC.
338*/
339int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
340{
341 /* If the SMC is not even on it qualifies as inactive. */
342 if (!polaris10_is_smc_ram_running(smumgr))
343 return -1;
344
345 SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
346 return 0;
347}
348
349
350/**
351* Upload the SMC firmware to the SMC microcontroller.
352*
353* @param smumgr the address of the powerplay hardware manager.
354* @param pFirmware the data structure containing the various sections of the firmware.
355*/
356static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
357{
358 uint32_t byte_count = length;
359
360 PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
361 62
362 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
363 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
364
365 for (; byte_count >= 4; byte_count -= 4)
366 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
367
368 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
369
370 PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1);
371
372 return 0;
373}
374
375static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
376{
377 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
378
379 switch (fw_type) {
380 case UCODE_ID_SMU:
381 result = CGS_UCODE_ID_SMU;
382 break;
383 case UCODE_ID_SMU_SK:
384 result = CGS_UCODE_ID_SMU_SK;
385 break;
386 case UCODE_ID_SDMA0:
387 result = CGS_UCODE_ID_SDMA0;
388 break;
389 case UCODE_ID_SDMA1:
390 result = CGS_UCODE_ID_SDMA1;
391 break;
392 case UCODE_ID_CP_CE:
393 result = CGS_UCODE_ID_CP_CE;
394 break;
395 case UCODE_ID_CP_PFP:
396 result = CGS_UCODE_ID_CP_PFP;
397 break;
398 case UCODE_ID_CP_ME:
399 result = CGS_UCODE_ID_CP_ME;
400 break;
401 case UCODE_ID_CP_MEC:
402 result = CGS_UCODE_ID_CP_MEC;
403 break;
404 case UCODE_ID_CP_MEC_JT1:
405 result = CGS_UCODE_ID_CP_MEC_JT1;
406 break;
407 case UCODE_ID_CP_MEC_JT2:
408 result = CGS_UCODE_ID_CP_MEC_JT2;
409 break;
410 case UCODE_ID_RLC_G:
411 result = CGS_UCODE_ID_RLC_G;
412 break;
413 default:
414 break;
415 }
416
417 return result;
418}
419
420static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
421{
422 int result = 0;
423 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
424
425 struct cgs_firmware_info info = {0};
426
427 if (smu_data->security_hard_key == 1)
428 cgs_get_firmware_info(smumgr->device,
429 polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
430 else
431 cgs_get_firmware_info(smumgr->device,
432 polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
433
434 /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
435 result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
436
437 return result;
438}
439
440/**
441* Read a 32bit value from the SMC SRAM space.
442* ALL PARAMETERS ARE IN HOST BYTE ORDER.
443* @param smumgr the address of the powerplay hardware manager.
444* @param smcAddress the address in the SMC RAM to access.
445* @param value and output parameter for the data read from the SMC SRAM.
446*/
447int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
448{
449 int result;
450
451 result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
452
453 if (result)
454 return result;
455
456 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
457 return 0;
458}
459
460/**
461* Write a 32bit value to the SMC SRAM space.
462* ALL PARAMETERS ARE IN HOST BYTE ORDER.
463* @param smumgr the address of the powerplay hardware manager.
464* @param smc_addr the address in the SMC RAM to access.
465* @param value to write to the SMC SRAM.
466*/
467int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
468{
469 int result;
470
471 result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
472
473 if (result)
474 return result;
475
476 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
477
478 return 0;
479}
480
481
482int polaris10_smu_fini(struct pp_smumgr *smumgr)
483{
484 if (smumgr->backend) {
485 kfree(smumgr->backend);
486 smumgr->backend = NULL;
487 }
488 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
489 return 0;
490}
491
492/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
493static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
494{
495 uint32_t result = 0;
496
497 switch (fw_type) {
498 case UCODE_ID_SDMA0:
499 result = UCODE_ID_SDMA0_MASK;
500 break;
501 case UCODE_ID_SDMA1:
502 result = UCODE_ID_SDMA1_MASK;
503 break;
504 case UCODE_ID_CP_CE:
505 result = UCODE_ID_CP_CE_MASK;
506 break;
507 case UCODE_ID_CP_PFP:
508 result = UCODE_ID_CP_PFP_MASK;
509 break;
510 case UCODE_ID_CP_ME:
511 result = UCODE_ID_CP_ME_MASK;
512 break;
513 case UCODE_ID_CP_MEC_JT1:
514 case UCODE_ID_CP_MEC_JT2:
515 result = UCODE_ID_CP_MEC_MASK;
516 break;
517 case UCODE_ID_RLC_G:
518 result = UCODE_ID_RLC_G_MASK;
519 break;
520 default:
521 printk("UCode type is out of range! \n");
522 result = 0;
523 }
524
525 return result;
526}
527
528/* Populate one firmware image to the data structure */
529
530static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
531 uint32_t fw_type,
532 struct SMU_Entry *entry)
533{
534 int result = 0;
535 struct cgs_firmware_info info = {0};
536
537 result = cgs_get_firmware_info(smumgr->device,
538 polaris10_convert_fw_type_to_cgs(fw_type),
539 &info);
540
541 if (!result) {
542 entry->version = info.version;
543 entry->id = (uint16_t)fw_type;
544 entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
545 entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
546 entry->meta_data_addr_high = 0;
547 entry->meta_data_addr_low = 0;
548 entry->data_size_byte = info.image_size;
549 entry->num_register_entries = 0;
550 }
551
552 if (fw_type == UCODE_ID_RLC_G)
553 entry->flags = 1;
554 else
555 entry->flags = 0;
556
557 return 0;
558}
559
560static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
561{
562 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
563 uint32_t fw_to_load;
564
565 int result = 0;
566 struct SMU_DRAMData_TOC *toc;
567
568 if (!smumgr->reload_fw) {
569 printk(KERN_INFO "[ powerplay ] skip reloading...\n");
570 return 0;
571 }
572
573 if (smu_data->soft_regs_start)
574 cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
575 smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
576 0x0);
577
578 polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
579 polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
580
581 toc = (struct SMU_DRAMData_TOC *)smu_data->header;
582 toc->num_entries = 0;
583 toc->structure_version = 1;
584
585 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
586 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
587 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
588 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
589 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
590 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
591 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
592 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
593 PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
594
595 polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
596 polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
597
598 fw_to_load = UCODE_ID_RLC_G_MASK
599 + UCODE_ID_SDMA0_MASK
600 + UCODE_ID_SDMA1_MASK
601 + UCODE_ID_CP_CE_MASK
602 + UCODE_ID_CP_ME_MASK
603 + UCODE_ID_CP_PFP_MASK
604 + UCODE_ID_CP_MEC_MASK;
605
606 if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
607 printk(KERN_ERR "Fail to Request SMU Load uCode");
608
609 return result;
610}
611
612/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
613static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
614{
615 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
616 uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
617 uint32_t ret;
618 /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
619 ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
620 smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
621 fw_mask, fw_mask);
622
623 return ret;
624}
625
626static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
627{
628 return smumgr->smumgr_funcs->start_smu(smumgr);
629}
630 63
631static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) 64static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
632{ 65{
@@ -668,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
668 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); 101 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
669 102
670 if (0 != smu_data->avfs.avfs_btc_param) { 103 if (0 != smu_data->avfs.avfs_btc_param) {
671 if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { 104 if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
672 printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); 105 printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
673 result = -1; 106 result = -1;
674 } 107 }
@@ -696,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
696 graphics_level_size = sizeof(avfs_graphics_level_polaris10); 129 graphics_level_size = sizeof(avfs_graphics_level_polaris10);
697 u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); 130 u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
698 131
699 PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr, 132 PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
700 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), 133 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
701 &dpm_table_start, 0x40000), 134 &dpm_table_start, 0x40000),
702 "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", 135 "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -707,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
707 140
708 vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig); 141 vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
709 142
710 PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address, 143 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
711 (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), 144 (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
712 "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", 145 "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
713 return -1); 146 return -1);
714 147
715 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); 148 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
716 149
717 PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, 150 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
718 (uint8_t *)(&avfs_graphics_level_polaris10), 151 (uint8_t *)(&avfs_graphics_level_polaris10),
719 graphics_level_size, 0x40000), 152 graphics_level_size, 0x40000),
720 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", 153 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -722,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
722 155
723 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); 156 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
724 157
725 PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, 158 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
726 (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), 159 (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
727 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", 160 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
728 return -1); 161 return -1);
@@ -731,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
731 164
732 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); 165 graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
733 166
734 PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, 167 PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
735 (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), 168 (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
736 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", 169 "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
737 return -1); 170 return -1);
@@ -792,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
792 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 225 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
793 SMC_SYSCON_RESET_CNTL, rst_reg, 1); 226 SMC_SYSCON_RESET_CNTL, rst_reg, 1);
794 227
795 result = polaris10_upload_smu_firmware_image(smumgr); 228 result = smu7_upload_smu_firmware_image(smumgr);
796 if (result != 0) 229 if (result != 0)
797 return result; 230 return result;
798 231
@@ -811,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
811 244
812 245
813 /* Call Test SMU message with 0x20000 offset to trigger SMU start */ 246 /* Call Test SMU message with 0x20000 offset to trigger SMU start */
814 polaris10_send_msg_to_smc_offset(smumgr); 247 smu7_send_msg_to_smc_offset(smumgr);
815 248
816 /* Wait done bit to be set */ 249 /* Wait done bit to be set */
817 /* Check pass/failed indicator */ 250 /* Check pass/failed indicator */
@@ -852,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
852 SMC_SYSCON_RESET_CNTL, 285 SMC_SYSCON_RESET_CNTL,
853 rst_reg, 1); 286 rst_reg, 1);
854 287
855 result = polaris10_upload_smu_firmware_image(smumgr); 288 result = smu7_upload_smu_firmware_image(smumgr);
856 if (result != 0) 289 if (result != 0)
857 return result; 290 return result;
858 291
859 /* Set smc instruct start point at 0x0 */ 292 /* Set smc instruct start point at 0x0 */
860 polaris10_program_jump_on_start(smumgr); 293 smu7_program_jump_on_start(smumgr);
861 294
862 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 295 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
863 SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); 296 SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@@ -880,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
880 bool SMU_VFT_INTACT; 313 bool SMU_VFT_INTACT;
881 314
882 /* Only start SMC if SMC RAM is not running */ 315 /* Only start SMC if SMC RAM is not running */
883 if (!polaris10_is_smc_ram_running(smumgr)) { 316 if (!smu7_is_smc_ram_running(smumgr)) {
884 SMU_VFT_INTACT = false; 317 SMU_VFT_INTACT = false;
885 smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); 318 smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
886 smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); 319 smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
887 320
888 /* Check if SMU is running in protected mode */ 321 /* Check if SMU is running in protected mode */
889 if (smu_data->protected_mode == 0) { 322 if (smu_data->protected_mode == 0) {
@@ -893,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
893 326
894 /* If failed, try with different security Key. */ 327 /* If failed, try with different security Key. */
895 if (result != 0) { 328 if (result != 0) {
896 smu_data->security_hard_key ^= 1; 329 smu_data->smu7_data.security_hard_key ^= 1;
897 result = polaris10_start_smu_in_protection_mode(smumgr); 330 result = polaris10_start_smu_in_protection_mode(smumgr);
898 } 331 }
899 } 332 }
@@ -905,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
905 } else 338 } else
906 SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ 339 SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
907 340
908 smu_data->post_initial_boot = true;
909 polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); 341 polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
910 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ 342 /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
911 polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), 343 smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
912 &(smu_data->soft_regs_start), 0x40000); 344 &(smu_data->smu7_data.soft_regs_start), 0x40000);
913 345
914 result = polaris10_request_smu_load_fw(smumgr); 346 result = smu7_request_smu_load_fw(smumgr);
915 347
916 return result; 348 return result;
917} 349}
918 350
351static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
352{
353 uint32_t efuse;
354
355 efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
356 efuse &= 0x00000001;
357 if (efuse)
358 return true;
359
360 return false;
361}
362
919static int polaris10_smu_init(struct pp_smumgr *smumgr) 363static int polaris10_smu_init(struct pp_smumgr *smumgr)
920{ 364{
921 struct polaris10_smumgr *smu_data; 365 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
922 uint8_t *internal_buf; 366 int i;
923 uint64_t mc_addr = 0;
924 /* Allocate memory for backend private data */
925 smu_data = (struct polaris10_smumgr *)(smumgr->backend);
926 smu_data->header_buffer.data_size =
927 ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
928 smu_data->smu_buffer.data_size = 200*4096;
929 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
930/* Allocate FW image data structure and header buffer and
931 * send the header buffer address to SMU */
932 smu_allocate_memory(smumgr->device,
933 smu_data->header_buffer.data_size,
934 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
935 PAGE_SIZE,
936 &mc_addr,
937 &smu_data->header_buffer.kaddr,
938 &smu_data->header_buffer.handle);
939
940 smu_data->header = smu_data->header_buffer.kaddr;
941 smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
942 smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
943
944 PP_ASSERT_WITH_CODE((NULL != smu_data->header),
945 "Out of memory.",
946 kfree(smumgr->backend);
947 cgs_free_gpu_mem(smumgr->device,
948 (cgs_handle_t)smu_data->header_buffer.handle);
949 return -1);
950 367
951/* Allocate buffer for SMU internal buffer and send the address to SMU. 368 if (smu7_init(smumgr))
952 * Iceland SMU does not need internal buffer.*/ 369 return -EINVAL;
953 smu_allocate_memory(smumgr->device,
954 smu_data->smu_buffer.data_size,
955 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
956 PAGE_SIZE,
957 &mc_addr,
958 &smu_data->smu_buffer.kaddr,
959 &smu_data->smu_buffer.handle);
960
961 internal_buf = smu_data->smu_buffer.kaddr;
962 smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
963 smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
964
965 PP_ASSERT_WITH_CODE((NULL != internal_buf),
966 "Out of memory.",
967 kfree(smumgr->backend);
968 cgs_free_gpu_mem(smumgr->device,
969 (cgs_handle_t)smu_data->smu_buffer.handle);
970 return -1;);
971 370
972 if (polaris10_is_hw_avfs_present(smumgr)) 371 if (polaris10_is_hw_avfs_present(smumgr))
973 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; 372 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
974 else 373 else
975 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; 374 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
976 375
376 for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
377 smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
378
977 return 0; 379 return 0;
978} 380}
979 381
980static const struct pp_smumgr_func polaris10_smu_funcs = { 382static const struct pp_smumgr_func polaris10_smu_funcs = {
981 .smu_init = polaris10_smu_init, 383 .smu_init = polaris10_smu_init,
982 .smu_fini = polaris10_smu_fini, 384 .smu_fini = smu7_smu_fini,
983 .start_smu = polaris10_start_smu, 385 .start_smu = polaris10_start_smu,
984 .check_fw_load_finish = polaris10_check_fw_load_finish, 386 .check_fw_load_finish = smu7_check_fw_load_finish,
985 .request_smu_load_fw = polaris10_reload_firmware, 387 .request_smu_load_fw = smu7_reload_firmware,
986 .request_smu_load_specific_fw = NULL, 388 .request_smu_load_specific_fw = NULL,
987 .send_msg_to_smc = polaris10_send_msg_to_smc, 389 .send_msg_to_smc = smu7_send_msg_to_smc,
988 .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter, 390 .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
989 .download_pptable_settings = NULL, 391 .download_pptable_settings = NULL,
990 .upload_pptable_settings = NULL, 392 .upload_pptable_settings = NULL,
393 .update_smc_table = polaris10_update_smc_table,
394 .get_offsetof = polaris10_get_offsetof,
395 .process_firmware_header = polaris10_process_firmware_header,
396 .init_smc_table = polaris10_init_smc_table,
397 .update_sclk_threshold = polaris10_update_sclk_threshold,
398 .thermal_avfs_enable = polaris10_thermal_avfs_enable,
399 .thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
400 .populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
401 .populate_all_memory_levels = polaris10_populate_all_memory_levels,
402 .get_mac_definition = polaris10_get_mac_definition,
403 .is_dpm_running = polaris10_is_dpm_running,
991}; 404};
992 405
993int polaris10_smum_init(struct pp_smumgr *smumgr) 406int polaris10_smum_init(struct pp_smumgr *smumgr)
@@ -997,7 +410,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr)
997 polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); 410 polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
998 411
999 if (polaris10_smu == NULL) 412 if (polaris10_smu == NULL)
1000 return -1; 413 return -EINVAL;
1001 414
1002 smumgr->backend = polaris10_smu; 415 smumgr->backend = polaris10_smu;
1003 smumgr->smumgr_funcs = &polaris10_smu_funcs; 416 smumgr->smumgr_funcs = &polaris10_smu_funcs;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index 7c2445f1f043..49ebf1d5a53c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -24,9 +24,13 @@
24#ifndef _POLARIS10_SMUMANAGER_H 24#ifndef _POLARIS10_SMUMANAGER_H
25#define _POLARIS10_SMUMANAGER_H 25#define _POLARIS10_SMUMANAGER_H
26 26
27#include <polaris10_ppsmc.h> 27
28#include <pp_endian.h> 28#include <pp_endian.h>
29#include "smu74.h" 29#include "smu74.h"
30#include "smu74_discrete.h"
31#include "smu7_smumgr.h"
32
33#define SMC_RAM_END 0x40000
30 34
31struct polaris10_avfs { 35struct polaris10_avfs {
32 enum AVFS_BTC_STATUS avfs_btc_status; 36 enum AVFS_BTC_STATUS avfs_btc_status;
@@ -47,13 +51,7 @@ struct polaris10_pt_defaults {
47 uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; 51 uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
48}; 52};
49 53
50struct polaris10_buffer_entry { 54
51 uint32_t data_size;
52 uint32_t mc_addr_low;
53 uint32_t mc_addr_high;
54 void *kaddr;
55 unsigned long handle;
56};
57 55
58struct polaris10_range_table { 56struct polaris10_range_table {
59 uint32_t trans_lower_frequency; /* in 10khz */ 57 uint32_t trans_lower_frequency; /* in 10khz */
@@ -61,28 +59,17 @@ struct polaris10_range_table {
61}; 59};
62 60
63struct polaris10_smumgr { 61struct polaris10_smumgr {
64 uint8_t *header; 62 struct smu7_smumgr smu7_data;
65 uint8_t *mec_image;
66 struct polaris10_buffer_entry smu_buffer;
67 struct polaris10_buffer_entry header_buffer;
68 uint32_t soft_regs_start;
69 uint8_t *read_rrm_straps;
70 uint32_t read_drm_straps_mc_address_high;
71 uint32_t read_drm_straps_mc_address_low;
72 uint32_t acpi_optimization;
73 bool post_initial_boot;
74 uint8_t protected_mode; 63 uint8_t protected_mode;
75 uint8_t security_hard_key;
76 struct polaris10_avfs avfs; 64 struct polaris10_avfs avfs;
65 SMU74_Discrete_DpmTable smc_state_table;
66 struct SMU74_Discrete_Ulv ulv_setting;
67 struct SMU74_Discrete_PmFuses power_tune_table;
68 struct polaris10_range_table range_table[NUM_SCLK_RANGE];
69 const struct polaris10_pt_defaults *power_tune_defaults;
70 uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
71 uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
77}; 72};
78 73
79 74
80int polaris10_smum_init(struct pp_smumgr *smumgr);
81
82int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
83int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
84int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
85 const uint8_t *src, uint32_t byte_count, uint32_t limit);
86
87#endif 75#endif
88
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
new file mode 100644
index 000000000000..6af744f42ec9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -0,0 +1,589 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24
25#include "smumgr.h"
26#include "smu_ucode_xfer_vi.h"
27#include "smu/smu_7_1_3_d.h"
28#include "smu/smu_7_1_3_sh_mask.h"
29#include "ppatomctrl.h"
30#include "pp_debug.h"
31#include "cgs_common.h"
32#include "smu7_ppsmc.h"
33#include "smu7_smumgr.h"
34
35#define SMU7_SMC_SIZE 0x20000
36
/* Program the SMC indirect-access index register with @smc_addr.
 * The address must be dword aligned and its 4-byte payload must lie
 * below @limit (the end of usable SMC RAM).  Returns 0 or -EINVAL.
 */
static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
	return 0;
}
46
47
48int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
49{
50 uint32_t data;
51 uint32_t addr;
52 uint8_t *dest_byte;
53 uint8_t i, data_byte[4] = {0};
54 uint32_t *pdata = (uint32_t *)&data_byte;
55
56 PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
57 PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
58
59 addr = smc_start_address;
60
61 while (byte_count >= 4) {
62 smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
63
64 *dest = PP_SMC_TO_HOST_UL(data);
65
66 dest += 1;
67 byte_count -= 4;
68 addr += 4;
69 }
70
71 if (byte_count) {
72 smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
73 *pdata = PP_SMC_TO_HOST_UL(data);
74 /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
75 dest_byte = (uint8_t *)dest;
76 for (i = 0; i < byte_count; i++)
77 dest_byte[i] = data_byte[i];
78 }
79
80 return 0;
81}
82
83
/*
 * Write @byte_count bytes from @src into SMC RAM at @smc_start_address
 * (dword aligned, below @limit).  Whole dwords are packed big-endian
 * (MSB first); a trailing partial dword is merged with the existing
 * SMC contents via read-modify-write so neighbouring bytes survive.
 * Returns 0 on success or a negative error code.
 */
int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC addres space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		/* fetch the dword we are about to partially overwrite */
		original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);

		/* number of bit positions the new bytes sit above the kept ones */
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC addres space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* keep the low-order bytes that were already in SMC RAM */
		data |= (original_data & ~((~0UL) << extra_shift));

		/* NOTE(review): the address is re-programmed before the final
		 * write; with auto-increment disabled this looks redundant -
		 * confirm against hardware behavior before simplifying. */
		result = smu7_set_smc_sram_address(smumgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}
148
149
/*
 * Write the "jump to firmware entry point" word at SMC address 0 so
 * the SMU starts executing the freshly-loaded image.
 * Returns 0 on success or a negative error code.
 *
 * Fix: the original ignored the smu7_copy_bytes_to_smc() result and
 * unconditionally returned 0.
 */
int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	return smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
}
158
159bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
160{
161 return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
162 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
163}
164
165int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
166{
167 int ret;
168
169 if (!smu7_is_smc_ram_running(smumgr))
170 return -EINVAL;
171
172
173 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
174
175 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
176
177 if (ret != 1)
178 printk("\n failed to send pre message %x ret is %d \n", msg, ret);
179
180 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
181
182 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
183
184 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
185
186 if (ret != 1)
187 printk("\n failed to send message %x ret is %d \n", msg, ret);
188
189 return 0;
190}
191
192int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
193{
194 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
195
196 return 0;
197}
198
199int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
200{
201 if (!smu7_is_smc_ram_running(smumgr)) {
202 return -EINVAL;
203 }
204
205 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
206
207 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
208
209 return smu7_send_msg_to_smc(smumgr, msg);
210}
211
212int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
213{
214 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
215
216 return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
217}
218
219int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
220{
221 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
222
223 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
224
225 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
226
227 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
228 printk("Failed to send Message.\n");
229
230 return 0;
231}
232
233int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
234{
235 if (!smu7_is_smc_ram_running(smumgr))
236 return -EINVAL;
237
238 SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
239 return 0;
240}
241
242
243enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
244{
245 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
246
247 switch (fw_type) {
248 case UCODE_ID_SMU:
249 result = CGS_UCODE_ID_SMU;
250 break;
251 case UCODE_ID_SMU_SK:
252 result = CGS_UCODE_ID_SMU_SK;
253 break;
254 case UCODE_ID_SDMA0:
255 result = CGS_UCODE_ID_SDMA0;
256 break;
257 case UCODE_ID_SDMA1:
258 result = CGS_UCODE_ID_SDMA1;
259 break;
260 case UCODE_ID_CP_CE:
261 result = CGS_UCODE_ID_CP_CE;
262 break;
263 case UCODE_ID_CP_PFP:
264 result = CGS_UCODE_ID_CP_PFP;
265 break;
266 case UCODE_ID_CP_ME:
267 result = CGS_UCODE_ID_CP_ME;
268 break;
269 case UCODE_ID_CP_MEC:
270 result = CGS_UCODE_ID_CP_MEC;
271 break;
272 case UCODE_ID_CP_MEC_JT1:
273 result = CGS_UCODE_ID_CP_MEC_JT1;
274 break;
275 case UCODE_ID_CP_MEC_JT2:
276 result = CGS_UCODE_ID_CP_MEC_JT2;
277 break;
278 case UCODE_ID_RLC_G:
279 result = CGS_UCODE_ID_RLC_G;
280 break;
281 default:
282 break;
283 }
284
285 return result;
286}
287
288
289int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
290{
291 int result;
292
293 result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
294
295 if (result)
296 return result;
297
298 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
299 return 0;
300}
301
302int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
303{
304 int result;
305
306 result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
307
308 if (result)
309 return result;
310
311 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
312
313 return 0;
314}
315
316/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
317
318static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
319{
320 uint32_t result = 0;
321
322 switch (fw_type) {
323 case UCODE_ID_SDMA0:
324 result = UCODE_ID_SDMA0_MASK;
325 break;
326 case UCODE_ID_SDMA1:
327 result = UCODE_ID_SDMA1_MASK;
328 break;
329 case UCODE_ID_CP_CE:
330 result = UCODE_ID_CP_CE_MASK;
331 break;
332 case UCODE_ID_CP_PFP:
333 result = UCODE_ID_CP_PFP_MASK;
334 break;
335 case UCODE_ID_CP_ME:
336 result = UCODE_ID_CP_ME_MASK;
337 break;
338 case UCODE_ID_CP_MEC:
339 case UCODE_ID_CP_MEC_JT1:
340 case UCODE_ID_CP_MEC_JT2:
341 result = UCODE_ID_CP_MEC_MASK;
342 break;
343 case UCODE_ID_RLC_G:
344 result = UCODE_ID_RLC_G_MASK;
345 break;
346 default:
347 printk("UCode type is out of range! \n");
348 result = 0;
349 }
350
351 return result;
352}
353
354static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
355 uint32_t fw_type,
356 struct SMU_Entry *entry)
357{
358 int result = 0;
359 struct cgs_firmware_info info = {0};
360
361 result = cgs_get_firmware_info(smumgr->device,
362 smu7_convert_fw_type_to_cgs(fw_type),
363 &info);
364
365 if (!result) {
366 entry->version = info.version;
367 entry->id = (uint16_t)fw_type;
368 entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
369 entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
370 entry->meta_data_addr_high = 0;
371 entry->meta_data_addr_low = 0;
372 entry->data_size_byte = info.image_size;
373 entry->num_register_entries = 0;
374 }
375
376 if (fw_type == UCODE_ID_RLC_G)
377 entry->flags = 1;
378 else
379 entry->flags = 0;
380
381 return 0;
382}
383
/*
 * Build the firmware table of contents (TOC) in the header buffer and
 * ask the SMC to load every required ucode image.
 * Returns 0 on success, -EINVAL when a TOC entry cannot be populated.
 */
int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
	uint32_t fw_to_load;
	int result = 0;
	struct SMU_DRAMData_TOC *toc;

	if (!smumgr->reload_fw) {
		printk(KERN_INFO "[ powerplay ] skip reloading...\n");
		return 0;
	}

	/* clear the per-ucode load-status word the SMC reports into */
	if (smu_data->soft_regs_start)
		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(smumgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		/* hand the SMC its scratch buffer's GPU address */
		smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
		smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
		fw_to_load = UCODE_ID_RLC_G_MASK
			+ UCODE_ID_SDMA0_MASK
			+ UCODE_ID_SDMA1_MASK
			+ UCODE_ID_CP_CE_MASK
			+ UCODE_ID_CP_ME_MASK
			+ UCODE_ID_CP_PFP_MASK
			+ UCODE_ID_CP_MEC_MASK;
	} else {
		/* Topaz additionally needs the MEC jump tables loaded */
		fw_to_load = UCODE_ID_RLC_G_MASK
			+ UCODE_ID_SDMA0_MASK
			+ UCODE_ID_SDMA1_MASK
			+ UCODE_ID_CP_CE_MASK
			+ UCODE_ID_CP_ME_MASK
			+ UCODE_ID_CP_PFP_MASK
			+ UCODE_ID_CP_MEC_MASK
			+ UCODE_ID_CP_MEC_JT1_MASK
			+ UCODE_ID_CP_MEC_JT2_MASK;
	}

	/* populate the TOC in the shared header buffer, one entry per ucode */
	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);

	/* tell the SMC where the TOC lives and kick off the load */
	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);

	if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
		printk(KERN_ERR "Fail to Request SMU Load uCode");

	return result;
}
464
465/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
466int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
467{
468 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
469 uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
470 uint32_t ret;
471
472 ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
473 smu_data->soft_regs_start + smum_get_offsetof(smumgr,
474 SMU_SoftRegisters, UcodeLoadStatus),
475 fw_mask, fw_mask);
476
477 return ret;
478}
479
480int smu7_reload_firmware(struct pp_smumgr *smumgr)
481{
482 return smumgr->smumgr_funcs->start_smu(smumgr);
483}
484
485static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
486{
487 uint32_t byte_count = length;
488
489 PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
490
491 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
492 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
493
494 for (; byte_count >= 4; byte_count -= 4)
495 cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
496
497 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
498
499 PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
500
501 return 0;
502}
503
504
/*
 * Upload the SMU firmware image into SMC RAM.  The secured image
 * (UCODE_ID_SMU) or the SK image (UCODE_ID_SMU_SK) is chosen based on
 * the board's security_hard_key value.  Returns the upload result.
 */
int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);

	struct cgs_firmware_info info = {0};

	if (smu_data->security_hard_key == 1)
		cgs_get_firmware_info(smumgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
	else
		cgs_get_firmware_info(smumgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

	/* NOTE(review): cgs_get_firmware_info()'s return code is ignored;
	 * on failure info.kptr stays NULL - confirm the upload path copes. */
	result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

	return result;
}
523
524
/*
 * Allocate the GPU-visible buffers the SMU needs - the firmware-TOC
 * header buffer and a general SMU scratch buffer - and record their
 * CPU and MC (GPU) addresses in the backend private data.
 * Returns 0 on success, -EINVAL on allocation failure (the backend
 * struct is freed on the error paths).
 */
int smu7_init(struct pp_smumgr *smumgr)
{
	struct smu7_smumgr *smu_data;
	uint8_t *internal_buf;
	uint64_t mc_addr = 0;

	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(smumgr->backend);
	/* round the TOC size up to whole 4 KiB pages */
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
	smu_data->smu_buffer.data_size = 200*4096;

/* Allocate FW image data structure and header buffer and
 * send the header buffer address to SMU */
	/* NOTE(review): smu_allocate_memory()'s return code is ignored;
	 * failure is only caught by the NULL kaddr checks below - confirm. */
	smu_allocate_memory(smumgr->device,
		smu_data->header_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->header_buffer.kaddr,
		&smu_data->header_buffer.handle);

	smu_data->header = smu_data->header_buffer.kaddr;
	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)smu_data->header_buffer.handle);
		return -EINVAL);

	smu_allocate_memory(smumgr->device,
		smu_data->smu_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->smu_buffer.kaddr,
		&smu_data->smu_buffer.handle);

	internal_buf = smu_data->smu_buffer.kaddr;
	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != internal_buf),
		"Out of memory.",
		kfree(smumgr->backend);
		cgs_free_gpu_mem(smumgr->device,
		(cgs_handle_t)smu_data->smu_buffer.handle);
		return -EINVAL);

	return 0;
}
579
580
581int smu7_smu_fini(struct pp_smumgr *smumgr)
582{
583 if (smumgr->backend) {
584 kfree(smumgr->backend);
585 smumgr->backend = NULL;
586 }
587 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
588 return 0;
589}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
new file mode 100644
index 000000000000..76352f2423ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _SMU7_SMUMANAGER_H
25#define _SMU7_SMUMANAGER_H
26
27
28#include <pp_endian.h>
29
30#define SMC_RAM_END 0x40000
31#define mmSMC_IND_INDEX_11 0x01AC
32#define mmSMC_IND_DATA_11 0x01AD
33
/* A GPU-visible scratch buffer: its CPU mapping plus the split 64-bit
 * MC (GPU) address that is handed to the SMC. */
struct smu7_buffer_entry {
	uint32_t data_size;
	uint32_t mc_addr_low;
	uint32_t mc_addr_high;
	void *kaddr;		/* CPU virtual address of the mapping */
	unsigned long handle;	/* cgs allocation handle, used for freeing */
};
41
/* Backend-private state shared by the SMU7-family SMU managers. */
struct smu7_smumgr {
	uint8_t *header;	/* CPU view of the firmware TOC (header_buffer) */
	uint8_t *mec_image;
	struct smu7_buffer_entry smu_buffer;	/* general SMU scratch buffer */
	struct smu7_buffer_entry header_buffer;	/* firmware TOC buffer */

	/* SMC RAM offsets - presumably filled in by the per-asic
	 * process_firmware_header hook; verify against the callers. */
	uint32_t soft_regs_start;
	uint32_t dpm_table_start;
	uint32_t mc_reg_table_start;
	uint32_t fan_table_start;
	uint32_t arb_table_start;
	uint32_t ulv_setting_starts;
	uint8_t security_hard_key;	/* 1 selects the secured SMU image over SMU_SK */
	uint32_t acpi_optimization;
};
57
58
59int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
60 uint32_t *dest, uint32_t byte_count, uint32_t limit);
61int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
62 const uint8_t *src, uint32_t byte_count, uint32_t limit);
63int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
64bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
65int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
66int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
67int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
68 uint32_t parameter);
69int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
70 uint16_t msg, uint32_t parameter);
71int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
72int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
73
74enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
75int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
76 uint32_t *value, uint32_t limit);
77int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
78 uint32_t value, uint32_t limit);
79
80int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
81int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
82int smu7_reload_firmware(struct pp_smumgr *smumgr);
83int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
84int smu7_init(struct pp_smumgr *smumgr);
85int smu7_smu_fini(struct pp_smumgr *smumgr);
86
87#endif \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index bbeb786db003..e5812aa456f3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -86,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
86 return 0; 86 return 0;
87} 87}
88 88
89int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
90 void *input, void *output, void *storage, int result)
91{
92 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
93 return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
94
95 return 0;
96}
97
98int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
99 void *input, void *output, void *storage, int result)
100{
101 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
102 return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
103
104 return 0;
105}
106
107int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
108{
109
110 if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
111 return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
112
113 return 0;
114}
115
116int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
117{
118
119 if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
120 return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
121
122 return 0;
123}
124
125uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
126{
127 if (NULL != smumgr->smumgr_funcs->get_offsetof)
128 return smumgr->smumgr_funcs->get_offsetof(type, member);
129
130 return 0;
131}
132
133int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
134{
135 if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
136 return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
137 return 0;
138}
139
89int smum_get_argument(struct pp_smumgr *smumgr) 140int smum_get_argument(struct pp_smumgr *smumgr)
90{ 141{
91 if (NULL != smumgr->smumgr_funcs->get_argument) 142 if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -94,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
94 return 0; 145 return 0;
95} 146}
96 147
148uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
149{
150 if (NULL != smumgr->smumgr_funcs->get_mac_definition)
151 return smumgr->smumgr_funcs->get_mac_definition(value);
152
153 return 0;
154}
155
97int smum_download_powerplay_table(struct pp_smumgr *smumgr, 156int smum_download_powerplay_table(struct pp_smumgr *smumgr,
98 void **table) 157 void **table)
99{ 158{
100 if (NULL != smumgr->smumgr_funcs->download_pptable_settings) 159 if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
101 return smumgr->smumgr_funcs->download_pptable_settings(smumgr, 160 return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
102 table); 161 table);
103
104 return 0; 162 return 0;
105} 163}
106 164
@@ -267,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
267 325
268 return 0; 326 return 0;
269} 327}
328
329int smum_init_smc_table(struct pp_hwmgr *hwmgr)
330{
331 if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
332 return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
333
334 return 0;
335}
336
337int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
338{
339 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
340 return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
341
342 return 0;
343}
344
345int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
346{
347 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
348 return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
349
350 return 0;
351}
352
353/*this interface is needed by island ci/vi */
354int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
355{
356 if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
357 return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
358
359 return 0;
360}
361
362bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
363{
364 if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
365 return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
366
367 return true;
368}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
new file mode 100644
index 000000000000..4dfd3f60a967
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -0,0 +1,3207 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
10 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
11 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
12 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
13 * OTHER DEALINGS IN THE SOFTWARE.
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 *
22 */
23
24#include "tonga_smc.h"
25#include "smu7_dyn_defaults.h"
26
27#include "smu7_hwmgr.h"
28#include "hardwaremanager.h"
29#include "ppatomctrl.h"
30#include "pp_debug.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "tonga_smumgr.h"
34#include "pppcielanes.h"
35#include "pp_endian.h"
36#include "smu7_ppsmc.h"
37
38#include "smu72_discrete.h"
39
40#include "smu/smu_7_1_2_d.h"
41#include "smu/smu_7_1_2_sh_mask.h"
42
43#include "gmc/gmc_8_1_d.h"
44#include "gmc/gmc_8_1_sh_mask.h"
45
46#include "bif/bif_5_0_d.h"
47#include "bif/bif_5_0_sh_mask.h"
48
49#include "dce/dce_10_0_d.h"
50#include "dce/dce_10_0_sh_mask.h"
51
52
53#define VOLTAGE_SCALE 4
54#define POWERTUNE_DEFAULT_SET_MAX 1
55#define VOLTAGE_VID_OFFSET_SCALE1 625
56#define VOLTAGE_VID_OFFSET_SCALE2 100
57#define MC_CG_ARB_FREQ_F1 0x0b
58#define VDDC_VDDCI_DELTA 200
59
60
/* Default power-tune parameter set for Tonga (single entry). */
static struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
 */
	{1, 0xF, 0xFD, 0x19,
	 5, 45, 0, 0xB0000,
	 /* NOTE(review): the two arrays below appear to be BAPM temperature
	  * coefficient tables; confirm field names against
	  * struct tonga_pt_defaults in the header. */
	 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
		0xC9, 0xC9, 0x2F, 0x4D, 0x61},
	 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
		0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
	},
};
73
/* Clock-stretcher lookup rows: [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
	{600, 1050, 3, 0},
	{600, 1050, 6, 1}
};

/* [FF, SS] type, [] 4 voltage ranges,
 * and [Floor Freq, Boundary Freq, VID min , VID max]
 */
static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
};

/* Maps stretch percentage to the register encoding, selected by the
 * [Use_For_Low_freq] value: [0%, 5%, 10%, 7.14%, 14.28%, 20%]
 */
static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
	{0, 1, 3, 2, 4, 5},
	{0, 2, 4, 5, 6, 5}
};
93
94/* PPGen has the gain setting generated in x * 100 unit
95 * This function is to convert the unit to x * 4096(0x1000) unit.
96 * This is the unit expected by SMC firmware
97 */
98
99
100static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
101 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
102 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
103{
104 uint32_t i = 0;
105 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
106 struct phm_ppt_v1_information *pptable_info =
107 (struct phm_ppt_v1_information *)(hwmgr->pptable);
108
109 /* clock - voltage dependency table is empty table */
110 if (allowed_clock_voltage_table->count == 0)
111 return -EINVAL;
112
113 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
114 /* find first sclk bigger than request */
115 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
116 voltage->VddGfx = phm_get_voltage_index(
117 pptable_info->vddgfx_lookup_table,
118 allowed_clock_voltage_table->entries[i].vddgfx);
119 voltage->Vddc = phm_get_voltage_index(
120 pptable_info->vddc_lookup_table,
121 allowed_clock_voltage_table->entries[i].vddc);
122
123 if (allowed_clock_voltage_table->entries[i].vddci)
124 voltage->Vddci =
125 phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
126 else
127 voltage->Vddci =
128 phm_get_voltage_id(&data->vddci_voltage_table,
129 allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
130
131
132 if (allowed_clock_voltage_table->entries[i].mvdd)
133 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
134
135 voltage->Phases = 1;
136 return 0;
137 }
138 }
139
140 /* sclk is bigger than max sclk in the dependence table */
141 voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
142 allowed_clock_voltage_table->entries[i-1].vddgfx);
143 voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
144 allowed_clock_voltage_table->entries[i-1].vddc);
145
146 if (allowed_clock_voltage_table->entries[i-1].vddci)
147 voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
148 allowed_clock_voltage_table->entries[i-1].vddci);
149
150 if (allowed_clock_voltage_table->entries[i-1].mvdd)
151 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
152
153 return 0;
154}
155
156
157/**
158 * Vddc table preparation for SMC.
159 *
160 * @param hwmgr the address of the hardware manager
161 * @param table the SMC DPM table structure to be populated
162 * @return always 0
163 */
164static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
165 SMU72_Discrete_DpmTable *table)
166{
167 unsigned int count;
168 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
169
170 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
171 table->VddcLevelCount = data->vddc_voltage_table.count;
172 for (count = 0; count < table->VddcLevelCount; count++) {
173 table->VddcTable[count] =
174 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
175 }
176 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
177 }
178 return 0;
179}
180
181/**
182 * VddGfx table preparation for SMC.
183 *
184 * @param hwmgr the address of the hardware manager
185 * @param table the SMC DPM table structure to be populated
186 * @return always 0
187 */
188static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
189 SMU72_Discrete_DpmTable *table)
190{
191 unsigned int count;
192 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
193
194 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
195 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
196 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
197 table->VddGfxTable[count] =
198 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
199 }
200 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
201 }
202 return 0;
203}
204
205/**
206 * Vddci table preparation for SMC.
207 *
208 * @param *hwmgr The address of the hardware manager.
209 * @param *table The SMC DPM table structure to be populated.
210 * @return 0
211 */
212static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
213 SMU72_Discrete_DpmTable *table)
214{
215 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
216 uint32_t count;
217
218 table->VddciLevelCount = data->vddci_voltage_table.count;
219 for (count = 0; count < table->VddciLevelCount; count++) {
220 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
221 table->VddciTable[count] =
222 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
223 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
224 table->SmioTable1.Pattern[count].Voltage =
225 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
226 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
227 table->SmioTable1.Pattern[count].Smio =
228 (uint8_t) count;
229 table->Smio[count] |=
230 data->vddci_voltage_table.entries[count].smio_low;
231 table->VddciTable[count] =
232 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
233 }
234 }
235
236 table->SmioMask1 = data->vddci_voltage_table.mask_low;
237 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
238
239 return 0;
240}
241
242/**
243 * Mvdd table preparation for SMC.
244 *
245 * @param *hwmgr The address of the hardware manager.
246 * @param *table The SMC DPM table structure to be populated.
247 * @return 0
248 */
249static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
250 SMU72_Discrete_DpmTable *table)
251{
252 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
253 uint32_t count;
254
255 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
256 table->MvddLevelCount = data->mvdd_voltage_table.count;
257 for (count = 0; count < table->MvddLevelCount; count++) {
258 table->SmioTable2.Pattern[count].Voltage =
259 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
260 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
261 table->SmioTable2.Pattern[count].Smio =
262 (uint8_t) count;
263 table->Smio[count] |=
264 data->mvdd_voltage_table.entries[count].smio_low;
265 }
266 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
267
268 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
269 }
270
271 return 0;
272}
273
274/**
275 * Preparation of vddc and vddgfx CAC tables for SMC.
276 *
277 * @param hwmgr the address of the hardware manager
278 * @param table the SMC DPM table structure to be populated
279 * @return always 0
280 */
281static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
282 SMU72_Discrete_DpmTable *table)
283{
284 uint32_t count;
285 uint8_t index = 0;
286 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
287 struct phm_ppt_v1_information *pptable_info =
288 (struct phm_ppt_v1_information *)(hwmgr->pptable);
289 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
290 pptable_info->vddgfx_lookup_table;
291 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
292 pptable_info->vddc_lookup_table;
293
294 /* table is already swapped, so in order to use the value from it
295 * we need to swap it back.
296 */
297 uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
298 uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
299
300 for (count = 0; count < vddc_level_count; count++) {
301 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
302 index = phm_get_voltage_index(vddc_lookup_table,
303 data->vddc_voltage_table.entries[count].value);
304 table->BapmVddcVidLoSidd[count] =
305 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
306 table->BapmVddcVidHiSidd[count] =
307 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
308 table->BapmVddcVidHiSidd2[count] =
309 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
310 }
311
312 if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
313 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
314 for (count = 0; count < vddgfx_level_count; count++) {
315 index = phm_get_voltage_index(vddgfx_lookup_table,
316 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
317 table->BapmVddGfxVidHiSidd2[count] =
318 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
319 }
320 } else {
321 for (count = 0; count < vddc_level_count; count++) {
322 index = phm_get_voltage_index(vddc_lookup_table,
323 data->vddc_voltage_table.entries[count].value);
324 table->BapmVddGfxVidLoSidd[count] =
325 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
326 table->BapmVddGfxVidHiSidd[count] =
327 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
328 table->BapmVddGfxVidHiSidd2[count] =
329 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
330 }
331 }
332
333 return 0;
334}
335
336/**
337 * Preparation of voltage tables for SMC.
338 *
339 * @param hwmgr the address of the hardware manager
340 * @param table the SMC DPM table structure to be populated
341 * @return always 0
342 */
343
344static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
345 SMU72_Discrete_DpmTable *table)
346{
347 int result;
348
349 result = tonga_populate_smc_vddc_table(hwmgr, table);
350 PP_ASSERT_WITH_CODE(!result,
351 "can not populate VDDC voltage table to SMC",
352 return -EINVAL);
353
354 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
355 PP_ASSERT_WITH_CODE(!result,
356 "can not populate VDDCI voltage table to SMC",
357 return -EINVAL);
358
359 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
360 PP_ASSERT_WITH_CODE(!result,
361 "can not populate VDDGFX voltage table to SMC",
362 return -EINVAL);
363
364 result = tonga_populate_smc_mvdd_table(hwmgr, table);
365 PP_ASSERT_WITH_CODE(!result,
366 "can not populate MVDD voltage table to SMC",
367 return -EINVAL);
368
369 result = tonga_populate_cac_tables(hwmgr, table);
370 PP_ASSERT_WITH_CODE(!result,
371 "can not populate CAC voltage tables to SMC",
372 return -EINVAL);
373
374 return 0;
375}
376
377static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
378 struct SMU72_Discrete_Ulv *state)
379{
380 struct phm_ppt_v1_information *table_info =
381 (struct phm_ppt_v1_information *)(hwmgr->pptable);
382
383 state->CcPwrDynRm = 0;
384 state->CcPwrDynRm1 = 0;
385
386 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
387 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
388 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
389
390 state->VddcPhase = 1;
391
392 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
393 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
394 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
395
396 return 0;
397}
398
399static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
400 struct SMU72_Discrete_DpmTable *table)
401{
402 return tonga_populate_ulv_level(hwmgr, &table->Ulv);
403}
404
/* Fill the SMC LinkLevel (PCIe DPM) table from the hwmgr PCIe speed table
 * and cache the level count and enable mask for later use.
 * Always returns 0.
 */
static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
	uint32_t i;

	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
	/* NOTE(review): the loop deliberately runs one past count (<=) to
	 * fill that reserved boot entry; it reads dpm_levels[count], which
	 * assumes the array holds count + 1 entries -- confirm against
	 * struct smu7_dpm_table. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		/* every link level may be used for activity-based switching */
		table->LinkLevel[i].EnabledForActivity =
			1;
		table->LinkLevel[i].SPC =
			(uint8_t)(data->pcie_spc_cap & 0xff);
		/* down/up utilization thresholds in percent, SMC byte order */
		table->LinkLevel[i].DownThreshold =
			PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold =
			PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	/* cached mask consumed by the graphic-level population code */
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
435
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param engine_clock the engine clock to use to populate the structure
 * @param sclk the SMC SCLK structure to be populated
 * @return 0 on success, or the atomctrl error code when the VBIOS
 *         divider lookup fails
 */
static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* start from the register values captured at hwmgr init */
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	/* NOTE(review): spll_func_cntl is updated below but never stored in
	 * *sclk -- presumably SMU72_Discrete_GraphicsLevel carries no
	 * CgSpllFuncCntl field; confirm against smu72_discrete.h. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
			 * ss_info.speed_spectrum_rate -- in unit of khz
			 */
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* hand the computed register images back to the caller's level */
	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}
520
521/**
522 * Populates single SMC SCLK structure using the provided engine clock
523 *
524 * @param hwmgr the address of the hardware manager
525 * @param engine_clock the engine clock to use to populate the structure
526 * @param sclk the SMC SCLK structure to be populated
527 */
528static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
529 uint32_t engine_clock,
530 uint16_t sclk_activity_level_threshold,
531 SMU72_Discrete_GraphicsLevel *graphic_level)
532{
533 int result;
534 uint32_t mvdd;
535 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
536 struct phm_ppt_v1_information *pptable_info =
537 (struct phm_ppt_v1_information *)(hwmgr->pptable);
538
539 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
540
541 /* populate graphics levels*/
542 result = tonga_get_dependecy_volt_by_clk(hwmgr,
543 pptable_info->vdd_dep_on_sclk, engine_clock,
544 &graphic_level->MinVoltage, &mvdd);
545 PP_ASSERT_WITH_CODE((!result),
546 "can not find VDDC voltage value for VDDC "
547 "engine clock dependency table", return result);
548
549 /* SCLK frequency in units of 10KHz*/
550 graphic_level->SclkFrequency = engine_clock;
551 /* Indicates maximum activity level for this performance level. 50% for now*/
552 graphic_level->ActivityLevel = sclk_activity_level_threshold;
553
554 graphic_level->CcPwrDynRm = 0;
555 graphic_level->CcPwrDynRm1 = 0;
556 /* this level can be used if activity is high enough.*/
557 graphic_level->EnabledForActivity = 0;
558 /* this level can be used for throttling.*/
559 graphic_level->EnabledForThrottle = 1;
560 graphic_level->UpHyst = 0;
561 graphic_level->DownHyst = 0;
562 graphic_level->VoltageDownHyst = 0;
563 graphic_level->PowerThrottle = 0;
564
565 data->display_timing.min_clock_in_sr =
566 hwmgr->display_config.min_core_set_clock_in_sr;
567
568 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
569 PHM_PlatformCaps_SclkDeepSleep))
570 graphic_level->DeepSleepDivId =
571 smu7_get_sleep_divider_id_from_clock(engine_clock,
572 data->display_timing.min_clock_in_sr);
573
574 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
575 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
576
577 if (!result) {
578 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
579 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
580 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
581 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
582 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
583 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
584 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
585 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
586 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
587 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
588 }
589
590 return result;
591}
592
/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * Also assigns a PCIe DPM level to each graphics level: from the pptable
 * PCIe table when present, otherwise derived from the PCIe enable mask.
 *
 * @param hwmgr the address of the hardware manager
 * @return 0 on success, an error code from level population or the
 *         SMC upload otherwise
 */
int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
	uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
	/* destination offset of the graphics levels inside the SMC DPM table */
	uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
		offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
		SMU72_MAX_LEVELS_GRAPHICS;

	SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;

	uint32_t i, max_entry;
	uint8_t highest_pcie_level_enabled = 0;
	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
	uint8_t count = 0;
	int result = 0;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = tonga_populate_single_graphic_level(hwmgr,
			dpm_table->sclk_table.dpm_levels[i].value,
			(uint16_t)smu_data->activity_target[i],
			&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result != 0)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	if (pcie_table != NULL) {
		PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
			"There must be 1 or more PCIE levels defined in PPTable.",
			return -EINVAL);
		max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
		for (i = 0; i < dpm_table->sclk_table.count; i++) {
			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
				(uint8_t) ((i < max_entry) ? i : max_entry);
		}
	} else {
		if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
			printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");

		/* scan upward for the highest set bit in the enable mask */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1<<(highest_pcie_level_enabled+1))) != 0)) {
			highest_pcie_level_enabled++;
		}

		/* scan upward for the lowest set bit in the enable mask */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1<<lowest_pcie_level_enabled)) == 0)) {
			lowest_pcie_level_enabled++;
		}

		/* count enabled levels between lowest and highest */
		while ((count < highest_pcie_level_enabled) &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
			count++;
		}
		mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
			(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;


		/* set pcieDpmLevel to highest_pcie_level_enabled*/
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled*/
		smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled*/
		smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
				(uint8_t *)levels, (uint32_t)level_array_size,
				SMC_RAM_END);

	return result;
}
699
/**
 * Populates the SMC MCLK structure using the provided memory clock
 *
 * Computes the MPLL divider/spread-spectrum register images for
 * @memory_clock and stores them in @mclk.
 *
 * @param hwmgr the address of the hardware manager
 * @param memory_clock the memory clock to use to populate the structure
 * @param mclk the SMC memory level structure to be populated
 * @param strobe_mode whether strobe mode is in effect for this clock
 * @param dllStateOn whether the memory DLLs stay powered in this level
 * @return 0 on success, or the atomctrl error when the VBIOS divider
 *         lookup fails
 */
static int tonga_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* start from the register values captured at hwmgr init */
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(
			!result,
			"Error retrieving Memory Clock Parameters from VBIOS.",
			return result);

	/* MPLL_FUNC_CNTL setup*/
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
					mpll_param.bw_ctrl);

	/* MPLL_FUNC_CNTL_1 setup*/
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKF,
					mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKFRAC,
					mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
						MPLL_FUNC_CNTL_1, VCO_MODE,
						mpll_param.vco_mode);

	/* MPLL_AD_FUNC_CNTL setup*/
	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
					MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
					mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		/* MPLL_DQ_FUNC_CNTL setup*/
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_SEL,
						mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
						mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		/*
		 ************************************
		 Fref = Reference Frequency
		 NF = Feedback divider ratio
		 NR = Reference divider ratio
		 Fnom = Nominal VCO output frequency = Fref * NF / NR
		 Fs = Spreading Rate
		 D = Percentage down-spread / 2
		 Fint = Reference input frequency to PFD = Fref / NR
		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
		 *************************************
		 */
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
			/* ss.Info.speed_spectrum_rate -- in unit of khz */
			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
			/*     = reference_clock * 5 / speed_spectrum_rate */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;

			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
					ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* MCLK_PWRMGT_CNTL setup */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

	/* Save the result data to outpupt memory level structure */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
839
/*
 * Map a GDDR5 memory clock (in 10 kHz units) to the 4-bit MC_SEQ frequency
 * ratio index used by the strobe-mode tables.  The index is clamped to
 * [0x00, 0x0f]; the valid window and step size differ between strobe and
 * non-strobe operation.
 */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	if (strobe_mode) {
		/* strobe window: 12500..47500, 2500 per step from 10000 */
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (uint8_t)((memory_clock - 10000) / 2500);
	}

	/* non-strobe window: 65000..135000, 5000 per step from 60000 */
	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}
863
/*
 * Map a DDR3 memory clock (in 10 kHz units) to its 4-bit MC_SEQ frequency
 * ratio index: 0 below 10000, saturating at 0x0f from 80000 upward, and
 * stepping by 5000 (offset by one) in between.
 */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
877
878
/*
 * Build one SMU72 memory (MCLK) DPM level for the requested memory clock.
 *
 * Looks up the minimum voltage/MVDD for @memory_clock, fills in the level's
 * policy fields (hysteresis, activity target, stutter/strobe/EDC enables),
 * computes the MPLL register values via tonga_calculate_mclk_params(), and
 * finally byte-swaps the register fields to SMC (big-endian) order.
 *
 * Returns 0 on success, or the error from the voltage lookup / MCLK
 * parameter calculation.
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t mvdd = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info =
			  (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	/* thresholds below are in 10 kHz units, matching @memory_clock */
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_stutter_mode_threshold = 30000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependecy_volt_by_clk(hwmgr,
				pptable_info->vdd_dep_on_mclk,
				memory_clock,
				&memory_level->MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE(
			!result,
			"can not find MinVddc voltage value from memory VDDC "
			"voltage dependency table",
			return result);
	}

	/* without MVDD control, fall back to the VBIOS boot-up MVDD */
	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	else
		memory_level->MinMvdd = mvdd;

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/*
	 * Stutter mode only for low memory clocks, with UVD idle, the
	 * display controller's stutter already enabled, and 1 or 2
	 * active displays.
	 */
	if ((mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0))
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			/*
			 * DLL on/off comes from MC_SEQ_MISC5/MISC6 fuses,
			 * selected by comparing the strobe ratio against the
			 * threshold fused into MC_SEQ_MISC7[19:16].
			 */
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dll_state_on = data->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (!result) {
		/* swap all multi-byte register values to SMC byte order */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
1000
/*
 * Populate every MCLK DPM level from the driver's dpm_table into the SMC
 * state table and upload the whole MemoryLevel array to SMC RAM.
 *
 * Returns 0 on success, -EINVAL for a zero memory clock, or the error from
 * level population / the SMC copy.
 */
int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct tonga_smumgr *smu_data =
			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;

	/* populate MCLK dpm table to SMU7 */
	uint32_t level_array_address =
				smu_data->smu7_data.dpm_table_start +
				offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size =
				sizeof(SMU72_Discrete_MemoryLevel) *
				SMU72_MAX_LEVELS_MEMORY;
	SMU72_Discrete_MemoryLevel *levels =
				smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero",
			return -EINVAL);
		result = tonga_populate_single_memory_level(
				hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&(smu_data->smc_state_table.MemoryLevel[i]));
		if (result)
			return result;
	}

	/* Only enable level 0 for now.*/
	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	/*
	 * in order to prevent MC activity from stutter mode to push DPM up.
	 * the UVD change complements this by putting the MCLK in a higher state
	 * by default such that we are not effected by up threshold or and MCLK DPM latency.
	 */
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* set highest level watermark to high*/
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
1057
1058static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1059 uint32_t mclk, SMIO_Pattern *smio_pattern)
1060{
1061 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1062 struct phm_ppt_v1_information *table_info =
1063 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1064 uint32_t i = 0;
1065
1066 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1067 /* find mvdd value which clock is more than request */
1068 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1069 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1070 /* Always round to higher voltage. */
1071 smio_pattern->Voltage =
1072 data->mvdd_voltage_table.entries[i].value;
1073 break;
1074 }
1075 }
1076
1077 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1078 "MVDD Voltage is outside the supported range.",
1079 return -EINVAL);
1080 } else {
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
1087
/*
 * Populate the SMC ACPI (lowest-power) level for both engine and memory.
 *
 * The SCLK side is programmed with the SPLL powered down and in reset; the
 * MCLK side forces the memory DLLs into reset/power-down with the bypass
 * path enabled.  Register images come from the boot-time clock_registers
 * snapshot and are converted to SMC byte order before upload.
 *
 * Returns 0 on success, or the error from the VBIOS divider query.
 */
static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
			SMU72_Discrete_DpmTable *table)
{
	int result = 0;
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMIO_Pattern voltage_level;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	table->ACPILevel.MinVoltage =
			smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;

	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.",
		return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* power down the SPLL and hold it in reset for the ACPI state */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
					SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
						SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
						SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVoltage =
			    smu_data->smc_state_table.MemoryLevel[0].MinVoltage;

	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
1221
/*
 * Populate the SMC UVD DPM levels from the multimedia (MM) clock/voltage
 * dependency table: one level per MM entry, each carrying VCLK/DCLK
 * frequencies, their VBIOS-derived post dividers, and the minimum
 * VDDC/VDDGFX/VDDCI voltage indices.
 *
 * Returns 0 on success, or the error from the VBIOS divider lookups.
 */
static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
					SMU72_Discrete_DpmTable *table)
{
	int result = 0;

	uint8_t count;
	pp_atomctrl_clock_dividers_vi dividers;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
						pptable_info->mm_dep_table;

	table->UvdLevelCount = (uint8_t) (mm_table->count);
	table->UvdBootLevel = 0;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
		table->UvdLevel[count].MinVoltage.Vddc =
			phm_get_voltage_index(pptable_info->vddc_lookup_table,
						mm_table->entries[count].vddc);
		/* VDDGFX index only applies when gfx voltage is on its own SVID2 rail */
		table->UvdLevel[count].MinVoltage.VddGfx =
			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
			phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
						mm_table->entries[count].vddgfx) : 0;
		/* VDDCI tracks VDDC at a fixed delta below it */
		table->UvdLevel[count].MinVoltage.Vddci =
			phm_get_voltage_id(&data->vddci_voltage_table,
					     mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		table->UvdLevel[count].MinVoltage.Phases = 1;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(
					hwmgr,
					table->UvdLevel[count].VclkFrequency,
					&dividers);

		PP_ASSERT_WITH_CODE((!result),
				    "can not find divide id for Vclk clock",
					return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
							  table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((!result),
				    "can not find divide id for Dclk clock",
					return result);

		table->UvdLevel[count].DclkDivider =
					(uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
	}

	return result;

}
1281
/*
 * Populate the SMC VCE DPM levels from the multimedia (MM) dependency
 * table: one level per entry with the ECLK frequency, its VBIOS post
 * divider, and the minimum VDDC/VDDGFX/VDDCI voltage indices.
 *
 * Returns 0 on success, or the error from the VBIOS divider lookup.
 */
static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU72_Discrete_DpmTable *table)
{
	int result = 0;

	uint8_t count;
	pp_atomctrl_clock_dividers_vi dividers;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info =
			      (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
						     pptable_info->mm_dep_table;

	table->VceLevelCount = (uint8_t) (mm_table->count);
	table->VceBootLevel = 0;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			mm_table->entries[count].eclk;
		table->VceLevel[count].MinVoltage.Vddc =
			phm_get_voltage_index(pptable_info->vddc_lookup_table,
				mm_table->entries[count].vddc);
		/* VDDGFX index only applies when gfx voltage is on its own SVID2 rail */
		table->VceLevel[count].MinVoltage.VddGfx =
			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
				phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
					mm_table->entries[count].vddgfx) : 0;
		/* VDDCI tracks VDDC at a fixed delta below it */
		table->VceLevel[count].MinVoltage.Vddci =
			phm_get_voltage_id(&data->vddci_voltage_table,
				mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
		table->VceLevel[count].MinVoltage.Phases = 1;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
					table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((!result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
	}

	return result;
}
1327
1328static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1329 SMU72_Discrete_DpmTable *table)
1330{
1331 int result = 0;
1332 uint8_t count;
1333 pp_atomctrl_clock_dividers_vi dividers;
1334 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1335 struct phm_ppt_v1_information *pptable_info =
1336 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1337 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1338 pptable_info->mm_dep_table;
1339
1340 table->AcpLevelCount = (uint8_t) (mm_table->count);
1341 table->AcpBootLevel = 0;
1342
1343 for (count = 0; count < table->AcpLevelCount; count++) {
1344 table->AcpLevel[count].Frequency =
1345 pptable_info->mm_dep_table->entries[count].aclk;
1346 table->AcpLevel[count].MinVoltage.Vddc =
1347 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1348 mm_table->entries[count].vddc);
1349 table->AcpLevel[count].MinVoltage.VddGfx =
1350 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1351 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1352 mm_table->entries[count].vddgfx) : 0;
1353 table->AcpLevel[count].MinVoltage.Vddci =
1354 phm_get_voltage_id(&data->vddci_voltage_table,
1355 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1356 table->AcpLevel[count].MinVoltage.Phases = 1;
1357
1358 /* retrieve divider value for VBIOS */
1359 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1360 table->AcpLevel[count].Frequency, &dividers);
1361 PP_ASSERT_WITH_CODE((!result),
1362 "can not find divide id for engine clock", return result);
1363
1364 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1365
1366 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1367 }
1368
1369 return result;
1370}
1371
1372static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1373 SMU72_Discrete_DpmTable *table)
1374{
1375 int result = 0;
1376 uint8_t count;
1377 pp_atomctrl_clock_dividers_vi dividers;
1378 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1379 struct phm_ppt_v1_information *pptable_info =
1380 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1381 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1382 pptable_info->mm_dep_table;
1383
1384 table->SamuBootLevel = 0;
1385 table->SamuLevelCount = (uint8_t) (mm_table->count);
1386
1387 for (count = 0; count < table->SamuLevelCount; count++) {
1388 /* not sure whether we need evclk or not */
1389 table->SamuLevel[count].Frequency =
1390 pptable_info->mm_dep_table->entries[count].samclock;
1391 table->SamuLevel[count].MinVoltage.Vddc =
1392 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1393 mm_table->entries[count].vddc);
1394 table->SamuLevel[count].MinVoltage.VddGfx =
1395 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1396 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1397 mm_table->entries[count].vddgfx) : 0;
1398 table->SamuLevel[count].MinVoltage.Vddci =
1399 phm_get_voltage_id(&data->vddci_voltage_table,
1400 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1401 table->SamuLevel[count].MinVoltage.Phases = 1;
1402
1403 /* retrieve divider value for VBIOS */
1404 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1405 table->SamuLevel[count].Frequency, &dividers);
1406 PP_ASSERT_WITH_CODE((!result),
1407 "can not find divide id for samu clock", return result);
1408
1409 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1410
1411 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1412 }
1413
1414 return result;
1415}
1416
/*
 * Fill one MC ARB DRAM timing table entry for an (engine clock, memory
 * clock) pair.
 *
 * Asks the VBIOS to program the DRAM timings for the pair, then reads the
 * resulting MC_ARB_DRAM_TIMING / MC_ARB_DRAM_TIMING2 registers and the
 * STATE0 burst time back and stores them (timings in SMC byte order) in
 * @arb_regs.
 *
 * Returns 0 on success, or the error from the VBIOS call.
 */
static int tonga_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* read back the register values the VBIOS just programmed */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
1445
1446/**
1447 * Setup parameters for the MC ARB.
1448 *
1449 * @param hwmgr the address of the powerplay hardware manager.
1450 * @return always 0
1451 * This function is to be called from the SetPowerState table.
1452 */
1453static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1454{
1455 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1456 struct tonga_smumgr *smu_data =
1457 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1458 int result = 0;
1459 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1460 uint32_t i, j;
1461
1462 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1463
1464 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1465 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1466 result = tonga_populate_memory_timing_parameters
1467 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1468 data->dpm_table.mclk_table.dpm_levels[j].value,
1469 &arb_regs.entries[i][j]);
1470
1471 if (result)
1472 break;
1473 }
1474 }
1475
1476 if (!result) {
1477 result = smu7_copy_bytes_to_smc(
1478 hwmgr->smumgr,
1479 smu_data->smu7_data.arb_table_start,
1480 (uint8_t *)&arb_regs,
1481 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1482 SMC_RAM_END
1483 );
1484 }
1485
1486 return result;
1487}
1488
1489static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1490 SMU72_Discrete_DpmTable *table)
1491{
1492 int result = 0;
1493 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1494 struct tonga_smumgr *smu_data =
1495 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1496 table->GraphicsBootLevel = 0;
1497 table->MemoryBootLevel = 0;
1498
1499 /* find boot level from dpm table*/
1500 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1501 data->vbios_boot_state.sclk_bootup_value,
1502 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1503
1504 if (result != 0) {
1505 smu_data->smc_state_table.GraphicsBootLevel = 0;
1506 printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
1507 "clock value in dependency table. "
1508 "Using Graphics DPM level 0 !");
1509 result = 0;
1510 }
1511
1512 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1513 data->vbios_boot_state.mclk_bootup_value,
1514 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1515
1516 if (result != 0) {
1517 smu_data->smc_state_table.MemoryBootLevel = 0;
1518 printk(KERN_ERR "[powerplay] VBIOS did not find boot "
1519 "engine clock value in dependency table."
1520 "Using Memory DPM level 0 !");
1521 result = 0;
1522 }
1523
1524 table->BootVoltage.Vddc =
1525 phm_get_voltage_id(&(data->vddc_voltage_table),
1526 data->vbios_boot_state.vddc_bootup_value);
1527 table->BootVoltage.VddGfx =
1528 phm_get_voltage_id(&(data->vddgfx_voltage_table),
1529 data->vbios_boot_state.vddgfx_bootup_value);
1530 table->BootVoltage.Vddci =
1531 phm_get_voltage_id(&(data->vddci_voltage_table),
1532 data->vbios_boot_state.vddci_bootup_value);
1533 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1534
1535 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1536
1537 return result;
1538}
1539
/*
 * Populate the SMC clock-stretcher tables (CKS lookup table and DDT table)
 * and program the PWR_CKS_ENABLE / PWR_CKS_CNTL registers.
 *
 * Reads efuses to estimate the part's ring-oscillator speed (RO), classifies
 * the die as slow (SS) or fast (FF), derives a per-SCLK-level voltage offset
 * between the with-CKS and without-CKS voltage curves, and fills the lookup
 * and data tables accordingly.  The voltage-curve polynomials differ between
 * Tonga-P and other variants, selected by PCI device id / revision.
 *
 * Returns 0 on success, or -EINVAL for an unsupported stretch amount (in
 * which case the ClockStretcher platform cap is also cleared).
 */
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
		volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
		volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	uint32_t hw_revision, dev_id;
	struct cgs_system_info sys_info = {0};

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	sys_info.size = sizeof(struct cgs_system_info);

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	hw_revision = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	/* scale the 8-bit efuse into an RO frequency; the range depends
	 * on the fuse revision bit in efuse2 */
	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type selects the row of the DDT table below: 0 = FF, 1 = SS */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;


	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* voltage-curve polynomials differ per ASIC variant;
		 * clocks are scaled from 10 kHz units to MHz (clk/100) */
		if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
			volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
				(sclk_table->entries[i].clk/100) / 10000) * 1000 /
				(8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
			volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
				(sclk_table->entries[i].clk/100) / 100000) * 1000 /
				(6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
		} else {
			volt_without_cks = (uint32_t)((14041 *
				(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
				(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
			volt_with_cks = (uint32_t)((13946 *
				(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
				(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		}
		/* offset is in 6.25 mV steps (100/625), rounded up by one;
		 * note: if the condition is false the previous level's
		 * offset is reused (volt_offset keeps its last value) */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
				sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* pulse masterReset with stretching disabled but staticEnable set */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* clear the CKS control fields we are about to (conditionally) set */
	value &= 0xFFC2FF87;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			tonga_clock_stretcher_lookup_table[stretch_amount2][0];
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			tonga_clock_stretcher_lookup_table[stretch_amount2][1];
	/* top SCLK level frequency, converted back to host order, in MHz */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (tonga_clock_stretch_amount_conversion
				[tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
				[stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 * Sclk's endianness has already been converted,
			 * and it's in 10Khz unit,
			 * as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
				cks_setting |= 0x2;
				if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
					cks_setting |= 0x1;
			}
			smu_data->smc_state_table.ClockStretcherDataTable.
			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
					ClockStretcherDataTable.
					ClockStretcherDataTableEntry[i].setting);
	}

	/* clear bit 0 of PWR_CKS_CNTL (disable the CKS enable bit) */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixPWR_CKS_CNTL, value);

	return 0;
}
1719
1720/**
1721 * Populates the SMC VRConfig field in DPM table.
1722 *
1723 * @param hwmgr the address of the hardware manager
1724 * @param table the SMC DPM table structure to be populated
1725 * @return always 0
1726 */
1727static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1728 SMU72_Discrete_DpmTable *table)
1729{
1730 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1731 uint16_t config;
1732
1733 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1734 /* Splitted mode */
1735 config = VR_SVI2_PLANE_1;
1736 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1737
1738 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1739 config = VR_SVI2_PLANE_2;
1740 table->VRConfig |= config;
1741 } else {
1742 printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
1743 "be both on SVI2 control in splitted mode !\n");
1744 }
1745 } else {
1746 /* Merged mode */
1747 config = VR_MERGED_WITH_VDDC;
1748 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1749
1750 /* Set Vddc Voltage Controller */
1751 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1752 config = VR_SVI2_PLANE_1;
1753 table->VRConfig |= config;
1754 } else {
1755 printk(KERN_ERR "[ powerplay ] VDDC should be on "
1756 "SVI2 control in merged mode !\n");
1757 }
1758 }
1759
1760 /* Set Vddci Voltage Controller */
1761 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1762 config = VR_SVI2_PLANE_2; /* only in merged mode */
1763 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1764 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1765 config = VR_SMIO_PATTERN_1;
1766 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1767 }
1768
1769 /* Set Mvdd Voltage Controller */
1770 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1771 config = VR_SMIO_PATTERN_2;
1772 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1773 }
1774
1775 return 0;
1776}
1777
1778
1779/**
1780 * Initialize the ARB DRAM timing table's index field.
1781 *
1782 * @param hwmgr the address of the powerplay hardware manager.
1783 * @return always 0
1784 */
1785static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
1786{
1787 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
1788 uint32_t tmp;
1789 int result;
1790
1791 /*
1792 * This is a read-modify-write on the first byte of the ARB table.
1793 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
1794 * is the field 'current'.
1795 * This solution is ugly, but we never write the whole table only
1796 * individual fields in it.
1797 * In reality this field should not be in that structure
1798 * but in a soft register.
1799 */
1800 result = smu7_read_smc_sram_dword(smumgr,
1801 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1802
1803 if (result != 0)
1804 return result;
1805
1806 tmp &= 0x00FFFFFF;
1807 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1808
1809 return smu7_write_smc_sram_dword(smumgr,
1810 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1811}
1812
1813
1814static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1815{
1816 struct tonga_smumgr *smu_data =
1817 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1818 struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1819 SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1820 struct phm_ppt_v1_information *table_info =
1821 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1822 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
1823 int i, j, k;
1824 uint16_t *pdef1;
1825 uint16_t *pdef2;
1826
1827 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
1828 (uint16_t)(cac_dtp_table->usTDP * 256));
1829 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
1830 (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1831
1832 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
1833 "Target Operating Temp is out of Range !",
1834 );
1835
1836 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
1837 dpm_table->GpuTjHyst = 8;
1838
1839 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1840
1841 dpm_table->BAPM_TEMP_GRADIENT =
1842 PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
1843 pdef1 = defaults->bapmti_r;
1844 pdef2 = defaults->bapmti_rc;
1845
1846 for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
1847 for (j = 0; j < SMU72_DTE_SOURCES; j++) {
1848 for (k = 0; k < SMU72_DTE_SINKS; k++) {
1849 dpm_table->BAPMTI_R[i][j][k] =
1850 PP_HOST_TO_SMC_US(*pdef1);
1851 dpm_table->BAPMTI_RC[i][j][k] =
1852 PP_HOST_TO_SMC_US(*pdef2);
1853 pdef1++;
1854 pdef2++;
1855 }
1856 }
1857 }
1858
1859 return 0;
1860}
1861
1862static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
1863{
1864 struct tonga_smumgr *smu_data =
1865 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1866 struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1867
1868 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
1869 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
1870 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
1871 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
1872
1873 return 0;
1874}
1875
1876static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
1877{
1878 uint16_t tdc_limit;
1879 struct tonga_smumgr *smu_data =
1880 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1881 struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1882 struct phm_ppt_v1_information *table_info =
1883 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1884
1885 /* TDC number of fraction bits are changed from 8 to 7
1886 * for Fiji as requested by SMC team
1887 */
1888 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
1889 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
1890 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
1891 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
1892 defaults->tdc_vddc_throttle_release_limit_perc;
1893 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
1894
1895 return 0;
1896}
1897
1898static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
1899{
1900 struct tonga_smumgr *smu_data =
1901 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1902 struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1903 uint32_t temp;
1904
1905 if (smu7_read_smc_sram_dword(hwmgr->smumgr,
1906 fuse_table_offset +
1907 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
1908 (uint32_t *)&temp, SMC_RAM_END))
1909 PP_ASSERT_WITH_CODE(false,
1910 "Attempt to read PmFuses.DW6 "
1911 "(SviLoadLineEn) from SMC Failed !",
1912 return -EINVAL);
1913 else
1914 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
1915
1916 return 0;
1917}
1918
1919static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
1920{
1921 int i;
1922 struct tonga_smumgr *smu_data =
1923 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1924
1925 /* Currently not used. Set all to zero. */
1926 for (i = 0; i < 16; i++)
1927 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
1928
1929 return 0;
1930}
1931
1932static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
1933{
1934 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1935
1936 if ((hwmgr->thermal_controller.advanceFanControlParameters.
1937 usFanOutputSensitivity & (1 << 15)) ||
1938 (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
1939 hwmgr->thermal_controller.advanceFanControlParameters.
1940 usFanOutputSensitivity = hwmgr->thermal_controller.
1941 advanceFanControlParameters.usDefaultFanOutputSensitivity;
1942
1943 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
1944 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
1945 advanceFanControlParameters.usFanOutputSensitivity);
1946 return 0;
1947}
1948
1949static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
1950{
1951 int i;
1952 struct tonga_smumgr *smu_data =
1953 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1954
1955 /* Currently not used. Set all to zero. */
1956 for (i = 0; i < 16; i++)
1957 smu_data->power_tune_table.GnbLPML[i] = 0;
1958
1959 return 0;
1960}
1961
/*
 * Placeholder: deriving min/max GnbLPML VIDs from the BAPM VDDC tables
 * is not implemented for Tonga.  Kept so the PM-fuse population sequence
 * (DW19 in tonga_populate_pm_fuses) stays uniform across ASICs.
 */
static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	return 0;
}
1966
1967static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
1968{
1969 struct tonga_smumgr *smu_data =
1970 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
1971 struct phm_ppt_v1_information *table_info =
1972 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1973 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
1974 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
1975 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
1976
1977 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
1978 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
1979
1980 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
1981 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
1982 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
1983 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
1984
1985 return 0;
1986}
1987
/*
 * Populate the SMU72_Discrete_PmFuses table and download it to SMC RAM.
 *
 * Only runs when the PowerContainment cap is set.  Each helper fills one
 * dword range of the fuse table (see the DWn markers); any failure logs
 * a message and aborts with -EINVAL.
 */
static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct tonga_smumgr *smu_data =
		(struct tonga_smumgr *)(hwmgr->smumgr->backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PmFuses table inside SMC RAM via the firmware header. */
		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
				SMU72_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU72_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to get pm_fuse_table_offset Failed !",
				return -EINVAL);

		/* DW6 */
		if (tonga_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate SviLoadLine Failed !",
				return -EINVAL);
		/* DW7 */
		if (tonga_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate TDCLimit Failed !",
				return -EINVAL);
		/* DW8 */
		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate TdcWaterfallCtl Failed !",
				return -EINVAL);

		/* DW9-DW12 */
		if (tonga_populate_temperature_scaler(hwmgr) != 0)
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate LPMLTemperatureScaler Failed !",
				return -EINVAL);

		/* DW13-DW14 */
		if (tonga_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate Fuzzy Fan "
				"Control parameters Failed !",
				return -EINVAL);

		/* DW15-DW18 */
		if (tonga_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate GnbLPML Failed !",
				return -EINVAL);

		/* DW19 (currently a no-op placeholder for Tonga) */
		if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate GnbLPML "
				"Min and Max Vid Failed !",
				return -EINVAL);

		/* DW20 */
		if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(
				false,
				"Attempt to populate BapmVddCBaseLeakage "
				"Hi and Lo Sidd Failed !",
				return -EINVAL);

		/* Download the fully assembled host-side table to SMC RAM. */
		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to download PmFuseTable Failed !",
				return -EINVAL);
	}
	return 0;
}
2063
2064static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
2065 SMU72_Discrete_MCRegisters *mc_reg_table)
2066{
2067 const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
2068
2069 uint32_t i, j;
2070
2071 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
2072 if (smu_data->mc_reg_table.validflag & 1<<j) {
2073 PP_ASSERT_WITH_CODE(
2074 i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
2075 "Index of mc_reg_table->address[] array "
2076 "out of boundary",
2077 return -EINVAL);
2078 mc_reg_table->address[i].s0 =
2079 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
2080 mc_reg_table->address[i].s1 =
2081 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
2082 i++;
2083 }
2084 }
2085
2086 mc_reg_table->last = (uint8_t)i;
2087
2088 return 0;
2089}
2090
2091/*convert register values from driver to SMC format */
2092static void tonga_convert_mc_registers(
2093 const struct tonga_mc_reg_entry *entry,
2094 SMU72_Discrete_MCRegisterSet *data,
2095 uint32_t num_entries, uint32_t valid_flag)
2096{
2097 uint32_t i, j;
2098
2099 for (i = 0, j = 0; j < num_entries; j++) {
2100 if (valid_flag & 1<<j) {
2101 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
2102 i++;
2103 }
2104 }
2105}
2106
2107static int tonga_convert_mc_reg_table_entry_to_smc(
2108 struct pp_smumgr *smumgr,
2109 const uint32_t memory_clock,
2110 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
2111 )
2112{
2113 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
2114 uint32_t i = 0;
2115
2116 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
2117 if (memory_clock <=
2118 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2119 break;
2120 }
2121 }
2122
2123 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
2124 --i;
2125
2126 tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
2127 mc_reg_table_data, smu_data->mc_reg_table.last,
2128 smu_data->mc_reg_table.validflag);
2129
2130 return 0;
2131}
2132
2133static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2134 SMU72_Discrete_MCRegisters *mc_regs)
2135{
2136 int result = 0;
2137 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2138 int res;
2139 uint32_t i;
2140
2141 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2142 res = tonga_convert_mc_reg_table_entry_to_smc(
2143 hwmgr->smumgr,
2144 data->dpm_table.mclk_table.dpm_levels[i].value,
2145 &mc_regs->data[i]
2146 );
2147
2148 if (0 != res)
2149 result = res;
2150 }
2151
2152 return result;
2153}
2154
2155static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
2156{
2157 struct pp_smumgr *smumgr = hwmgr->smumgr;
2158 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
2159 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2160 uint32_t address;
2161 int32_t result;
2162
2163 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
2164 return 0;
2165
2166
2167 memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
2168
2169 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
2170
2171 if (result != 0)
2172 return result;
2173
2174
2175 address = smu_data->smu7_data.mc_reg_table_start +
2176 (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
2177
2178 return smu7_copy_bytes_to_smc(
2179 hwmgr->smumgr, address,
2180 (uint8_t *)&smu_data->mc_regs.data[0],
2181 sizeof(SMU72_Discrete_MCRegisterSet) *
2182 data->dpm_table.mclk_table.count,
2183 SMC_RAM_END);
2184}
2185
/*
 * Build the full MC register table (compacted addresses plus per-level
 * data sets) and upload it to its SMC RAM location in one shot.
 */
static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct pp_smumgr *smumgr = hwmgr->smumgr;
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
	/* Fill in the (compacted) register address pairs. */
	result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(!result,
		"Failed to initialize MCRegTable for the MC register addresses !",
		return result;);

	/* Fill in the register data for every MCLK DPM level. */
	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(!result,
		"Failed to initialize MCRegTable for driver state !",
		return result;);

	return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
}
2206
2207static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
2208{
2209 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2210 struct phm_ppt_v1_information *table_info =
2211 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2212
2213 if (table_info &&
2214 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
2215 table_info->cac_dtp_table->usPowerTuneDataSetID)
2216 smu_data->power_tune_defaults =
2217 &tonga_power_tune_data_set_array
2218 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
2219 else
2220 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
2221}
2222
2223/**
2224 * Initializes the SMC table and uploads it
2225 *
2226 * @param hwmgr the address of the powerplay hardware manager.
2227 * @param pInput the pointer to input data (PowerState)
2228 * @return always 0
2229 */
2230int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2231{
2232 int result;
2233 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2234 struct tonga_smumgr *smu_data =
2235 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2236 SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2237 struct phm_ppt_v1_information *table_info =
2238 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2239
2240 uint8_t i;
2241 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2242
2243
2244 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
2245
2246 tonga_initialize_power_tune_defaults(hwmgr);
2247
2248 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
2249 tonga_populate_smc_voltage_tables(hwmgr, table);
2250
2251 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2252 PHM_PlatformCaps_AutomaticDCTransition))
2253 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2254
2255
2256 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2257 PHM_PlatformCaps_StepVddc))
2258 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2259
2260 if (data->is_memory_gddr5)
2261 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2262
2263 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2264
2265 if (i == 1 || i == 0)
2266 table->SystemFlags |= 0x40;
2267
2268 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
2269 result = tonga_populate_ulv_state(hwmgr, table);
2270 PP_ASSERT_WITH_CODE(!result,
2271 "Failed to initialize ULV state !",
2272 return result;);
2273
2274 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2275 ixCG_ULV_PARAMETER, 0x40035);
2276 }
2277
2278 result = tonga_populate_smc_link_level(hwmgr, table);
2279 PP_ASSERT_WITH_CODE(!result,
2280 "Failed to initialize Link Level !", return result);
2281
2282 result = tonga_populate_all_graphic_levels(hwmgr);
2283 PP_ASSERT_WITH_CODE(!result,
2284 "Failed to initialize Graphics Level !", return result);
2285
2286 result = tonga_populate_all_memory_levels(hwmgr);
2287 PP_ASSERT_WITH_CODE(!result,
2288 "Failed to initialize Memory Level !", return result);
2289
2290 result = tonga_populate_smc_acpi_level(hwmgr, table);
2291 PP_ASSERT_WITH_CODE(!result,
2292 "Failed to initialize ACPI Level !", return result);
2293
2294 result = tonga_populate_smc_vce_level(hwmgr, table);
2295 PP_ASSERT_WITH_CODE(!result,
2296 "Failed to initialize VCE Level !", return result);
2297
2298 result = tonga_populate_smc_acp_level(hwmgr, table);
2299 PP_ASSERT_WITH_CODE(!result,
2300 "Failed to initialize ACP Level !", return result);
2301
2302 result = tonga_populate_smc_samu_level(hwmgr, table);
2303 PP_ASSERT_WITH_CODE(!result,
2304 "Failed to initialize SAMU Level !", return result);
2305
2306 /* Since only the initial state is completely set up at this
2307 * point (the other states are just copies of the boot state) we only
2308 * need to populate the ARB settings for the initial state.
2309 */
2310 result = tonga_program_memory_timing_parameters(hwmgr);
2311 PP_ASSERT_WITH_CODE(!result,
2312 "Failed to Write ARB settings for the initial state.",
2313 return result;);
2314
2315 result = tonga_populate_smc_uvd_level(hwmgr, table);
2316 PP_ASSERT_WITH_CODE(!result,
2317 "Failed to initialize UVD Level !", return result);
2318
2319 result = tonga_populate_smc_boot_level(hwmgr, table);
2320 PP_ASSERT_WITH_CODE(!result,
2321 "Failed to initialize Boot Level !", return result);
2322
2323 tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2324 PP_ASSERT_WITH_CODE(!result,
2325 "Failed to populate BAPM Parameters !", return result);
2326
2327 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2328 PHM_PlatformCaps_ClockStretcher)) {
2329 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2330 PP_ASSERT_WITH_CODE(!result,
2331 "Failed to populate Clock Stretcher Data Table !",
2332 return result;);
2333 }
2334 table->GraphicsVoltageChangeEnable = 1;
2335 table->GraphicsThermThrottleEnable = 1;
2336 table->GraphicsInterval = 1;
2337 table->VoltageInterval = 1;
2338 table->ThermalInterval = 1;
2339 table->TemperatureLimitHigh =
2340 table_info->cac_dtp_table->usTargetOperatingTemp *
2341 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2342 table->TemperatureLimitLow =
2343 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2344 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2345 table->MemoryVoltageChangeEnable = 1;
2346 table->MemoryInterval = 1;
2347 table->VoltageResponseTime = 0;
2348 table->PhaseResponseTime = 0;
2349 table->MemoryThermThrottleEnable = 1;
2350
2351 /*
2352 * Cail reads current link status and reports it as cap (we cannot
2353 * change this due to some previous issues we had)
2354 * SMC drops the link status to lowest level after enabling
2355 * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
2356 * but this time Cail reads current link status which was set to low by
2357 * SMC and reports it as cap to powerplay
2358 * To avoid it, we set PCIeBootLinkLevel to highest dpm level
2359 */
2360 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2361 "There must be 1 or more PCIE levels defined in PPTable.",
2362 return -EINVAL);
2363
2364 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
2365
2366 table->PCIeGenInterval = 1;
2367
2368 result = tonga_populate_vr_config(hwmgr, table);
2369 PP_ASSERT_WITH_CODE(!result,
2370 "Failed to populate VRConfig setting !", return result);
2371
2372 table->ThermGpio = 17;
2373 table->SclkStepSize = 0x4000;
2374
2375 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
2376 &gpio_pin_assignment)) {
2377 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2378 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2379 PHM_PlatformCaps_RegulatorHot);
2380 } else {
2381 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2382 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2383 PHM_PlatformCaps_RegulatorHot);
2384 }
2385
2386 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2387 &gpio_pin_assignment)) {
2388 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2389 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2390 PHM_PlatformCaps_AutomaticDCTransition);
2391 } else {
2392 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2393 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2394 PHM_PlatformCaps_AutomaticDCTransition);
2395 }
2396
2397 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2398 PHM_PlatformCaps_Falcon_QuickTransition);
2399
2400 if (0) {
2401 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2402 PHM_PlatformCaps_AutomaticDCTransition);
2403 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2404 PHM_PlatformCaps_Falcon_QuickTransition);
2405 }
2406
2407 if (atomctrl_get_pp_assign_pin(hwmgr,
2408 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
2409 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2410 PHM_PlatformCaps_ThermalOutGPIO);
2411
2412 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2413
2414 table->ThermOutPolarity =
2415 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
2416 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
2417
2418 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2419
2420 /* if required, combine VRHot/PCC with thermal out GPIO*/
2421 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2422 PHM_PlatformCaps_RegulatorHot) &&
2423 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2424 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
2425 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2426 }
2427 } else {
2428 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2429 PHM_PlatformCaps_ThermalOutGPIO);
2430
2431 table->ThermOutGpio = 17;
2432 table->ThermOutPolarity = 1;
2433 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2434 }
2435
2436 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
2437 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2438
2439 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2440 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2441 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2442 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2443 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2444 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2445 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2446 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2447 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2448
2449 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2450 result = smu7_copy_bytes_to_smc(
2451 hwmgr->smumgr,
2452 smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
2453 (uint8_t *)&(table->SystemFlags),
2454 sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
2455 SMC_RAM_END);
2456
2457 PP_ASSERT_WITH_CODE(!result,
2458 "Failed to upload dpm data to SMC memory !", return result;);
2459
2460 result = tonga_init_arb_table_index(hwmgr->smumgr);
2461 PP_ASSERT_WITH_CODE(!result,
2462 "Failed to upload arb data to SMC memory !", return result);
2463
2464 tonga_populate_pm_fuses(hwmgr);
2465 PP_ASSERT_WITH_CODE((!result),
2466 "Failed to populate initialize pm fuses !", return result);
2467
2468 result = tonga_populate_initial_mc_reg_table(hwmgr);
2469 PP_ASSERT_WITH_CODE((!result),
2470 "Failed to populate initialize MC Reg table !", return result);
2471
2472 return 0;
2473}
2474
2475/**
2476* Set up the fan table to control the fan using the SMC.
2477* @param hwmgr the address of the powerplay hardware manager.
2478* @param pInput the pointer to input data
2479* @param pOutput the pointer to output data
2480* @param pStorage the pointer to temporary storage
2481* @param Result the last failure code
2482* @return result from set temperature range routine
2483*/
2484int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2485{
2486 struct tonga_smumgr *smu_data =
2487 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2488 SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2489 uint32_t duty100;
2490 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2491 uint16_t fdo_min, slope1, slope2;
2492 uint32_t reference_clock;
2493 int res;
2494 uint64_t tmp64;
2495
2496 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2497 PHM_PlatformCaps_MicrocodeFanControl))
2498 return 0;
2499
2500 if (0 == smu_data->smu7_data.fan_table_start) {
2501 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2502 PHM_PlatformCaps_MicrocodeFanControl);
2503 return 0;
2504 }
2505
2506 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
2507 CGS_IND_REG__SMC,
2508 CG_FDO_CTRL1, FMAX_DUTY100);
2509
2510 if (0 == duty100) {
2511 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2512 PHM_PlatformCaps_MicrocodeFanControl);
2513 return 0;
2514 }
2515
2516 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2517 do_div(tmp64, 10000);
2518 fdo_min = (uint16_t)tmp64;
2519
2520 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2521 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2522 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2523 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2524
2525 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2526 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2527 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2528 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2529
2530 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2531 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2532
2533 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2534 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2535 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2536
2537 fan_table.Slope1 = cpu_to_be16(slope1);
2538 fan_table.Slope2 = cpu_to_be16(slope2);
2539
2540 fan_table.FdoMin = cpu_to_be16(fdo_min);
2541
2542 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2543
2544 fan_table.HystUp = cpu_to_be16(1);
2545
2546 fan_table.HystSlope = cpu_to_be16(1);
2547
2548 fan_table.TempRespLim = cpu_to_be16(5);
2549
2550 reference_clock = smu7_get_xclk(hwmgr);
2551
2552 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2553
2554 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2555
2556 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2557
2558 fan_table.FanControl_GL_Flag = 1;
2559
2560 res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
2561 smu_data->smu7_data.fan_table_start,
2562 (uint8_t *)&fan_table,
2563 (uint32_t)sizeof(fan_table),
2564 SMC_RAM_END);
2565
2566 return 0;
2567}
2568
2569
2570static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2571{
2572 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2573
2574 if (data->need_update_smu7_dpm_table &
2575 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2576 return tonga_program_memory_timing_parameters(hwmgr);
2577
2578 return 0;
2579}
2580
2581int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2582{
2583 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2584 struct tonga_smumgr *smu_data =
2585 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2586
2587 int result = 0;
2588 uint32_t low_sclk_interrupt_threshold = 0;
2589
2590 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2591 PHM_PlatformCaps_SclkThrottleLowNotification)
2592 && (hwmgr->gfx_arbiter.sclk_threshold !=
2593 data->low_sclk_interrupt_threshold)) {
2594 data->low_sclk_interrupt_threshold =
2595 hwmgr->gfx_arbiter.sclk_threshold;
2596 low_sclk_interrupt_threshold =
2597 data->low_sclk_interrupt_threshold;
2598
2599 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2600
2601 result = smu7_copy_bytes_to_smc(
2602 hwmgr->smumgr,
2603 smu_data->smu7_data.dpm_table_start +
2604 offsetof(SMU72_Discrete_DpmTable,
2605 LowSclkInterruptThreshold),
2606 (uint8_t *)&low_sclk_interrupt_threshold,
2607 sizeof(uint32_t),
2608 SMC_RAM_END);
2609 }
2610
2611 result = tonga_update_and_upload_mc_reg_table(hwmgr);
2612
2613 PP_ASSERT_WITH_CODE((!result),
2614 "Failed to upload MC reg table !",
2615 return result);
2616
2617 result = tonga_program_mem_timing_parameters(hwmgr);
2618 PP_ASSERT_WITH_CODE((result == 0),
2619 "Failed to program memory timing parameters !",
2620 );
2621
2622 return result;
2623}
2624
2625uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
2626{
2627 switch (type) {
2628 case SMU_SoftRegisters:
2629 switch (member) {
2630 case HandshakeDisables:
2631 return offsetof(SMU72_SoftRegisters, HandshakeDisables);
2632 case VoltageChangeTimeout:
2633 return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
2634 case AverageGraphicsActivity:
2635 return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
2636 case PreVBlankGap:
2637 return offsetof(SMU72_SoftRegisters, PreVBlankGap);
2638 case VBlankTimeout:
2639 return offsetof(SMU72_SoftRegisters, VBlankTimeout);
2640 case UcodeLoadStatus:
2641 return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
2642 }
2643 case SMU_Discrete_DpmTable:
2644 switch (member) {
2645 case UvdBootLevel:
2646 return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2647 case VceBootLevel:
2648 return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2649 case SamuBootLevel:
2650 return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2651 case LowSclkInterruptThreshold:
2652 return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
2653 }
2654 }
2655 printk("cant't get the offset of type %x member %x\n", type, member);
2656 return 0;
2657}
2658
2659uint32_t tonga_get_mac_definition(uint32_t value)
2660{
2661 switch (value) {
2662 case SMU_MAX_LEVELS_GRAPHICS:
2663 return SMU72_MAX_LEVELS_GRAPHICS;
2664 case SMU_MAX_LEVELS_MEMORY:
2665 return SMU72_MAX_LEVELS_MEMORY;
2666 case SMU_MAX_LEVELS_LINK:
2667 return SMU72_MAX_LEVELS_LINK;
2668 case SMU_MAX_ENTRIES_SMIO:
2669 return SMU72_MAX_ENTRIES_SMIO;
2670 case SMU_MAX_LEVELS_VDDC:
2671 return SMU72_MAX_LEVELS_VDDC;
2672 case SMU_MAX_LEVELS_VDDGFX:
2673 return SMU72_MAX_LEVELS_VDDGFX;
2674 case SMU_MAX_LEVELS_VDDCI:
2675 return SMU72_MAX_LEVELS_VDDCI;
2676 case SMU_MAX_LEVELS_MVDD:
2677 return SMU72_MAX_LEVELS_MVDD;
2678 }
2679 printk("cant't get the mac value %x\n", value);
2680
2681 return 0;
2682}
2683
2684
2685static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2686{
2687 struct tonga_smumgr *smu_data =
2688 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2689 uint32_t mm_boot_level_offset, mm_boot_level_value;
2690 struct phm_ppt_v1_information *table_info =
2691 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2692
2693 smu_data->smc_state_table.UvdBootLevel = 0;
2694 if (table_info->mm_dep_table->count > 0)
2695 smu_data->smc_state_table.UvdBootLevel =
2696 (uint8_t) (table_info->mm_dep_table->count - 1);
2697 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2698 offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2699 mm_boot_level_offset /= 4;
2700 mm_boot_level_offset *= 4;
2701 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2702 CGS_IND_REG__SMC, mm_boot_level_offset);
2703 mm_boot_level_value &= 0x00FFFFFF;
2704 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2705 cgs_write_ind_register(hwmgr->device,
2706 CGS_IND_REG__SMC,
2707 mm_boot_level_offset, mm_boot_level_value);
2708
2709 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2710 PHM_PlatformCaps_UVDDPM) ||
2711 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2712 PHM_PlatformCaps_StablePState))
2713 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2714 PPSMC_MSG_UVDDPM_SetEnabledMask,
2715 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2716 return 0;
2717}
2718
2719static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2720{
2721 struct tonga_smumgr *smu_data =
2722 (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2723 uint32_t mm_boot_level_offset, mm_boot_level_value;
2724 struct phm_ppt_v1_information *table_info =
2725 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2726
2727
2728 smu_data->smc_state_table.VceBootLevel =
2729 (uint8_t) (table_info->mm_dep_table->count - 1);
2730
2731 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2732 offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2733 mm_boot_level_offset /= 4;
2734 mm_boot_level_offset *= 4;
2735 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2736 CGS_IND_REG__SMC, mm_boot_level_offset);
2737 mm_boot_level_value &= 0xFF00FFFF;
2738 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2739 cgs_write_ind_register(hwmgr->device,
2740 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2741
2742 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2743 PHM_PlatformCaps_StablePState))
2744 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2745 PPSMC_MSG_VCEDPM_SetEnabledMask,
2746 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2747 return 0;
2748}
2749
2750static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2751{
2752 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
2753 uint32_t mm_boot_level_offset, mm_boot_level_value;
2754
2755 smu_data->smc_state_table.SamuBootLevel = 0;
2756 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2757 offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2758
2759 mm_boot_level_offset /= 4;
2760 mm_boot_level_offset *= 4;
2761 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2762 CGS_IND_REG__SMC, mm_boot_level_offset);
2763 mm_boot_level_value &= 0xFFFFFF00;
2764 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2765 cgs_write_ind_register(hwmgr->device,
2766 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2767
2768 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2769 PHM_PlatformCaps_StablePState))
2770 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2771 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2772 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2773 return 0;
2774}
2775
2776int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2777{
2778 switch (type) {
2779 case SMU_UVD_TABLE:
2780 tonga_update_uvd_smc_table(hwmgr);
2781 break;
2782 case SMU_VCE_TABLE:
2783 tonga_update_vce_smc_table(hwmgr);
2784 break;
2785 case SMU_SAMU_TABLE:
2786 tonga_update_samu_smc_table(hwmgr);
2787 break;
2788 default:
2789 break;
2790 }
2791 return 0;
2792}
2793
2794
/**
 * Get the location of various tables inside the FW image.
 *
 * Reads the SMU72 firmware header out of SMC SRAM and caches the start
 * offsets of the DPM table, soft registers, MC register table, fan table
 * and arbiter DRAM timing table, plus the SMC microcode version.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 when every required read succeeded, 1 otherwise.
 */
int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);

	uint32_t tmp;
	int result;
	bool error = false;	/* sticky failure flag across all reads */

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, DpmTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.dpm_table_start = tmp;

	error |= (result != 0);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, SoftRegisters),
			&tmp, SMC_RAM_END);

	if (!result) {
		/* Cached in both places: hwmgr-side code reads
		 * data->soft_regs_start, smumgr-side code reads
		 * smu_data->smu7_data.soft_regs_start. */
		data->soft_regs_start = tmp;
		smu_data->smu7_data.soft_regs_start = tmp;
	}

	error |= (result != 0);


	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, mcRegisterTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.mc_reg_table_start = tmp;

	/* NOTE(review): unlike every other read here, this result is not
	 * folded into 'error' — presumably the MC register table is
	 * optional, but confirm this omission is intentional. */

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, FanTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.fan_table_start = tmp;

	error |= (result != 0);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.arb_table_start = tmp;

	error |= (result != 0);

	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU72_Firmware_Header, Version),
			&tmp, SMC_RAM_END);

	if (!result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (result != 0);

	return error ? 1 : 0;
}
2873
2874/*---------------------------MC----------------------------*/
2875
2876static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2877{
2878 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2879}
2880
2881static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2882{
2883 bool result = true;
2884
2885 switch (in_reg) {
2886 case mmMC_SEQ_RAS_TIMING:
2887 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2888 break;
2889
2890 case mmMC_SEQ_DLL_STBY:
2891 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2892 break;
2893
2894 case mmMC_SEQ_G5PDX_CMD0:
2895 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2896 break;
2897
2898 case mmMC_SEQ_G5PDX_CMD1:
2899 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2900 break;
2901
2902 case mmMC_SEQ_G5PDX_CTRL:
2903 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2904 break;
2905
2906 case mmMC_SEQ_CAS_TIMING:
2907 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2908 break;
2909
2910 case mmMC_SEQ_MISC_TIMING:
2911 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2912 break;
2913
2914 case mmMC_SEQ_MISC_TIMING2:
2915 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2916 break;
2917
2918 case mmMC_SEQ_PMG_DVS_CMD:
2919 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2920 break;
2921
2922 case mmMC_SEQ_PMG_DVS_CTL:
2923 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2924 break;
2925
2926 case mmMC_SEQ_RD_CTL_D0:
2927 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2928 break;
2929
2930 case mmMC_SEQ_RD_CTL_D1:
2931 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2932 break;
2933
2934 case mmMC_SEQ_WR_CTL_D0:
2935 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2936 break;
2937
2938 case mmMC_SEQ_WR_CTL_D1:
2939 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2940 break;
2941
2942 case mmMC_PMG_CMD_EMRS:
2943 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2944 break;
2945
2946 case mmMC_PMG_CMD_MRS:
2947 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2948 break;
2949
2950 case mmMC_PMG_CMD_MRS1:
2951 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2952 break;
2953
2954 case mmMC_SEQ_PMG_TIMING:
2955 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2956 break;
2957
2958 case mmMC_PMG_CMD_MRS2:
2959 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2960 break;
2961
2962 case mmMC_SEQ_WR_CTL_2:
2963 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2964 break;
2965
2966 default:
2967 result = false;
2968 break;
2969 }
2970
2971 return result;
2972}
2973
2974static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
2975{
2976 uint32_t i;
2977 uint16_t address;
2978
2979 for (i = 0; i < table->last; i++) {
2980 table->mc_reg_address[i].s0 =
2981 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
2982 &address) ?
2983 address :
2984 table->mc_reg_address[i].s1;
2985 }
2986 return 0;
2987}
2988
2989static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2990 struct tonga_mc_reg_table *ni_table)
2991{
2992 uint8_t i, j;
2993
2994 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2995 "Invalid VramInfo table.", return -EINVAL);
2996 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2997 "Invalid VramInfo table.", return -EINVAL);
2998
2999 for (i = 0; i < table->last; i++)
3000 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3001
3002 ni_table->last = table->last;
3003
3004 for (i = 0; i < table->num_entries; i++) {
3005 ni_table->mc_reg_table_entry[i].mclk_max =
3006 table->mc_reg_table_entry[i].mclk_max;
3007 for (j = 0; j < table->last; j++) {
3008 ni_table->mc_reg_table_entry[i].mc_data[j] =
3009 table->mc_reg_table_entry[i].mc_data[j];
3010 }
3011 }
3012
3013 ni_table->num_entries = table->num_entries;
3014
3015 return 0;
3016}
3017
/**
 * VBIOS omits some information to reduce size, we need to recover them here.
 * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to
 *    mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update
 *    mmMC_PMG_CMD_MRS/_LP[15:0]
 * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to
 *    mmMC_PMG_CMD_MRS1/_LP[15:0].
 * 3. need to set these data for each clock range
 * @param hwmgr the address of the powerplay hardware manager.
 * @param table the address of MCRegTable
 * @return always 0
 */
static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
					struct tonga_mc_reg_table *table)
{
	uint8_t i, j, k;
	uint32_t temp_reg;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* i scans the rows copied from VBIOS; j appends the recovered
	 * rows after them. Every appended row is bounds-checked against
	 * the SMU72 array size before it is written. */
	for (i = 0, j = table->last; i < table->last; i++) {
		PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -EINVAL);

		switch (table->mc_reg_address[i].s1) {

		case mmMC_SEQ_MISC1:
			/* Appended row 1: EMRS — keep the current EMRS
			 * high half, take MRS bits from MISC1[31:16]. */
			temp_reg = cgs_read_register(hwmgr->device,
							mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);

			/* Appended row 2: MRS — keep the current MRS high
			 * half, take the low half from MISC1[15:0]. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);

				/* Non-GDDR5 memory needs bit 8 set in MRS. */
				if (!data->is_memory_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			/* NOTE(review): this and the later asserts use '<='
			 * where the first two use '<' — confirm the off-by-one
			 * difference is intentional. */
			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);

			/* Appended row 3 (non-GDDR5 only): auto-command
			 * register fed from MISC1[31:16]. */
			if (!data->is_memory_gddr5) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++)
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				j++;
				PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
					"Invalid VramInfo table.", return -EINVAL);
			}

			break;

		case mmMC_SEQ_RESERVE_M:
			/* Appended row: MRS1 — keep the current MRS1 high
			 * half, take the low half from RESERVE_M[15:0]. */
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);
			break;

		default:
			break;
		}

	}

	/* Publish the new row count including the appended rows. */
	table->last = j;

	return 0;
}
3109
3110static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
3111{
3112 uint8_t i, j;
3113
3114 for (i = 0; i < table->last; i++) {
3115 for (j = 1; j < table->num_entries; j++) {
3116 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3117 table->mc_reg_table_entry[j].mc_data[i]) {
3118 table->validflag |= (1<<i);
3119 break;
3120 }
3121 }
3122 }
3123
3124 return 0;
3125}
3126
/*
 * Build the driver-side MC register table: mirror the live MC sequencer
 * registers into their _LP shadows, read the VBIOS MC register table via
 * atomctrl, copy it into smu_data->mc_reg_table, recover the registers
 * VBIOS omits, and compute the valid-column mask.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success; -ENOMEM, -EINVAL or an atomctrl error otherwise.
 */
int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
	pp_atomctrl_mc_reg_table *table;	/* scratch copy of the VBIOS table */
	struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
	uint8_t module_index = tonga_get_memory_modile_index(hwmgr);

	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);

	if (table == NULL)
		return -ENOMEM;

	/* Program additional LP registers that are no longer programmed by VBIOS */
	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));

	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));

	/* Read the VBIOS table for this memory module, then derive the
	 * driver-side table from it step by step. */
	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);

	if (!result)
		result = tonga_copy_vbios_smc_reg_table(table, ni_table);

	if (!result) {
		tonga_set_s0_mc_reg_index(ni_table);
		result = tonga_set_mc_special_registers(hwmgr, ni_table);
	}

	if (!result)
		tonga_set_valid_flag(ni_table);

	kfree(table);

	return result;
}
3201
3202bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
3203{
3204 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
3205 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
3206 ? true : false;
3207}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
index c8bdb92d81f4..8ae169ff541d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
@@ -20,36 +20,17 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#ifndef _TONGA_SMC_H
24#define _TONGA_SMC_H
23 25
24#ifndef TONGA_POWERTUNE_H 26#include "smumgr.h"
25#define TONGA_POWERTUNE_H 27#include "smu72.h"
26 28
27enum _phw_tonga_ptc_config_reg_type {
28 TONGA_CONFIGREG_MMR = 0,
29 TONGA_CONFIGREG_SMC_IND,
30 TONGA_CONFIGREG_DIDT_IND,
31 TONGA_CONFIGREG_CACHE,
32 29
33 TONGA_CONFIGREG_MAX 30#define ASICID_IS_TONGA_P(wDID, bRID) \
34}; 31 (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
35typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; 32 || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
36
37/* PowerContainment Features */
38#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
39
40
41/* PowerContainment Features */
42#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
43#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
44#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
45 33
46struct tonga_pt_config_reg {
47 uint32_t Offset;
48 uint32_t Mask;
49 uint32_t Shift;
50 uint32_t Value;
51 phw_tonga_ptc_config_reg_type Type;
52};
53 34
54struct tonga_pt_defaults { 35struct tonga_pt_defaults {
55 uint8_t svi_load_line_en; 36 uint8_t svi_load_line_en;
@@ -64,17 +45,16 @@ struct tonga_pt_defaults {
64 uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; 45 uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
65}; 46};
66 47
67 48int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
68 49int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
69void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); 50int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
70int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); 51int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
71int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr); 52int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
72int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr); 53int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
73int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr); 54uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
74int tonga_enable_power_containment(struct pp_hwmgr *hwmgr); 55uint32_t tonga_get_mac_definition(uint32_t value);
75int tonga_disable_power_containment(struct pp_hwmgr *hwmgr); 56int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
76int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); 57int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
77int tonga_power_control_set_level(struct pp_hwmgr *hwmgr); 58bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
78
79#endif 59#endif
80 60
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f42c536b3af1..5f9124046b9b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,587 +33,9 @@
33#include "smu/smu_7_1_2_d.h" 33#include "smu/smu_7_1_2_d.h"
34#include "smu/smu_7_1_2_sh_mask.h" 34#include "smu/smu_7_1_2_sh_mask.h"
35#include "cgs_common.h" 35#include "cgs_common.h"
36#include "tonga_smc.h"
37#include "smu7_smumgr.h"
36 38
37#define TONGA_SMC_SIZE 0x20000
38#define BUFFER_SIZE 80000
39#define MAX_STRING_SIZE 15
40#define BUFFER_SIZETWO 131072 /*128 *1024*/
41
42/**
43* Set the address for reading/writing the SMC SRAM space.
44* @param smumgr the address of the powerplay hardware manager.
45* @param smcAddress the address in the SMC RAM to access.
46*/
47static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
48 uint32_t smcAddress, uint32_t limit)
49{
50 if (smumgr == NULL || smumgr->device == NULL)
51 return -EINVAL;
52 PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
53 "SMC address must be 4 byte aligned.",
54 return -1;);
55
56 PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
57 "SMC address is beyond the SMC RAM area.",
58 return -1;);
59
60 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
61 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
62
63 return 0;
64}
65
66/**
67* Copy bytes from an array into the SMC RAM space.
68*
69* @param smumgr the address of the powerplay SMU manager.
70* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
71* @param src the byte array to copy the bytes from.
72* @param byteCount the number of bytes to copy.
73*/
74int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
75 uint32_t smcStartAddress, const uint8_t *src,
76 uint32_t byteCount, uint32_t limit)
77{
78 uint32_t addr;
79 uint32_t data, orig_data;
80 int result = 0;
81 uint32_t extra_shift;
82
83 if (smumgr == NULL || smumgr->device == NULL)
84 return -EINVAL;
85 PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
86 "SMC address must be 4 byte aligned.",
87 return 0;);
88
89 PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
90 "SMC address is beyond the SMC RAM area.",
91 return 0;);
92
93 addr = smcStartAddress;
94
95 while (byteCount >= 4) {
96 /*
97 * Bytes are written into the
98 * SMC address space with the MSB first
99 */
100 data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
101
102 result = tonga_set_smc_sram_address(smumgr, addr, limit);
103
104 if (result)
105 goto out;
106
107 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
108
109 src += 4;
110 byteCount -= 4;
111 addr += 4;
112 }
113
114 if (0 != byteCount) {
115 /* Now write odd bytes left, do a read modify write cycle */
116 data = 0;
117
118 result = tonga_set_smc_sram_address(smumgr, addr, limit);
119 if (result)
120 goto out;
121
122 orig_data = cgs_read_register(smumgr->device,
123 mmSMC_IND_DATA_0);
124 extra_shift = 8 * (4 - byteCount);
125
126 while (byteCount > 0) {
127 data = (data << 8) + *src++;
128 byteCount--;
129 }
130
131 data <<= extra_shift;
132 data |= (orig_data & ~((~0UL) << extra_shift));
133
134 result = tonga_set_smc_sram_address(smumgr, addr, limit);
135 if (result)
136 goto out;
137
138 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
139 }
140
141out:
142 return result;
143}
144
145
146int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
147{
148 static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
149
150 tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
151
152 return 0;
153}
154
155/**
156* Return if the SMC is currently running.
157*
158* @param smumgr the address of the powerplay hardware manager.
159*/
160static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
161{
162 return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
163 SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
164 && (0x20100 <= cgs_read_ind_register(smumgr->device,
165 CGS_IND_REG__SMC, ixSMC_PC_C)));
166}
167
168static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
169{
170 if (smumgr == NULL || smumgr->device == NULL)
171 return -EINVAL;
172
173 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
174
175 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
176 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
177
178 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
179
180 return 0;
181}
182
183/**
184* Send a message to the SMC, and wait for its response.
185*
186* @param smumgr the address of the powerplay hardware manager.
187* @param msg the message to send.
188* @return The response that came from the SMC.
189*/
190static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
191{
192 if (smumgr == NULL || smumgr->device == NULL)
193 return -EINVAL;
194
195 if (!tonga_is_smc_ram_running(smumgr))
196 return -1;
197
198 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
199 PP_ASSERT_WITH_CODE(
200 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
201 "Failed to send Previous Message.",
202 );
203
204 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
205
206 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
207 PP_ASSERT_WITH_CODE(
208 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
209 "Failed to send Message.",
210 );
211
212 return 0;
213}
214
215/*
216* Send a message to the SMC, and do not wait for its response.
217*
218* @param smumgr the address of the powerplay hardware manager.
219* @param msg the message to send.
220* @return The response that came from the SMC.
221*/
222static int tonga_send_msg_to_smc_without_waiting
223 (struct pp_smumgr *smumgr, uint16_t msg)
224{
225 if (smumgr == NULL || smumgr->device == NULL)
226 return -EINVAL;
227
228 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
229 PP_ASSERT_WITH_CODE(
230 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
231 "Failed to send Previous Message.",
232 );
233 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
234
235 return 0;
236}
237
238/*
239* Send a message to the SMC with parameter
240*
241* @param smumgr: the address of the powerplay hardware manager.
242* @param msg: the message to send.
243* @param parameter: the parameter to send
244* @return The response that came from the SMC.
245*/
246static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
247 uint16_t msg, uint32_t parameter)
248{
249 if (smumgr == NULL || smumgr->device == NULL)
250 return -EINVAL;
251
252 if (!tonga_is_smc_ram_running(smumgr))
253 return PPSMC_Result_Failed;
254
255 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
256 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
257
258 return tonga_send_msg_to_smc(smumgr, msg);
259}
260
261/*
262* Send a message to the SMC with parameter, do not wait for response
263*
264* @param smumgr: the address of the powerplay hardware manager.
265* @param msg: the message to send.
266* @param parameter: the parameter to send
267* @return The response that came from the SMC.
268*/
269static int tonga_send_msg_to_smc_with_parameter_without_waiting(
270 struct pp_smumgr *smumgr,
271 uint16_t msg, uint32_t parameter)
272{
273 if (smumgr == NULL || smumgr->device == NULL)
274 return -EINVAL;
275
276 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
277
278 cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
279
280 return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
281}
282
283/*
284 * Read a 32bit value from the SMC SRAM space.
285 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
286 * @param smumgr the address of the powerplay hardware manager.
287 * @param smcAddress the address in the SMC RAM to access.
288 * @param value and output parameter for the data read from the SMC SRAM.
289 */
290int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
291 uint32_t smcAddress, uint32_t *value,
292 uint32_t limit)
293{
294 int result;
295
296 result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
297
298 if (0 != result)
299 return result;
300
301 *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
302
303 return 0;
304}
305
306/*
307 * Write a 32bit value to the SMC SRAM space.
308 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
309 * @param smumgr the address of the powerplay hardware manager.
310 * @param smcAddress the address in the SMC RAM to access.
311 * @param value to write to the SMC SRAM.
312 */
313int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
314 uint32_t smcAddress, uint32_t value,
315 uint32_t limit)
316{
317 int result;
318
319 result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
320
321 if (0 != result)
322 return result;
323
324 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
325
326 return 0;
327}
328
329static int tonga_smu_fini(struct pp_smumgr *smumgr)
330{
331 struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
332
333 smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
334 smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
335
336 if (smumgr->backend != NULL) {
337 kfree(smumgr->backend);
338 smumgr->backend = NULL;
339 }
340
341 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
342 return 0;
343}
344
345static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
346{
347 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
348
349 switch (fw_type) {
350 case UCODE_ID_SMU:
351 result = CGS_UCODE_ID_SMU;
352 break;
353 case UCODE_ID_SDMA0:
354 result = CGS_UCODE_ID_SDMA0;
355 break;
356 case UCODE_ID_SDMA1:
357 result = CGS_UCODE_ID_SDMA1;
358 break;
359 case UCODE_ID_CP_CE:
360 result = CGS_UCODE_ID_CP_CE;
361 break;
362 case UCODE_ID_CP_PFP:
363 result = CGS_UCODE_ID_CP_PFP;
364 break;
365 case UCODE_ID_CP_ME:
366 result = CGS_UCODE_ID_CP_ME;
367 break;
368 case UCODE_ID_CP_MEC:
369 result = CGS_UCODE_ID_CP_MEC;
370 break;
371 case UCODE_ID_CP_MEC_JT1:
372 result = CGS_UCODE_ID_CP_MEC_JT1;
373 break;
374 case UCODE_ID_CP_MEC_JT2:
375 result = CGS_UCODE_ID_CP_MEC_JT2;
376 break;
377 case UCODE_ID_RLC_G:
378 result = CGS_UCODE_ID_RLC_G;
379 break;
380 default:
381 break;
382 }
383
384 return result;
385}
386
387/**
388 * Convert the PPIRI firmware type to SMU type mask.
389 * For MEC, we need to check all MEC related type
390*/
391static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
392{
393 uint16_t result = 0;
394
395 switch (firmwareType) {
396 case UCODE_ID_SDMA0:
397 result = UCODE_ID_SDMA0_MASK;
398 break;
399 case UCODE_ID_SDMA1:
400 result = UCODE_ID_SDMA1_MASK;
401 break;
402 case UCODE_ID_CP_CE:
403 result = UCODE_ID_CP_CE_MASK;
404 break;
405 case UCODE_ID_CP_PFP:
406 result = UCODE_ID_CP_PFP_MASK;
407 break;
408 case UCODE_ID_CP_ME:
409 result = UCODE_ID_CP_ME_MASK;
410 break;
411 case UCODE_ID_CP_MEC:
412 case UCODE_ID_CP_MEC_JT1:
413 case UCODE_ID_CP_MEC_JT2:
414 result = UCODE_ID_CP_MEC_MASK;
415 break;
416 case UCODE_ID_RLC_G:
417 result = UCODE_ID_RLC_G_MASK;
418 break;
419 default:
420 break;
421 }
422
423 return result;
424}
425
426/**
427 * Check if the FW has been loaded,
428 * SMU will not return if loading has not finished.
429*/
430static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
431{
432 uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);
433
434 if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
435 SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
436 printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
437 return -EINVAL;
438 }
439
440 return 0;
441}
442
443/* Populate one firmware image to the data structure */
444static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
445 uint16_t firmware_type,
446 struct SMU_Entry *pentry)
447{
448 int result;
449 struct cgs_firmware_info info = {0};
450
451 result = cgs_get_firmware_info(
452 smumgr->device,
453 tonga_convert_fw_type_to_cgs(firmware_type),
454 &info);
455
456 if (result == 0) {
457 pentry->version = 0;
458 pentry->id = (uint16_t)firmware_type;
459 pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
460 pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
461 pentry->meta_data_addr_high = 0;
462 pentry->meta_data_addr_low = 0;
463 pentry->data_size_byte = info.image_size;
464 pentry->num_register_entries = 0;
465
466 if (firmware_type == UCODE_ID_RLC_G)
467 pentry->flags = 1;
468 else
469 pentry->flags = 0;
470 } else {
471 return result;
472 }
473
474 return result;
475}
476
/* Build the firmware TOC in the driver's DRAM buffers and ask the SMU to
 * (re)load every ucode image listed in it.  Returns 0; individual TOC
 * population failures abort with -1 via PP_ASSERT_WITH_CODE. */
static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
{
	struct tonga_smumgr *tonga_smu =
		(struct tonga_smumgr *)(smumgr->backend);
	uint16_t fw_to_load;
	struct SMU_DRAMData_TOC *toc;
	/**
	 * First time this gets called during SmuMgr init,
	 * we haven't processed SMU header file yet,
	 * so Soft Register Start offset is unknown.
	 * However, for this case, UcodeLoadStatus is already 0,
	 * so we can skip this if the Soft Registers Start offset is 0.
	 */
	cgs_write_ind_register(smumgr->device,
		CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);

	/* Hand the SMU the 64-bit MC address of the SMU scratch buffer,
	 * split into high/low 32-bit halves. */
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_SMU_DRAM_ADDR_HI,
		tonga_smu->smu_buffer.mc_addr_high);
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_SMU_DRAM_ADDR_LO,
		tonga_smu->smu_buffer.mc_addr_low);

	/* The TOC lives at the start of the header buffer. */
	toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
	toc->num_entries = 0;
	toc->structure_version = 1;

	/* One TOC entry per ucode image; each PP_ASSERT_WITH_CODE bails
	 * out with -1 if the entry cannot be populated. */
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry(smumgr,
		UCODE_ID_RLC_G,
		&toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n",
		return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry(smumgr,
		UCODE_ID_CP_CE,
		&toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n",
		return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);
	PP_ASSERT_WITH_CODE(
		0 == tonga_populate_single_firmware_entry
		(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
		"Failed to Get Firmware Entry.\n", return -1);

	/* Tell the SMU where the driver's header buffer (the TOC) lives. */
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_DRV_DRAM_ADDR_HI,
		tonga_smu->header_buffer.mc_addr_high);
	tonga_send_msg_to_smc_with_parameter(smumgr,
		PPSMC_MSG_DRV_DRAM_ADDR_LO,
		tonga_smu->header_buffer.mc_addr_low);

	/* Mask of everything to load; note there is no separate mask for
	 * the MEC JT1/JT2 images — UCODE_ID_CP_MEC_MASK covers them. */
	fw_to_load = UCODE_ID_RLC_G_MASK
		+ UCODE_ID_SDMA0_MASK
		+ UCODE_ID_SDMA1_MASK
		+ UCODE_ID_CP_CE_MASK
		+ UCODE_ID_CP_ME_MASK
		+ UCODE_ID_CP_PFP_MASK
		+ UCODE_ID_CP_MEC_MASK;

	/* Fire-and-forget: completion is observed later via
	 * tonga_check_fw_load_finish(). A send failure is only logged
	 * (the assert's recovery code returns 0). */
	PP_ASSERT_WITH_CODE(
		0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
		smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
		"Fail to Request SMU Load uCode", return 0);

	return 0;
}
567
568static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
569 uint32_t firmwareType)
570{
571 return 0;
572}
573
574/**
575 * Upload the SMC firmware to the SMC microcontroller.
576 *
577 * @param smumgr the address of the powerplay hardware manager.
578 * @param pFirmware the data structure containing the various sections of the firmware.
579 */
580static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
581{
582 const uint8_t *src;
583 uint32_t byte_count;
584 uint32_t *data;
585 struct cgs_firmware_info info = {0};
586
587 if (smumgr == NULL || smumgr->device == NULL)
588 return -EINVAL;
589
590 cgs_get_firmware_info(smumgr->device,
591 tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
592
593 if (info.image_size & 3) {
594 printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n");
595 return -EINVAL;
596 }
597
598 if (info.image_size > TONGA_SMC_SIZE) {
599 printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n");
600 return -EINVAL;
601 }
602
603 cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
604 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
605
606 byte_count = info.image_size;
607 src = (const uint8_t *)info.kptr;
608
609 data = (uint32_t *)src;
610 for (; byte_count >= 4; data++, byte_count -= 4)
611 cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
612
613 SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
614
615 return 0;
616}
617 39
618static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) 40static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
619{ 41{
@@ -623,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
623 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 45 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
624 SMC_SYSCON_RESET_CNTL, rst_reg, 1); 46 SMC_SYSCON_RESET_CNTL, rst_reg, 1);
625 47
626 result = tonga_smu_upload_firmware_image(smumgr); 48 result = smu7_upload_smu_firmware_image(smumgr);
627 if (result) 49 if (result)
628 return result; 50 return result;
629 51
@@ -653,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
653 /** 75 /**
654 * Call Test SMU message with 0x20000 offset to trigger SMU start 76 * Call Test SMU message with 0x20000 offset to trigger SMU start
655 */ 77 */
656 tonga_send_msg_to_smc_offset(smumgr); 78 smu7_send_msg_to_smc_offset(smumgr);
657 79
658 /* Wait for done bit to be set */ 80 /* Wait for done bit to be set */
659 SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, 81 SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
@@ -690,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
690 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 112 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
691 SMC_SYSCON_RESET_CNTL, rst_reg, 1); 113 SMC_SYSCON_RESET_CNTL, rst_reg, 1);
692 114
693 result = tonga_smu_upload_firmware_image(smumgr); 115 result = smu7_upload_smu_firmware_image(smumgr);
694 116
695 if (result != 0) 117 if (result != 0)
696 return result; 118 return result;
697 119
698 /* Set smc instruct start point at 0x0 */ 120 /* Set smc instruct start point at 0x0 */
699 tonga_program_jump_on_start(smumgr); 121 smu7_program_jump_on_start(smumgr);
700 122
701 123
702 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 124 SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -718,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
718 int result; 140 int result;
719 141
720 /* Only start SMC if SMC RAM is not running */ 142 /* Only start SMC if SMC RAM is not running */
721 if (!tonga_is_smc_ram_running(smumgr)) { 143 if (!smu7_is_smc_ram_running(smumgr)) {
722 /*Check if SMU is running in protected mode*/ 144 /*Check if SMU is running in protected mode*/
723 if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 145 if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
724 SMU_FIRMWARE, SMU_MODE)) { 146 SMU_FIRMWARE, SMU_MODE)) {
@@ -732,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
732 } 154 }
733 } 155 }
734 156
735 result = tonga_request_smu_reload_fw(smumgr); 157 result = smu7_request_smu_load_fw(smumgr);
736 158
737 return result; 159 return result;
738} 160}
@@ -746,67 +168,41 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
746 */ 168 */
747static int tonga_smu_init(struct pp_smumgr *smumgr) 169static int tonga_smu_init(struct pp_smumgr *smumgr)
748{ 170{
749 struct tonga_smumgr *tonga_smu; 171 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
750 uint8_t *internal_buf; 172
751 uint64_t mc_addr = 0; 173 int i;
752 /* Allocate memory for backend private data */ 174
753 tonga_smu = (struct tonga_smumgr *)(smumgr->backend); 175 if (smu7_init(smumgr))
754 tonga_smu->header_buffer.data_size = 176 return -EINVAL;
755 ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; 177
756 tonga_smu->smu_buffer.data_size = 200*4096; 178 for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
757 179 smu_data->activity_target[i] = 30;
758 smu_allocate_memory(smumgr->device,
759 tonga_smu->header_buffer.data_size,
760 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
761 PAGE_SIZE,
762 &mc_addr,
763 &tonga_smu->header_buffer.kaddr,
764 &tonga_smu->header_buffer.handle);
765
766 tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
767 tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
768 tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
769
770 PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
771 "Out of memory.",
772 kfree(smumgr->backend);
773 cgs_free_gpu_mem(smumgr->device,
774 (cgs_handle_t)tonga_smu->header_buffer.handle);
775 return -1);
776
777 smu_allocate_memory(smumgr->device,
778 tonga_smu->smu_buffer.data_size,
779 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
780 PAGE_SIZE,
781 &mc_addr,
782 &tonga_smu->smu_buffer.kaddr,
783 &tonga_smu->smu_buffer.handle);
784
785 internal_buf = tonga_smu->smu_buffer.kaddr;
786 tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
787 tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
788
789 PP_ASSERT_WITH_CODE((NULL != internal_buf),
790 "Out of memory.",
791 kfree(smumgr->backend);
792 cgs_free_gpu_mem(smumgr->device,
793 (cgs_handle_t)tonga_smu->smu_buffer.handle);
794 return -1;);
795 180
796 return 0; 181 return 0;
797} 182}
798 183
799static const struct pp_smumgr_func tonga_smu_funcs = { 184static const struct pp_smumgr_func tonga_smu_funcs = {
800 .smu_init = &tonga_smu_init, 185 .smu_init = &tonga_smu_init,
801 .smu_fini = &tonga_smu_fini, 186 .smu_fini = &smu7_smu_fini,
802 .start_smu = &tonga_start_smu, 187 .start_smu = &tonga_start_smu,
803 .check_fw_load_finish = &tonga_check_fw_load_finish, 188 .check_fw_load_finish = &smu7_check_fw_load_finish,
804 .request_smu_load_fw = &tonga_request_smu_reload_fw, 189 .request_smu_load_fw = &smu7_request_smu_load_fw,
805 .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw, 190 .request_smu_load_specific_fw = NULL,
806 .send_msg_to_smc = &tonga_send_msg_to_smc, 191 .send_msg_to_smc = &smu7_send_msg_to_smc,
807 .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter, 192 .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
808 .download_pptable_settings = NULL, 193 .download_pptable_settings = NULL,
809 .upload_pptable_settings = NULL, 194 .upload_pptable_settings = NULL,
195 .update_smc_table = tonga_update_smc_table,
196 .get_offsetof = tonga_get_offsetof,
197 .process_firmware_header = tonga_process_firmware_header,
198 .init_smc_table = tonga_init_smc_table,
199 .update_sclk_threshold = tonga_update_sclk_threshold,
200 .thermal_setup_fan_table = tonga_thermal_setup_fan_table,
201 .populate_all_graphic_levels = tonga_populate_all_graphic_levels,
202 .populate_all_memory_levels = tonga_populate_all_memory_levels,
203 .get_mac_definition = tonga_get_mac_definition,
204 .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
205 .is_dpm_running = tonga_is_dpm_running,
810}; 206};
811 207
812int tonga_smum_init(struct pp_smumgr *smumgr) 208int tonga_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 33c788d7f05c..edb5f203f7f5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -24,30 +24,36 @@
24#ifndef _TONGA_SMUMGR_H_ 24#ifndef _TONGA_SMUMGR_H_
25#define _TONGA_SMUMGR_H_ 25#define _TONGA_SMUMGR_H_
26 26
27struct tonga_buffer_entry { 27#include "smu72_discrete.h"
28 uint32_t data_size; 28
29 uint32_t mc_addr_low; 29#include "smu7_smumgr.h"
30 uint32_t mc_addr_high; 30
31 void *kaddr; 31struct tonga_mc_reg_entry {
32 unsigned long handle; 32 uint32_t mclk_max;
33 uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
34};
35
36struct tonga_mc_reg_table {
37 uint8_t last; /* number of registers*/
38 uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
39 uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
40 struct tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
41 SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
33}; 42};
34 43
44
35struct tonga_smumgr { 45struct tonga_smumgr {
36 uint8_t *pHeader;
37 uint8_t *pMecImage;
38 uint32_t ulSoftRegsStart;
39 46
40 struct tonga_buffer_entry header_buffer; 47 struct smu7_smumgr smu7_data;
41 struct tonga_buffer_entry smu_buffer; 48 struct SMU72_Discrete_DpmTable smc_state_table;
42}; 49 struct SMU72_Discrete_Ulv ulv_setting;
50 struct SMU72_Discrete_PmFuses power_tune_table;
51 struct tonga_pt_defaults *power_tune_defaults;
52 SMU72_Discrete_MCRegisters mc_regs;
53 struct tonga_mc_reg_table mc_reg_table;
43 54
44extern int tonga_smum_init(struct pp_smumgr *smumgr); 55 uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS];
45extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, 56
46 uint32_t smcStartAddress, const uint8_t *src, 57};
47 uint32_t byteCount, uint32_t limit);
48extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
49 uint32_t *value, uint32_t limit);
50extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
51 uint32_t value, uint32_t limit);
52 58
53#endif 59#endif
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 31c9a92d6a1b..6efbd65c929e 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,6 +25,7 @@
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h> 27#include <linux/power_supply.h>
28#include <linux/pm_runtime.h>
28#include <acpi/video.h> 29#include <acpi/video.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
@@ -32,6 +33,12 @@
32#include "radeon_acpi.h" 33#include "radeon_acpi.h"
33#include "atom.h" 34#include "atom.h"
34 35
36#if defined(CONFIG_VGA_SWITCHEROO)
37bool radeon_atpx_dgpu_req_power_for_displays(void);
38#else
39static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; }
40#endif
41
35#define ACPI_AC_CLASS "ac_adapter" 42#define ACPI_AC_CLASS "ac_adapter"
36 43
37extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); 44extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
@@ -394,6 +401,16 @@ int radeon_atif_handler(struct radeon_device *rdev,
394#endif 401#endif
395 } 402 }
396 } 403 }
404 if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
405 if ((rdev->flags & RADEON_IS_PX) &&
406 radeon_atpx_dgpu_req_power_for_displays()) {
407 pm_runtime_get_sync(rdev->ddev->dev);
408 /* Just fire off a uevent and let userspace tell us what to do */
409 drm_helper_hpd_irq_event(rdev->ddev);
410 pm_runtime_mark_last_busy(rdev->ddev->dev);
411 pm_runtime_put_autosuspend(rdev->ddev->dev);
412 }
413 }
397 /* TODO: check other events */ 414 /* TODO: check other events */
398 415
399 /* We've handled the event, stop the notifier chain. The ACPI interface 416 /* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6de342861202..a1321b2fa454 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -29,6 +29,7 @@ struct radeon_atpx {
29 acpi_handle handle; 29 acpi_handle handle;
30 struct radeon_atpx_functions functions; 30 struct radeon_atpx_functions functions;
31 bool is_hybrid; 31 bool is_hybrid;
32 bool dgpu_req_power_for_displays;
32}; 33};
33 34
34static struct radeon_atpx_priv { 35static struct radeon_atpx_priv {
@@ -72,6 +73,10 @@ bool radeon_is_atpx_hybrid(void) {
72 return radeon_atpx_priv.atpx.is_hybrid; 73 return radeon_atpx_priv.atpx.is_hybrid;
73} 74}
74 75
76bool radeon_atpx_dgpu_req_power_for_displays(void) {
77 return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
78}
79
75/** 80/**
76 * radeon_atpx_call - call an ATPX method 81 * radeon_atpx_call - call an ATPX method
77 * 82 *
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b423c0159581..eb92aef46e3c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
661{ 661{
662 uint32_t reg; 662 uint32_t reg;
663 663
664 /* for pass through, always force asic_init */ 664 /* for pass through, always force asic_init for CI */
665 if (radeon_device_is_virtual()) 665 if (rdev->family >= CHIP_BONAIRE &&
666 radeon_device_is_virtual())
666 return false; 667 return false;
667 668
668 /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 669 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
@@ -1594,8 +1595,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1594 1595
1595 rdev = dev->dev_private; 1596 rdev = dev->dev_private;
1596 1597
1597 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || 1598 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1598 dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
1599 return 0; 1599 return 0;
1600 1600
1601 drm_kms_helper_poll_disable(dev); 1601 drm_kms_helper_poll_disable(dev);
@@ -1690,8 +1690,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1690 struct drm_crtc *crtc; 1690 struct drm_crtc *crtc;
1691 int r; 1691 int r;
1692 1692
1693 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || 1693 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1694 dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
1695 return 0; 1694 return 0;
1696 1695
1697 if (fbcon) { 1696 if (fbcon) {