-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 172
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h) | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 219
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 178
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 101
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 42
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 35
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 35
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h | 2007
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c) | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 2486
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2422
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2568
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 2489
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 2344
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h | 44
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2269
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 3261
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 3117
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h | 20
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 29
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 19
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 367
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 31
65 files changed, 12036 insertions(+), 13396 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 25a95c95df14..ef9a3b6d7b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
25 amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ 25 amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
26 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ 26 amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
27 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ 27 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
28 amdgpu_queue_mgr.o amdgpu_vf_error.o 28 amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
29 29
30# add asic specific block 30# add asic specific block
31amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ 31amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a23b8af95319..cbcb6a153aba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -732,10 +732,14 @@ struct amdgpu_ctx {
732 struct amdgpu_device *adev; 732 struct amdgpu_device *adev;
733 struct amdgpu_queue_mgr queue_mgr; 733 struct amdgpu_queue_mgr queue_mgr;
734 unsigned reset_counter; 734 unsigned reset_counter;
735 uint32_t vram_lost_counter;
735 spinlock_t ring_lock; 736 spinlock_t ring_lock;
736 struct dma_fence **fences; 737 struct dma_fence **fences;
737 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; 738 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
738 bool preamble_presented; 739 bool preamble_presented;
740 enum amd_sched_priority init_priority;
741 enum amd_sched_priority override_priority;
742 struct mutex lock;
739}; 743};
740 744
741struct amdgpu_ctx_mgr { 745struct amdgpu_ctx_mgr {
@@ -752,13 +756,18 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
752 struct dma_fence *fence, uint64_t *seq); 756 struct dma_fence *fence, uint64_t *seq);
753struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, 757struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
754 struct amdgpu_ring *ring, uint64_t seq); 758 struct amdgpu_ring *ring, uint64_t seq);
759void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
760 enum amd_sched_priority priority);
755 761
756int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, 762int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
757 struct drm_file *filp); 763 struct drm_file *filp);
758 764
765int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
766
759void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); 767void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
760void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); 768void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
761 769
770
762/* 771/*
763 * file private structure 772 * file private structure
764 */ 773 */
@@ -770,7 +779,6 @@ struct amdgpu_fpriv {
770 struct mutex bo_list_lock; 779 struct mutex bo_list_lock;
771 struct idr bo_list_handles; 780 struct idr bo_list_handles;
772 struct amdgpu_ctx_mgr ctx_mgr; 781 struct amdgpu_ctx_mgr ctx_mgr;
773 u32 vram_lost_counter;
774}; 782};
775 783
776/* 784/*
@@ -871,7 +879,7 @@ struct amdgpu_mec {
871struct amdgpu_kiq { 879struct amdgpu_kiq {
872 u64 eop_gpu_addr; 880 u64 eop_gpu_addr;
873 struct amdgpu_bo *eop_obj; 881 struct amdgpu_bo *eop_obj;
874 struct mutex ring_mutex; 882 spinlock_t ring_lock;
875 struct amdgpu_ring ring; 883 struct amdgpu_ring ring;
876 struct amdgpu_irq_src irq; 884 struct amdgpu_irq_src irq;
877}; 885};
@@ -1035,6 +1043,10 @@ struct amdgpu_gfx {
1035 bool in_suspend; 1043 bool in_suspend;
1036 /* NGG */ 1044 /* NGG */
1037 struct amdgpu_ngg ngg; 1045 struct amdgpu_ngg ngg;
1046
1047 /* pipe reservation */
1048 struct mutex pipe_reserve_mutex;
1049 DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1038}; 1050};
1039 1051
1040int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1052int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1113,6 +1125,7 @@ struct amdgpu_job {
1113 uint32_t gds_base, gds_size; 1125 uint32_t gds_base, gds_size;
1114 uint32_t gws_base, gws_size; 1126 uint32_t gws_base, gws_size;
1115 uint32_t oa_base, oa_size; 1127 uint32_t oa_base, oa_size;
1128 uint32_t vram_lost_counter;
1116 1129
1117 /* user fence handling */ 1130 /* user fence handling */
1118 uint64_t uf_addr; 1131 uint64_t uf_addr;
@@ -1138,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
1138/* 1151/*
1139 * Writeback 1152 * Writeback
1140 */ 1153 */
1141#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ 1154#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
1142 1155
1143struct amdgpu_wb { 1156struct amdgpu_wb {
1144 struct amdgpu_bo *wb_obj; 1157 struct amdgpu_bo *wb_obj;
@@ -1379,6 +1392,18 @@ struct amdgpu_atcs {
1379}; 1392};
1380 1393
1381/* 1394/*
1395 * Firmware VRAM reservation
1396 */
1397struct amdgpu_fw_vram_usage {
1398 u64 start_offset;
1399 u64 size;
1400 struct amdgpu_bo *reserved_bo;
1401 void *va;
1402};
1403
1404int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
1405
1406/*
1382 * CGS 1407 * CGS
1383 */ 1408 */
1384struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 1409struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -1582,6 +1607,8 @@ struct amdgpu_device {
1582 struct delayed_work late_init_work; 1607 struct delayed_work late_init_work;
1583 1608
1584 struct amdgpu_virt virt; 1609 struct amdgpu_virt virt;
1610 /* firmware VRAM reservation */
1611 struct amdgpu_fw_vram_usage fw_vram_usage;
1585 1612
1586 /* link all shadow bo */ 1613 /* link all shadow bo */
1587 struct list_head shadow_list; 1614 struct list_head shadow_list;
@@ -1833,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
1833extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; 1860extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1834extern const int amdgpu_max_kms_ioctl; 1861extern const int amdgpu_max_kms_ioctl;
1835 1862
1836bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
1837 struct amdgpu_fpriv *fpriv);
1838int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); 1863int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
1839void amdgpu_driver_unload_kms(struct drm_device *dev); 1864void amdgpu_driver_unload_kms(struct drm_device *dev);
1840void amdgpu_driver_lastclose_kms(struct drm_device *dev); 1865void amdgpu_driver_lastclose_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index ce443586a0c7..f66d33e4baca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1807,6 +1807,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
1807 uint16_t data_offset; 1807 uint16_t data_offset;
1808 int usage_bytes = 0; 1808 int usage_bytes = 0;
1809 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; 1809 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1810 u64 start_addr;
1811 u64 size;
1810 1812
1811 if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 1813 if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1812 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1814 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1817,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
1815 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), 1817 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1816 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); 1818 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1817 1819
1818 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; 1820 start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
1821 size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
1822
1823 if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
1824 (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
1825 ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
 1826 /* Firmware requests VRAM reservation for SR-IOV */
1827 adev->fw_vram_usage.start_offset = (start_addr &
1828 (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
1829 adev->fw_vram_usage.size = size << 10;
1830 /* Use the default scratch size */
1831 usage_bytes = 0;
1832 } else {
1833 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1834 }
1819 } 1835 }
1820 ctx->scratch_size_bytes = 0; 1836 ctx->scratch_size_bytes = 0;
1821 if (usage_bytes == 0) 1837 if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ab83dfcabb41..f7fceb63413c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -90,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
90 goto free_chunk; 90 goto free_chunk;
91 } 91 }
92 92
93 mutex_lock(&p->ctx->lock);
94
93 /* get chunks */ 95 /* get chunks */
94 chunk_array_user = u64_to_user_ptr(cs->in.chunks); 96 chunk_array_user = u64_to_user_ptr(cs->in.chunks);
95 if (copy_from_user(chunk_array, chunk_array_user, 97 if (copy_from_user(chunk_array, chunk_array_user,
96 sizeof(uint64_t)*cs->in.num_chunks)) { 98 sizeof(uint64_t)*cs->in.num_chunks)) {
97 ret = -EFAULT; 99 ret = -EFAULT;
98 goto put_ctx; 100 goto free_chunk;
99 } 101 }
100 102
101 p->nchunks = cs->in.num_chunks; 103 p->nchunks = cs->in.num_chunks;
@@ -103,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
103 GFP_KERNEL); 105 GFP_KERNEL);
104 if (!p->chunks) { 106 if (!p->chunks) {
105 ret = -ENOMEM; 107 ret = -ENOMEM;
106 goto put_ctx; 108 goto free_chunk;
107 } 109 }
108 110
109 for (i = 0; i < p->nchunks; i++) { 111 for (i = 0; i < p->nchunks; i++) {
@@ -170,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
170 if (ret) 172 if (ret)
171 goto free_all_kdata; 173 goto free_all_kdata;
172 174
175 if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
176 ret = -ECANCELED;
177 goto free_all_kdata;
178 }
179
173 if (p->uf_entry.robj) 180 if (p->uf_entry.robj)
174 p->job->uf_addr = uf_offset; 181 p->job->uf_addr = uf_offset;
175 kfree(chunk_array); 182 kfree(chunk_array);
@@ -183,8 +190,6 @@ free_partial_kdata:
183 kfree(p->chunks); 190 kfree(p->chunks);
184 p->chunks = NULL; 191 p->chunks = NULL;
185 p->nchunks = 0; 192 p->nchunks = 0;
186put_ctx:
187 amdgpu_ctx_put(p->ctx);
188free_chunk: 193free_chunk:
189 kfree(chunk_array); 194 kfree(chunk_array);
190 195
@@ -705,7 +710,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
705 710
706 list_for_each_entry(e, &p->validated, tv.head) { 711 list_for_each_entry(e, &p->validated, tv.head) {
707 struct reservation_object *resv = e->robj->tbo.resv; 712 struct reservation_object *resv = e->robj->tbo.resv;
708 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); 713 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
714 amdgpu_bo_explicit_sync(e->robj));
709 715
710 if (r) 716 if (r)
711 return r; 717 return r;
@@ -736,8 +742,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
736 742
737 dma_fence_put(parser->fence); 743 dma_fence_put(parser->fence);
738 744
739 if (parser->ctx) 745 if (parser->ctx) {
746 mutex_unlock(&parser->ctx->lock);
740 amdgpu_ctx_put(parser->ctx); 747 amdgpu_ctx_put(parser->ctx);
748 }
741 if (parser->bo_list) 749 if (parser->bo_list)
742 amdgpu_bo_list_put(parser->bo_list); 750 amdgpu_bo_list_put(parser->bo_list);
743 751
@@ -844,14 +852,58 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
844 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 852 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
845 struct amdgpu_vm *vm = &fpriv->vm; 853 struct amdgpu_vm *vm = &fpriv->vm;
846 struct amdgpu_ring *ring = p->job->ring; 854 struct amdgpu_ring *ring = p->job->ring;
847 int i, r; 855 int r;
848 856
849 /* Only for UVD/VCE VM emulation */ 857 /* Only for UVD/VCE VM emulation */
850 if (ring->funcs->parse_cs) { 858 if (p->job->ring->funcs->parse_cs) {
851 for (i = 0; i < p->job->num_ibs; i++) { 859 unsigned i, j;
852 r = amdgpu_ring_parse_cs(ring, p, i); 860
861 for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
862 struct drm_amdgpu_cs_chunk_ib *chunk_ib;
863 struct amdgpu_bo_va_mapping *m;
864 struct amdgpu_bo *aobj = NULL;
865 struct amdgpu_cs_chunk *chunk;
866 struct amdgpu_ib *ib;
867 uint64_t offset;
868 uint8_t *kptr;
869
870 chunk = &p->chunks[i];
871 ib = &p->job->ibs[j];
872 chunk_ib = chunk->kdata;
873
874 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
875 continue;
876
877 r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
878 &aobj, &m);
879 if (r) {
880 DRM_ERROR("IB va_start is invalid\n");
881 return r;
882 }
883
884 if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
885 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
886 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
887 return -EINVAL;
888 }
889
890 /* the IB should be reserved at this point */
891 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
892 if (r) {
893 return r;
894 }
895
896 offset = m->start * AMDGPU_GPU_PAGE_SIZE;
897 kptr += chunk_ib->va_start - offset;
898
899 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
900 amdgpu_bo_kunmap(aobj);
901
902 r = amdgpu_ring_parse_cs(ring, p, j);
853 if (r) 903 if (r)
854 return r; 904 return r;
905
906 j++;
855 } 907 }
856 } 908 }
857 909
@@ -918,54 +970,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
918 970
919 parser->job->ring = ring; 971 parser->job->ring = ring;
920 972
921 if (ring->funcs->parse_cs) { 973 r = amdgpu_ib_get(adev, vm,
922 struct amdgpu_bo_va_mapping *m; 974 ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
923 struct amdgpu_bo *aobj = NULL; 975 ib);
924 uint64_t offset; 976 if (r) {
925 uint8_t *kptr; 977 DRM_ERROR("Failed to get ib !\n");
926 978 return r;
927 r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
928 &aobj, &m);
929 if (r) {
930 DRM_ERROR("IB va_start is invalid\n");
931 return r;
932 }
933
934 if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
935 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
936 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
937 return -EINVAL;
938 }
939
940 /* the IB should be reserved at this point */
941 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
942 if (r) {
943 return r;
944 }
945
946 offset = m->start * AMDGPU_GPU_PAGE_SIZE;
947 kptr += chunk_ib->va_start - offset;
948
949 r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
950 if (r) {
951 DRM_ERROR("Failed to get ib !\n");
952 return r;
953 }
954
955 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
956 amdgpu_bo_kunmap(aobj);
957 } else {
958 r = amdgpu_ib_get(adev, vm, 0, ib);
959 if (r) {
960 DRM_ERROR("Failed to get ib !\n");
961 return r;
962 }
963
964 } 979 }
965 980
966 ib->gpu_addr = chunk_ib->va_start; 981 ib->gpu_addr = chunk_ib->va_start;
967 ib->length_dw = chunk_ib->ib_bytes / 4; 982 ib->length_dw = chunk_ib->ib_bytes / 4;
968 ib->flags = chunk_ib->flags; 983 ib->flags = chunk_ib->flags;
984
969 j++; 985 j++;
970 } 986 }
971 987
@@ -975,7 +991,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
975 parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) 991 parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
976 return -EINVAL; 992 return -EINVAL;
977 993
978 return 0; 994 return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
979} 995}
980 996
981static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, 997static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1176,6 +1192,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1176 job->uf_sequence = seq; 1192 job->uf_sequence = seq;
1177 1193
1178 amdgpu_job_free_resources(job); 1194 amdgpu_job_free_resources(job);
1195 amdgpu_ring_priority_get(job->ring,
1196 amd_sched_get_job_priority(&job->base));
1179 1197
1180 trace_amdgpu_cs_ioctl(job); 1198 trace_amdgpu_cs_ioctl(job);
1181 amd_sched_entity_push_job(&job->base); 1199 amd_sched_entity_push_job(&job->base);
@@ -1189,7 +1207,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1189int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1207int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1190{ 1208{
1191 struct amdgpu_device *adev = dev->dev_private; 1209 struct amdgpu_device *adev = dev->dev_private;
1192 struct amdgpu_fpriv *fpriv = filp->driver_priv;
1193 union drm_amdgpu_cs *cs = data; 1210 union drm_amdgpu_cs *cs = data;
1194 struct amdgpu_cs_parser parser = {}; 1211 struct amdgpu_cs_parser parser = {};
1195 bool reserved_buffers = false; 1212 bool reserved_buffers = false;
@@ -1197,8 +1214,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1197 1214
1198 if (!adev->accel_working) 1215 if (!adev->accel_working)
1199 return -EBUSY; 1216 return -EBUSY;
1200 if (amdgpu_kms_vram_lost(adev, fpriv))
1201 return -ENODEV;
1202 1217
1203 parser.adev = adev; 1218 parser.adev = adev;
1204 parser.filp = filp; 1219 parser.filp = filp;
@@ -1209,6 +1224,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1209 goto out; 1224 goto out;
1210 } 1225 }
1211 1226
1227 r = amdgpu_cs_ib_fill(adev, &parser);
1228 if (r)
1229 goto out;
1230
1212 r = amdgpu_cs_parser_bos(&parser, data); 1231 r = amdgpu_cs_parser_bos(&parser, data);
1213 if (r) { 1232 if (r) {
1214 if (r == -ENOMEM) 1233 if (r == -ENOMEM)
@@ -1219,9 +1238,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1219 } 1238 }
1220 1239
1221 reserved_buffers = true; 1240 reserved_buffers = true;
1222 r = amdgpu_cs_ib_fill(adev, &parser);
1223 if (r)
1224 goto out;
1225 1241
1226 r = amdgpu_cs_dependencies(adev, &parser); 1242 r = amdgpu_cs_dependencies(adev, &parser);
1227 if (r) { 1243 if (r) {
@@ -1257,16 +1273,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1257{ 1273{
1258 union drm_amdgpu_wait_cs *wait = data; 1274 union drm_amdgpu_wait_cs *wait = data;
1259 struct amdgpu_device *adev = dev->dev_private; 1275 struct amdgpu_device *adev = dev->dev_private;
1260 struct amdgpu_fpriv *fpriv = filp->driver_priv;
1261 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); 1276 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1262 struct amdgpu_ring *ring = NULL; 1277 struct amdgpu_ring *ring = NULL;
1263 struct amdgpu_ctx *ctx; 1278 struct amdgpu_ctx *ctx;
1264 struct dma_fence *fence; 1279 struct dma_fence *fence;
1265 long r; 1280 long r;
1266 1281
1267 if (amdgpu_kms_vram_lost(adev, fpriv))
1268 return -ENODEV;
1269
1270 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); 1282 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1271 if (ctx == NULL) 1283 if (ctx == NULL)
1272 return -EINVAL; 1284 return -EINVAL;
@@ -1284,6 +1296,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1284 r = PTR_ERR(fence); 1296 r = PTR_ERR(fence);
1285 else if (fence) { 1297 else if (fence) {
1286 r = dma_fence_wait_timeout(fence, true, timeout); 1298 r = dma_fence_wait_timeout(fence, true, timeout);
1299 if (r > 0 && fence->error)
1300 r = fence->error;
1287 dma_fence_put(fence); 1301 dma_fence_put(fence);
1288 } else 1302 } else
1289 r = 1; 1303 r = 1;
@@ -1335,16 +1349,12 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1335 struct drm_file *filp) 1349 struct drm_file *filp)
1336{ 1350{
1337 struct amdgpu_device *adev = dev->dev_private; 1351 struct amdgpu_device *adev = dev->dev_private;
1338 struct amdgpu_fpriv *fpriv = filp->driver_priv;
1339 union drm_amdgpu_fence_to_handle *info = data; 1352 union drm_amdgpu_fence_to_handle *info = data;
1340 struct dma_fence *fence; 1353 struct dma_fence *fence;
1341 struct drm_syncobj *syncobj; 1354 struct drm_syncobj *syncobj;
1342 struct sync_file *sync_file; 1355 struct sync_file *sync_file;
1343 int fd, r; 1356 int fd, r;
1344 1357
1345 if (amdgpu_kms_vram_lost(adev, fpriv))
1346 return -ENODEV;
1347
1348 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence); 1358 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1349 if (IS_ERR(fence)) 1359 if (IS_ERR(fence))
1350 return PTR_ERR(fence); 1360 return PTR_ERR(fence);
@@ -1425,6 +1435,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1425 1435
1426 if (r == 0) 1436 if (r == 0)
1427 break; 1437 break;
1438
1439 if (fence->error)
1440 return fence->error;
1428 } 1441 }
1429 1442
1430 memset(wait, 0, sizeof(*wait)); 1443 memset(wait, 0, sizeof(*wait));
@@ -1485,7 +1498,7 @@ out:
1485 wait->out.status = (r > 0); 1498 wait->out.status = (r > 0);
1486 wait->out.first_signaled = first; 1499 wait->out.first_signaled = first;
1487 /* set return value 0 to indicate success */ 1500 /* set return value 0 to indicate success */
1488 r = 0; 1501 r = array[first]->error;
1489 1502
1490err_free_fence_array: 1503err_free_fence_array:
1491 for (i = 0; i < fence_count; i++) 1504 for (i = 0; i < fence_count; i++)
@@ -1506,15 +1519,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1506 struct drm_file *filp) 1519 struct drm_file *filp)
1507{ 1520{
1508 struct amdgpu_device *adev = dev->dev_private; 1521 struct amdgpu_device *adev = dev->dev_private;
1509 struct amdgpu_fpriv *fpriv = filp->driver_priv;
1510 union drm_amdgpu_wait_fences *wait = data; 1522 union drm_amdgpu_wait_fences *wait = data;
1511 uint32_t fence_count = wait->in.fence_count; 1523 uint32_t fence_count = wait->in.fence_count;
1512 struct drm_amdgpu_fence *fences_user; 1524 struct drm_amdgpu_fence *fences_user;
1513 struct drm_amdgpu_fence *fences; 1525 struct drm_amdgpu_fence *fences;
1514 int r; 1526 int r;
1515 1527
1516 if (amdgpu_kms_vram_lost(adev, fpriv))
1517 return -ENODEV;
1518 /* Get the fences from userspace */ 1528 /* Get the fences from userspace */
1519 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), 1529 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1520 GFP_KERNEL); 1530 GFP_KERNEL);
@@ -1572,14 +1582,14 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1572 if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket) 1582 if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
1573 return -EINVAL; 1583 return -EINVAL;
1574 1584
1575 r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem); 1585 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1576 if (unlikely(r)) 1586 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1577 return r; 1587 amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
1578 1588 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
1579 if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) 1589 false);
1580 return 0; 1590 if (r)
1591 return r;
1592 }
1581 1593
1582 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1594 return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
1583 amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
1584 return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
1585} 1595}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 75c933b1a432..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_auth.h>
26#include "amdgpu.h" 27#include "amdgpu.h"
28#include "amdgpu_sched.h"
27 29
28static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) 30static int amdgpu_ctx_priority_permit(struct drm_file *filp,
31 enum amd_sched_priority priority)
32{
33 /* NORMAL and below are accessible by everyone */
34 if (priority <= AMD_SCHED_PRIORITY_NORMAL)
35 return 0;
36
37 if (capable(CAP_SYS_NICE))
38 return 0;
39
40 if (drm_is_current_master(filp))
41 return 0;
42
43 return -EACCES;
44}
45
46static int amdgpu_ctx_init(struct amdgpu_device *adev,
47 enum amd_sched_priority priority,
48 struct drm_file *filp,
49 struct amdgpu_ctx *ctx)
29{ 50{
30 unsigned i, j; 51 unsigned i, j;
31 int r; 52 int r;
32 53
54 if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
55 return -EINVAL;
56
57 r = amdgpu_ctx_priority_permit(filp, priority);
58 if (r)
59 return r;
60
33 memset(ctx, 0, sizeof(*ctx)); 61 memset(ctx, 0, sizeof(*ctx));
34 ctx->adev = adev; 62 ctx->adev = adev;
35 kref_init(&ctx->refcount); 63 kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
39 if (!ctx->fences) 67 if (!ctx->fences)
40 return -ENOMEM; 68 return -ENOMEM;
41 69
70 mutex_init(&ctx->lock);
71
42 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 72 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
43 ctx->rings[i].sequence = 1; 73 ctx->rings[i].sequence = 1;
44 ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; 74 ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
45 } 75 }
46 76
47 ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); 77 ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
78 ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
79 ctx->init_priority = priority;
80 ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
48 81
49 /* create context entity for each ring */ 82 /* create context entity for each ring */
50 for (i = 0; i < adev->num_rings; i++) { 83 for (i = 0; i < adev->num_rings; i++) {
51 struct amdgpu_ring *ring = adev->rings[i]; 84 struct amdgpu_ring *ring = adev->rings[i];
52 struct amd_sched_rq *rq; 85 struct amd_sched_rq *rq;
53 86
54 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; 87 rq = &ring->sched.sched_rq[priority];
55 88
56 if (ring == &adev->gfx.kiq.ring) 89 if (ring == &adev->gfx.kiq.ring)
57 continue; 90 continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
96 &ctx->rings[i].entity); 129 &ctx->rings[i].entity);
97 130
98 amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); 131 amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
132
133 mutex_destroy(&ctx->lock);
99} 134}
100 135
101static int amdgpu_ctx_alloc(struct amdgpu_device *adev, 136static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
102 struct amdgpu_fpriv *fpriv, 137 struct amdgpu_fpriv *fpriv,
138 struct drm_file *filp,
139 enum amd_sched_priority priority,
103 uint32_t *id) 140 uint32_t *id)
104{ 141{
105 struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; 142 struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
117 kfree(ctx); 154 kfree(ctx);
118 return r; 155 return r;
119 } 156 }
157
120 *id = (uint32_t)r; 158 *id = (uint32_t)r;
121 r = amdgpu_ctx_init(adev, ctx); 159 r = amdgpu_ctx_init(adev, priority, filp, ctx);
122 if (r) { 160 if (r) {
123 idr_remove(&mgr->ctx_handles, *id); 161 idr_remove(&mgr->ctx_handles, *id);
124 *id = 0; 162 *id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
193{ 231{
194 int r; 232 int r;
195 uint32_t id; 233 uint32_t id;
234 enum amd_sched_priority priority;
196 235
197 union drm_amdgpu_ctx *args = data; 236 union drm_amdgpu_ctx *args = data;
198 struct amdgpu_device *adev = dev->dev_private; 237 struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
200 239
201 r = 0; 240 r = 0;
202 id = args->in.ctx_id; 241 id = args->in.ctx_id;
242 priority = amdgpu_to_sched_priority(args->in.priority);
243
244 /* For backwards compatibility reasons, we need to accept
245 * ioctls with garbage in the priority field */
246 if (priority == AMD_SCHED_PRIORITY_INVALID)
247 priority = AMD_SCHED_PRIORITY_NORMAL;
203 248
204 switch (args->in.op) { 249 switch (args->in.op) {
205 case AMDGPU_CTX_OP_ALLOC_CTX: 250 case AMDGPU_CTX_OP_ALLOC_CTX:
206 r = amdgpu_ctx_alloc(adev, fpriv, &id); 251 r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
207 args->out.alloc.ctx_id = id; 252 args->out.alloc.ctx_id = id;
208 break; 253 break;
209 case AMDGPU_CTX_OP_FREE_CTX: 254 case AMDGPU_CTX_OP_FREE_CTX:
@@ -256,12 +301,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
256 301
257 idx = seq & (amdgpu_sched_jobs - 1); 302 idx = seq & (amdgpu_sched_jobs - 1);
258 other = cring->fences[idx]; 303 other = cring->fences[idx];
259 if (other) { 304 if (other)
260 signed long r; 305 BUG_ON(!dma_fence_is_signaled(other));
261 r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
262 if (r < 0)
263 return r;
264 }
265 306
266 dma_fence_get(fence); 307 dma_fence_get(fence);
267 308
@@ -305,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
305 return fence; 346 return fence;
306} 347}
307 348
349void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
350 enum amd_sched_priority priority)
351{
352 int i;
353 struct amdgpu_device *adev = ctx->adev;
354 struct amd_sched_rq *rq;
355 struct amd_sched_entity *entity;
356 struct amdgpu_ring *ring;
357 enum amd_sched_priority ctx_prio;
358
359 ctx->override_priority = priority;
360
361 ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
362 ctx->init_priority : ctx->override_priority;
363
364 for (i = 0; i < adev->num_rings; i++) {
365 ring = adev->rings[i];
366 entity = &ctx->rings[i].entity;
367 rq = &ring->sched.sched_rq[ctx_prio];
368
369 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
370 continue;
371
372 amd_sched_entity_set_rq(entity, rq);
373 }
374}
375
376int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
377{
378 struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
379 unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
380 struct dma_fence *other = cring->fences[idx];
381
382 if (other) {
383 signed long r;
384 r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
385 if (r < 0) {
386 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
387 return r;
388 }
389 }
390
391 return 0;
392}
393
308void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) 394void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
309{ 395{
310 mutex_init(&mgr->lock); 396 mutex_init(&mgr->lock);
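
For reference, a minimal userspace sketch of the context-priority path wired up above. It assumes the in.priority field and the AMDGPU_CTX_PRIORITY_* constants added to include/uapi/drm/amdgpu_drm.h by this series (that hunk is not shown here), and the header path may differ per libdrm installation:

/* Hedged sketch: allocate a high-priority context through DRM_IOCTL_AMDGPU_CTX.
 * Per amdgpu_ctx_priority_permit() above, priorities above NORMAL require
 * CAP_SYS_NICE or DRM master status.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int alloc_high_prio_ctx(int drm_fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_CTX, &args))
		return -1;

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}
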
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1949d8aedf49..0b9332e65a4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -109,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
109{ 109{
110 uint32_t ret; 110 uint32_t ret;
111 111
112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
113 BUG_ON(in_interrupt());
114 return amdgpu_virt_kiq_rreg(adev, reg); 113 return amdgpu_virt_kiq_rreg(adev, reg);
115 }
116 114
117 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 115 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
118 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); 116 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -137,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
137 adev->last_mm_index = v; 135 adev->last_mm_index = v;
138 } 136 }
139 137
140 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { 138 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
141 BUG_ON(in_interrupt());
142 return amdgpu_virt_kiq_wreg(adev, reg, v); 139 return amdgpu_virt_kiq_wreg(adev, reg, v);
143 }
144 140
145 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 141 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
146 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 142 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -658,6 +654,81 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
658} 654}
659 655
660/* 656/*
657 * Firmware Reservation functions
658 */
659/**
660 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
661 *
662 * @adev: amdgpu_device pointer
663 *
664 * free fw reserved vram if it has been reserved.
665 */
666void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
667{
668 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
669 NULL, &adev->fw_vram_usage.va);
670}
671
672/**
673 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
674 *
675 * @adev: amdgpu_device pointer
676 *
677 * create bo vram reservation from fw.
678 */
679int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
680{
681 int r = 0;
682 u64 gpu_addr;
683 u64 vram_size = adev->mc.visible_vram_size;
684
685 adev->fw_vram_usage.va = NULL;
686 adev->fw_vram_usage.reserved_bo = NULL;
687
688 if (adev->fw_vram_usage.size > 0 &&
689 adev->fw_vram_usage.size <= vram_size) {
690
691 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
692 PAGE_SIZE, true, 0,
693 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
694 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
695 &adev->fw_vram_usage.reserved_bo);
696 if (r)
697 goto error_create;
698
699 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
700 if (r)
701 goto error_reserve;
702 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
703 AMDGPU_GEM_DOMAIN_VRAM,
704 adev->fw_vram_usage.start_offset,
705 (adev->fw_vram_usage.start_offset +
706 adev->fw_vram_usage.size), &gpu_addr);
707 if (r)
708 goto error_pin;
709 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
710 &adev->fw_vram_usage.va);
711 if (r)
712 goto error_kmap;
713
714 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
715 }
716 return r;
717
718error_kmap:
719 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
720error_pin:
721 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
722error_reserve:
723 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
724error_create:
725 adev->fw_vram_usage.va = NULL;
726 adev->fw_vram_usage.reserved_bo = NULL;
727 return r;
728}
729
730
731/*
661 * GPU helpers function. 732 * GPU helpers function.
662 */ 733 */
663/** 734/**
@@ -1604,7 +1675,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
1604 return r; 1675 return r;
1605 } 1676 }
1606 adev->ip_blocks[i].status.sw = true; 1677 adev->ip_blocks[i].status.sw = true;
1607
1608 /* need to do gmc hw init early so we can allocate gpu mem */ 1678 /* need to do gmc hw init early so we can allocate gpu mem */
1609 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1679 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1610 r = amdgpu_vram_scratch_init(adev); 1680 r = amdgpu_vram_scratch_init(adev);
@@ -1635,11 +1705,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
1635 } 1705 }
1636 } 1706 }
1637 1707
1638 mutex_lock(&adev->firmware.mutex);
1639 if (amdgpu_ucode_init_bo(adev))
1640 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1641 mutex_unlock(&adev->firmware.mutex);
1642
1643 for (i = 0; i < adev->num_ip_blocks; i++) { 1708 for (i = 0; i < adev->num_ip_blocks; i++) {
1644 if (!adev->ip_blocks[i].status.sw) 1709 if (!adev->ip_blocks[i].status.sw)
1645 continue; 1710 continue;
@@ -1775,8 +1840,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1775 1840
1776 adev->ip_blocks[i].status.hw = false; 1841 adev->ip_blocks[i].status.hw = false;
1777 } 1842 }
1778 if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
1779 amdgpu_ucode_fini_bo(adev);
1780 1843
1781 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1844 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1782 if (!adev->ip_blocks[i].status.sw) 1845 if (!adev->ip_blocks[i].status.sw)
@@ -2019,6 +2082,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
2019 adev->vm_manager.vm_pte_num_rings = 0; 2082 adev->vm_manager.vm_pte_num_rings = 0;
2020 adev->gart.gart_funcs = NULL; 2083 adev->gart.gart_funcs = NULL;
2021 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2084 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2085 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2022 2086
2023 adev->smc_rreg = &amdgpu_invalid_rreg; 2087 adev->smc_rreg = &amdgpu_invalid_rreg;
2024 adev->smc_wreg = &amdgpu_invalid_wreg; 2088 adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2047,6 +2111,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
2047 mutex_init(&adev->pm.mutex); 2111 mutex_init(&adev->pm.mutex);
2048 mutex_init(&adev->gfx.gpu_clock_mutex); 2112 mutex_init(&adev->gfx.gpu_clock_mutex);
2049 mutex_init(&adev->srbm_mutex); 2113 mutex_init(&adev->srbm_mutex);
2114 mutex_init(&adev->gfx.pipe_reserve_mutex);
2050 mutex_init(&adev->grbm_idx_mutex); 2115 mutex_init(&adev->grbm_idx_mutex);
2051 mutex_init(&adev->mn_lock); 2116 mutex_init(&adev->mn_lock);
2052 mutex_init(&adev->virt.vf_errors.lock); 2117 mutex_init(&adev->virt.vf_errors.lock);
@@ -2223,6 +2288,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
2223 if (r) 2288 if (r)
2224 DRM_ERROR("ib ring test failed (%d).\n", r); 2289 DRM_ERROR("ib ring test failed (%d).\n", r);
2225 2290
2291 if (amdgpu_sriov_vf(adev))
2292 amdgpu_virt_init_data_exchange(adev);
2293
2226 amdgpu_fbdev_init(adev); 2294 amdgpu_fbdev_init(adev);
2227 2295
2228 r = amdgpu_pm_sysfs_init(adev); 2296 r = amdgpu_pm_sysfs_init(adev);
@@ -2300,6 +2368,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2300 /* evict vram memory */ 2368 /* evict vram memory */
2301 amdgpu_bo_evict_vram(adev); 2369 amdgpu_bo_evict_vram(adev);
2302 amdgpu_ib_pool_fini(adev); 2370 amdgpu_ib_pool_fini(adev);
2371 amdgpu_fw_reserve_vram_fini(adev);
2303 amdgpu_fence_driver_fini(adev); 2372 amdgpu_fence_driver_fini(adev);
2304 amdgpu_fbdev_fini(adev); 2373 amdgpu_fbdev_fini(adev);
2305 r = amdgpu_fini(adev); 2374 r = amdgpu_fini(adev);
@@ -2552,6 +2621,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2552 int i; 2621 int i;
2553 bool asic_hang = false; 2622 bool asic_hang = false;
2554 2623
2624 if (amdgpu_sriov_vf(adev))
2625 return true;
2626
2555 for (i = 0; i < adev->num_ip_blocks; i++) { 2627 for (i = 0; i < adev->num_ip_blocks; i++) {
2556 if (!adev->ip_blocks[i].status.valid) 2628 if (!adev->ip_blocks[i].status.valid)
2557 continue; 2629 continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ad02d3fbb44c..dd2f060d62a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -71,9 +71,11 @@
71 * - 3.19.0 - Add support for UVD MJPEG decode 71 * - 3.19.0 - Add support for UVD MJPEG decode
72 * - 3.20.0 - Add support for local BOs 72 * - 3.20.0 - Add support for local BOs
73 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl 73 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
74 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
75 * - 3.23.0 - Add query for VRAM lost counter
74 */ 76 */
75#define KMS_DRIVER_MAJOR 3 77#define KMS_DRIVER_MAJOR 3
76#define KMS_DRIVER_MINOR 21 78#define KMS_DRIVER_MINOR 23
77#define KMS_DRIVER_PATCHLEVEL 0 79#define KMS_DRIVER_PATCHLEVEL 0
78 80
79int amdgpu_vram_limit = 0; 81int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..fb9f88ef6059 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -169,6 +169,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
169} 169}
170 170
171/** 171/**
172 * amdgpu_fence_emit_polling - emit a fence on the requeste ring
173 *
174 * @ring: ring the fence is associated with
175 * @s: resulting sequence number
176 *
177 * Emits a fence command on the requested ring (all asics).
178 * Used For polling fence.
179 * Returns 0 on success, -ENOMEM on failure.
180 */
181int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
182{
183 uint32_t seq;
184
185 if (!s)
186 return -EINVAL;
187
188 seq = ++ring->fence_drv.sync_seq;
189 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
190 seq, AMDGPU_FENCE_FLAG_INT);
191
192 *s = seq;
193
194 return 0;
195}
196
197/**
172 * amdgpu_fence_schedule_fallback - schedule fallback check 198 * amdgpu_fence_schedule_fallback - schedule fallback check
173 * 199 *
174 * @ring: pointer to struct amdgpu_ring 200 * @ring: pointer to struct amdgpu_ring
@@ -282,6 +308,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
282} 308}
283 309
284/** 310/**
 311 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 312 *
 313 * @ring: ring the fence is associated with
 314 * @wait_seq: sequence number to wait for
 315 * @timeout: the timeout for waiting, in usecs
 316 *
 317 * Busy-waits until the given sequence number signals on the requested ring.
 318 * Returns the time left before timeout, or 0 if the wait timed out.
319 */
320signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
321 uint32_t wait_seq,
322 signed long timeout)
323{
324 uint32_t seq;
325
326 do {
327 seq = amdgpu_fence_read(ring);
328 udelay(5);
329 timeout -= 5;
330 } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
331
332 return timeout > 0 ? timeout : 0;
333}
334/**
285 * amdgpu_fence_count_emitted - get the count of emitted fences 335 * amdgpu_fence_count_emitted - get the count of emitted fences
286 * 336 *
287 * @ring: ring the fence is associated with 337 * @ring: ring the fence is associated with
@@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
641 atomic_read(&ring->fence_drv.last_seq)); 691 atomic_read(&ring->fence_drv.last_seq));
642 seq_printf(m, "Last emitted 0x%08x\n", 692 seq_printf(m, "Last emitted 0x%08x\n",
643 ring->fence_drv.sync_seq); 693 ring->fence_drv.sync_seq);
694
695 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
696 continue;
697
698 /* set in CP_VMID_PREEMPT and preemption occurred */
699 seq_printf(m, "Last preempted 0x%08x\n",
700 le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
701 /* set in CP_VMID_RESET and reset occurred */
702 seq_printf(m, "Last reset 0x%08x\n",
703 le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
704 /* Both preemption and reset occurred */
705 seq_printf(m, "Last both 0x%08x\n",
706 le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
644 } 707 }
645 return 0; 708 return 0;
646} 709}
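
The two polling helpers added above are meant for callers that cannot sleep on an interrupt-driven fence, most notably KIQ register access under SR-IOV (the amdgpu_virt.c hunk that uses them is part of this series but not shown here). A hedged in-kernel sketch of how such a caller might pair them; the 32-dword ring allocation and the 5000 us budget are assumptions for illustration:

#include "amdgpu.h"

/* Sketch only: emit a register read on the KIQ ring, fence it with
 * amdgpu_fence_emit_polling() and busy-wait for completion with
 * amdgpu_fence_wait_polling().
 */
static uint32_t kiq_rreg_polling_sketch(struct amdgpu_device *adev, uint32_t reg)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	unsigned long flags;
	uint32_t seq;
	signed long r;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);		/* room for the rreg and fence packets */
	amdgpu_ring_emit_rreg(ring, reg);	/* KIQ writes the value into a WB slot */
	amdgpu_fence_emit_polling(ring, &seq);	/* sequence number we can busy-wait on */
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, 5000 /* usecs, assumed budget */);
	if (r < 1)
		return ~0;			/* timed out */

	return adev->wb.wb[adev->virt.reg_val_offs];
}
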
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b0d45c8e6bb3..fb72edc4c026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -212,7 +212,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
212 AMDGPU_GEM_CREATE_NO_CPU_ACCESS | 212 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
213 AMDGPU_GEM_CREATE_CPU_GTT_USWC | 213 AMDGPU_GEM_CREATE_CPU_GTT_USWC |
214 AMDGPU_GEM_CREATE_VRAM_CLEARED | 214 AMDGPU_GEM_CREATE_VRAM_CLEARED |
215 AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)) 215 AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
216 AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
217
216 return -EINVAL; 218 return -EINVAL;
217 219
218 /* reject invalid gem domains */ 220 /* reject invalid gem domains */
@@ -577,11 +579,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
577 args->operation); 579 args->operation);
578 return -EINVAL; 580 return -EINVAL;
579 } 581 }
580 if ((args->operation == AMDGPU_VA_OP_MAP) ||
581 (args->operation == AMDGPU_VA_OP_REPLACE)) {
582 if (amdgpu_kms_vram_lost(adev, fpriv))
583 return -ENODEV;
584 }
585 582
586 INIT_LIST_HEAD(&list); 583 INIT_LIST_HEAD(&list);
587 INIT_LIST_HEAD(&duplicates); 584 INIT_LIST_HEAD(&duplicates);
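
The GEM create path above now accepts AMDGPU_GEM_CREATE_EXPLICIT_SYNC, which amdgpu_cs_sync_rings() consults (via amdgpu_bo_explicit_sync()) to skip implicit synchronization on the BO's reservation object. A hedged userspace sketch of allocating such a BO; the flag comes from the updated include/uapi/drm/amdgpu_drm.h, and the header path may vary:

/* Sketch only: create a VRAM BO that opts out of implicit sync. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int create_explicit_sync_bo(int drm_fd, uint64_t size, uint32_t *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;
	return 0;
}
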
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 83435ccbad44..ef043361009f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -201,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
201 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 201 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
202 int r = 0; 202 int r = 0;
203 203
204 mutex_init(&kiq->ring_mutex); 204 spin_lock_init(&kiq->ring_lock);
205 205
206 r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs); 206 r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
207 if (r) 207 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0d15eb7d31d7..33535d347734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
169 int r; 169 int r;
170 170
171 spin_lock(&mgr->lock); 171 spin_lock(&mgr->lock);
172 if (atomic64_read(&mgr->available) < mem->num_pages) { 172 if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
173 atomic64_read(&mgr->available) < mem->num_pages) {
173 spin_unlock(&mgr->lock); 174 spin_unlock(&mgr->lock);
174 return 0; 175 return 0;
175 } 176 }
@@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
244uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) 245uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
245{ 246{
246 struct amdgpu_gtt_mgr *mgr = man->priv; 247 struct amdgpu_gtt_mgr *mgr = man->priv;
248 s64 result = man->size - atomic64_read(&mgr->available);
247 249
248 return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE; 250 return (result > 0 ? result : 0) * PAGE_SIZE;
249} 251}
250 252
251/** 253/**
@@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
265 drm_mm_print(&mgr->mm, printer); 267 drm_mm_print(&mgr->mm, printer);
266 spin_unlock(&mgr->lock); 268 spin_unlock(&mgr->lock);
267 269
268 drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n", 270 drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
269 man->size, (u64)atomic64_read(&mgr->available), 271 man->size, (u64)atomic64_read(&mgr->available),
270 amdgpu_gtt_mgr_usage(man) >> 20); 272 amdgpu_gtt_mgr_usage(man) >> 20);
271} 273}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627ae83e..0cfc68db575b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
65 amdgpu_sync_create(&(*job)->sync); 65 amdgpu_sync_create(&(*job)->sync);
66 amdgpu_sync_create(&(*job)->dep_sync); 66 amdgpu_sync_create(&(*job)->dep_sync);
67 amdgpu_sync_create(&(*job)->sched_sync); 67 amdgpu_sync_create(&(*job)->sched_sync);
68 (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
68 69
69 return 0; 70 return 0;
70} 71}
@@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
103{ 104{
104 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); 105 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
105 106
107 amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
106 dma_fence_put(job->fence); 108 dma_fence_put(job->fence);
107 amdgpu_sync_free(&job->sync); 109 amdgpu_sync_free(&job->sync);
108 amdgpu_sync_free(&job->dep_sync); 110 amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
139 job->fence_ctx = entity->fence_context; 141 job->fence_ctx = entity->fence_context;
140 *f = dma_fence_get(&job->base.s_fence->finished); 142 *f = dma_fence_get(&job->base.s_fence->finished);
141 amdgpu_job_free_resources(job); 143 amdgpu_job_free_resources(job);
144 amdgpu_ring_priority_get(job->ring,
145 amd_sched_get_job_priority(&job->base));
142 amd_sched_entity_push_job(&job->base); 146 amd_sched_entity_push_job(&job->base);
143 147
144 return 0; 148 return 0;
@@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
177static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) 181static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
178{ 182{
179 struct dma_fence *fence = NULL; 183 struct dma_fence *fence = NULL;
184 struct amdgpu_device *adev;
180 struct amdgpu_job *job; 185 struct amdgpu_job *job;
181 struct amdgpu_fpriv *fpriv = NULL;
182 int r; 186 int r;
183 187
184 if (!sched_job) { 188 if (!sched_job) {
@@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
186 return NULL; 190 return NULL;
187 } 191 }
188 job = to_amdgpu_job(sched_job); 192 job = to_amdgpu_job(sched_job);
193 adev = job->adev;
189 194
190 BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); 195 BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
191 196
192 trace_amdgpu_sched_run_job(job); 197 trace_amdgpu_sched_run_job(job);
193 if (job->vm)
194 fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
195 /* skip ib schedule when vram is lost */ 198 /* skip ib schedule when vram is lost */
196 if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv)) 199 if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
200 dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
197 DRM_ERROR("Skip scheduling IBs!\n"); 201 DRM_ERROR("Skip scheduling IBs!\n");
198 else { 202 } else {
199 r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence); 203 r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
204 &fence);
200 if (r) 205 if (r)
201 DRM_ERROR("Error scheduling IBs (%d)\n", r); 206 DRM_ERROR("Error scheduling IBs (%d)\n", r);
202 } 207 }
203 /* if gpu reset, hw fence will be replaced here */ 208 /* if gpu reset, hw fence will be replaced here */
204 dma_fence_put(job->fence); 209 dma_fence_put(job->fence);
205 job->fence = dma_fence_get(fence); 210 job->fence = dma_fence_get(fence);
211
206 amdgpu_job_free_resources(job); 212 amdgpu_job_free_resources(job);
207 return fence; 213 return fence;
208} 214}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 51841259e23f..6f0b26dae3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include "amdgpu.h" 29#include "amdgpu.h"
30#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
31#include "amdgpu_sched.h"
31#include "amdgpu_uvd.h" 32#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h" 33#include "amdgpu_vce.h"
33 34
@@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
269static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 270static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
270{ 271{
271 struct amdgpu_device *adev = dev->dev_private; 272 struct amdgpu_device *adev = dev->dev_private;
272 struct amdgpu_fpriv *fpriv = filp->driver_priv;
273 struct drm_amdgpu_info *info = data; 273 struct drm_amdgpu_info *info = data;
274 struct amdgpu_mode_info *minfo = &adev->mode_info; 274 struct amdgpu_mode_info *minfo = &adev->mode_info;
275 void __user *out = (void __user *)(uintptr_t)info->return_pointer; 275 void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
282 282
283 if (!info->return_size || !info->return_pointer) 283 if (!info->return_size || !info->return_pointer)
284 return -EINVAL; 284 return -EINVAL;
285 if (amdgpu_kms_vram_lost(adev, fpriv))
286 return -ENODEV;
287 285
288 switch (info->query) { 286 switch (info->query) {
289 case AMDGPU_INFO_ACCEL_WORKING: 287 case AMDGPU_INFO_ACCEL_WORKING:
@@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
765 } 763 }
766 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; 764 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
767 } 765 }
766 case AMDGPU_INFO_VRAM_LOST_COUNTER:
767 ui32 = atomic_read(&adev->vram_lost_counter);
768 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
768 default: 769 default:
769 DRM_DEBUG_KMS("Invalid request %d\n", info->query); 770 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
770 return -EINVAL; 771 return -EINVAL;
@@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
791 vga_switcheroo_process_delayed_switch(); 792 vga_switcheroo_process_delayed_switch();
792} 793}
793 794
794bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
795 struct amdgpu_fpriv *fpriv)
796{
797 return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
798}
799
800/** 795/**
801 * amdgpu_driver_open_kms - drm callback for open 796 * amdgpu_driver_open_kms - drm callback for open
802 * 797 *
@@ -853,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
853 848
854 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); 849 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
855 850
856 fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
857 file_priv->driver_priv = fpriv; 851 file_priv->driver_priv = fpriv;
858 852
859out_suspend: 853out_suspend:
@@ -1023,6 +1017,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
1023 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1017 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1024 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1018 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1025 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1019 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1020 DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
1026 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1021 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1027 DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1022 DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1028 /* KMS */ 1023 /* KMS */
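
With the blanket -ENODEV check removed from amdgpu_info_ioctl(), userspace is expected to poll the new AMDGPU_INFO_VRAM_LOST_COUNTER query and compare the result against the value it recorded when its resources were last known good. A rough userspace sketch using libdrm's drmCommandWrite(); the helper name and error handling are illustrative only, and the struct layout is taken from the amdgpu uapi header rather than from this excerpt:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

/* Sketch: read the per-device VRAM-lost generation counter. */
static int example_query_vram_lost_counter(int fd, uint32_t *counter)
{
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)counter;
        request.return_size = sizeof(*counter);
        request.query = AMDGPU_INFO_VRAM_LOST_COUNTER;

        return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
                               sizeof(request));
}

A client that sees the counter change knows its VRAM contents may be stale and should recreate them before submitting further work.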
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6982baeccd14..8b4ed8a98a18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,9 +40,7 @@
40static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) 40static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
41{ 41{
42 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 42 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
43 struct amdgpu_bo *bo; 43 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
44
45 bo = container_of(tbo, struct amdgpu_bo, tbo);
46 44
47 amdgpu_bo_kunmap(bo); 45 amdgpu_bo_kunmap(bo);
48 46
@@ -884,7 +882,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
884 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) 882 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
885 return; 883 return;
886 884
887 abo = container_of(bo, struct amdgpu_bo, tbo); 885 abo = ttm_to_amdgpu_bo(bo);
888 amdgpu_vm_bo_invalidate(adev, abo, evict); 886 amdgpu_vm_bo_invalidate(adev, abo, evict);
889 887
890 amdgpu_bo_kunmap(abo); 888 amdgpu_bo_kunmap(abo);
@@ -911,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
911 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) 909 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
912 return 0; 910 return 0;
913 911
914 abo = container_of(bo, struct amdgpu_bo, tbo); 912 abo = ttm_to_amdgpu_bo(bo);
915 913
916 /* Remember that this BO was accessed by the CPU */ 914 /* Remember that this BO was accessed by the CPU */
917 abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 915 abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 39b6bf6fb051..428aae048f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -94,6 +94,11 @@ struct amdgpu_bo {
94 }; 94 };
95}; 95};
96 96
97static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
98{
99 return container_of(tbo, struct amdgpu_bo, tbo);
100}
101
97/** 102/**
98 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type 103 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
99 * @mem_type: ttm memory type 104 * @mem_type: ttm memory type
@@ -188,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
188 } 193 }
189} 194}
190 195
196/**
197 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
198 */
199static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
200{
201 return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
202}
203
191int amdgpu_bo_create(struct amdgpu_device *adev, 204int amdgpu_bo_create(struct amdgpu_device *adev,
192 unsigned long size, int byte_align, 205 unsigned long size, int byte_align,
193 bool kernel, u32 domain, u64 flags, 206 bool kernel, u32 domain, u64 flags,
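
The new amdgpu_bo_explicit_sync() helper is meant to feed the explicit_sync parameter this series adds to amdgpu_sync_resv() (see the amdgpu_sync.c hunk below). A hedged sketch of a caller wiring the two together; the wrapper itself is hypothetical, the real call sites live in the command-submission path, which is not part of this excerpt:

/* Sketch: honour only the exclusive fence for BOs created with
 * AMDGPU_GEM_CREATE_EXPLICIT_SYNC, implicit fences for everything else. */
static int example_sync_bo(struct amdgpu_device *adev,
                           struct amdgpu_sync *sync,
                           struct amdgpu_bo *bo, void *owner)
{
        return amdgpu_sync_resv(adev, sync, bo->tbo.resv, owner,
                                amdgpu_bo_explicit_sync(bo));
}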
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3b42f407971d..5f5aa5fddc16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -145,6 +145,8 @@ static int amdgpu_pp_hw_init(void *handle)
145 int ret = 0; 145 int ret = 0;
146 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 146 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
147 147
148 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
149 amdgpu_ucode_init_bo(adev);
148 150
149 if (adev->powerplay.ip_funcs->hw_init) 151 if (adev->powerplay.ip_funcs->hw_init)
150 ret = adev->powerplay.ip_funcs->hw_init( 152 ret = adev->powerplay.ip_funcs->hw_init(
@@ -162,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
162 ret = adev->powerplay.ip_funcs->hw_fini( 164 ret = adev->powerplay.ip_funcs->hw_fini(
163 adev->powerplay.pp_handle); 165 adev->powerplay.pp_handle);
164 166
167 if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
168 amdgpu_ucode_fini_bo(adev);
169
165 return ret; 170 return ret;
166} 171}
167 172
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index f1035a689d35..447d446b5015 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -411,6 +411,13 @@ static int psp_hw_init(void *handle)
411 return 0; 411 return 0;
412 412
413 mutex_lock(&adev->firmware.mutex); 413 mutex_lock(&adev->firmware.mutex);
414 /*
 415 * This sequence is used only once, during hw_init; it is not
 416 * needed on resume.

417 */
418 ret = amdgpu_ucode_init_bo(adev);
419 if (ret)
420 goto failed;
414 421
415 ret = psp_load_fw(adev); 422 ret = psp_load_fw(adev);
416 if (ret) { 423 if (ret) {
@@ -435,6 +442,8 @@ static int psp_hw_fini(void *handle)
435 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 442 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
436 return 0; 443 return 0;
437 444
445 amdgpu_ucode_fini_bo(adev);
446
438 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 447 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
439 448
440 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); 449 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 019932a7ea3a..e5ece1fae149 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -155,6 +155,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
155} 155}
156 156
157/** 157/**
158 * amdgpu_ring_priority_put - restore a ring's priority
159 *
160 * @ring: amdgpu_ring structure holding the information
161 * @priority: target priority
162 *
163 * Release a request for executing at @priority
164 */
165void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
166 enum amd_sched_priority priority)
167{
168 int i;
169
170 if (!ring->funcs->set_priority)
171 return;
172
173 if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
174 return;
175
176 /* no need to restore if the job is already at the lowest priority */
177 if (priority == AMD_SCHED_PRIORITY_NORMAL)
178 return;
179
180 mutex_lock(&ring->priority_mutex);
181 /* something higher prio is executing, no need to decay */
182 if (ring->priority > priority)
183 goto out_unlock;
184
185 /* decay priority to the next level with a job available */
186 for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
187 if (i == AMD_SCHED_PRIORITY_NORMAL
188 || atomic_read(&ring->num_jobs[i])) {
189 ring->priority = i;
190 ring->funcs->set_priority(ring, i);
191 break;
192 }
193 }
194
195out_unlock:
196 mutex_unlock(&ring->priority_mutex);
197}
198
199/**
200 * amdgpu_ring_priority_get - change the ring's priority
201 *
202 * @ring: amdgpu_ring structure holding the information
203 * @priority: target priority
204 *
205 * Request a ring's priority to be raised to @priority (refcounted).
206 */
207void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
208 enum amd_sched_priority priority)
209{
210 if (!ring->funcs->set_priority)
211 return;
212
213 atomic_inc(&ring->num_jobs[priority]);
214
215 mutex_lock(&ring->priority_mutex);
216 if (priority <= ring->priority)
217 goto out_unlock;
218
219 ring->priority = priority;
220 ring->funcs->set_priority(ring, priority);
221
222out_unlock:
223 mutex_unlock(&ring->priority_mutex);
224}
225
226/**
158 * amdgpu_ring_init - init driver ring struct. 227 * amdgpu_ring_init - init driver ring struct.
159 * 228 *
160 * @adev: amdgpu_device pointer 229 * @adev: amdgpu_device pointer
@@ -169,7 +238,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
169 unsigned max_dw, struct amdgpu_irq_src *irq_src, 238 unsigned max_dw, struct amdgpu_irq_src *irq_src,
170 unsigned irq_type) 239 unsigned irq_type)
171{ 240{
172 int r; 241 int r, i;
173 int sched_hw_submission = amdgpu_sched_hw_submission; 242 int sched_hw_submission = amdgpu_sched_hw_submission;
174 243
175 /* Set the hw submission limit higher for KIQ because 244 /* Set the hw submission limit higher for KIQ because
@@ -247,9 +316,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
247 } 316 }
248 317
249 ring->max_dw = max_dw; 318 ring->max_dw = max_dw;
319 ring->priority = AMD_SCHED_PRIORITY_NORMAL;
320 mutex_init(&ring->priority_mutex);
250 INIT_LIST_HEAD(&ring->lru_list); 321 INIT_LIST_HEAD(&ring->lru_list);
251 amdgpu_ring_lru_touch(adev, ring); 322 amdgpu_ring_lru_touch(adev, ring);
252 323
324 for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
325 atomic_set(&ring->num_jobs[i], 0);
326
253 if (amdgpu_debugfs_ring_init(adev, ring)) { 327 if (amdgpu_debugfs_ring_init(adev, ring)) {
254 DRM_ERROR("Failed to register debugfs file for rings !\n"); 328 DRM_ERROR("Failed to register debugfs file for rings !\n");
255 } 329 }
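
amdgpu_ring_priority_get()/put() are reference counted per priority level, so callers are expected to bracket the lifetime of each job. A hedged sketch of that usage pattern; the push/retire hooks are placeholders for the real call sites in amdgpu_job.c, which this excerpt shows only in part:

/* Sketch: boost the ring while a prioritized job is in flight and release
 * the request once the job is retired, letting the priority decay. */
static void example_push_job(struct amdgpu_job *job,
                             enum amd_sched_priority prio)
{
        amdgpu_ring_priority_get(job->ring, prio);
        /* ... hand the job to the GPU scheduler ... */
}

static void example_retire_job(struct amdgpu_job *job,
                               enum amd_sched_priority prio)
{
        /* ... job has signalled and is being freed ... */
        amdgpu_ring_priority_put(job->ring, prio);
}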
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 491bd5512dcc..b18c2b96691f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -24,6 +24,7 @@
24#ifndef __AMDGPU_RING_H__ 24#ifndef __AMDGPU_RING_H__
25#define __AMDGPU_RING_H__ 25#define __AMDGPU_RING_H__
26 26
27#include <drm/amdgpu_drm.h>
27#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
28 29
29/* max number of rings */ 30/* max number of rings */
@@ -56,6 +57,7 @@ struct amdgpu_device;
56struct amdgpu_ring; 57struct amdgpu_ring;
57struct amdgpu_ib; 58struct amdgpu_ib;
58struct amdgpu_cs_parser; 59struct amdgpu_cs_parser;
60struct amdgpu_job;
59 61
60/* 62/*
61 * Fences. 63 * Fences.
@@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
88void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); 90void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
89void amdgpu_fence_driver_resume(struct amdgpu_device *adev); 91void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
90int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); 92int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
93int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
91void amdgpu_fence_process(struct amdgpu_ring *ring); 94void amdgpu_fence_process(struct amdgpu_ring *ring);
92int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 95int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
96signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
97 uint32_t wait_seq,
98 signed long timeout);
93unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 99unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
94 100
95/* 101/*
@@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
147 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); 153 void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
148 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); 154 void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
149 void (*emit_tmz)(struct amdgpu_ring *ring, bool start); 155 void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
156 /* priority functions */
157 void (*set_priority) (struct amdgpu_ring *ring,
158 enum amd_sched_priority priority);
150}; 159};
151 160
152struct amdgpu_ring { 161struct amdgpu_ring {
@@ -187,6 +196,12 @@ struct amdgpu_ring {
187 volatile u32 *cond_exe_cpu_addr; 196 volatile u32 *cond_exe_cpu_addr;
188 unsigned vm_inv_eng; 197 unsigned vm_inv_eng;
189 bool has_compute_vm_bug; 198 bool has_compute_vm_bug;
199
200 atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
201 struct mutex priority_mutex;
202 /* protected by priority_mutex */
203 int priority;
204
190#if defined(CONFIG_DEBUG_FS) 205#if defined(CONFIG_DEBUG_FS)
191 struct dentry *ent; 206 struct dentry *ent;
192#endif 207#endif
@@ -197,6 +212,10 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
197void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 212void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
198void amdgpu_ring_commit(struct amdgpu_ring *ring); 213void amdgpu_ring_commit(struct amdgpu_ring *ring);
199void amdgpu_ring_undo(struct amdgpu_ring *ring); 214void amdgpu_ring_undo(struct amdgpu_ring *ring);
215void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
216 enum amd_sched_priority priority);
217void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
218 enum amd_sched_priority priority);
200int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, 219int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
201 unsigned ring_size, struct amdgpu_irq_src *irq_src, 220 unsigned ring_size, struct amdgpu_irq_src *irq_src,
202 unsigned irq_type); 221 unsigned irq_type);
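
The set_priority hook is optional: only backends that can actually reprioritize a hardware queue fill it in their amdgpu_ring_funcs table. A purely illustrative sketch of such a backend; example_hw_set_queue_priority() is a hypothetical stand-in for the IP-specific register programming and is not part of this patch:

/* Hypothetical register-programming helper; a real backend would touch the
 * IP block's queue priority registers here. */
static void example_hw_set_queue_priority(struct amdgpu_ring *ring, bool high)
{
}

static void example_ring_set_priority(struct amdgpu_ring *ring,
                                      enum amd_sched_priority priority)
{
        /* only compute queues are assumed to support this in the sketch */
        if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
                return;

        example_hw_set_queue_priority(ring,
                                      priority >= AMD_SCHED_PRIORITY_HIGH_HW);
}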
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..290cc3f9c433
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2017 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Andres Rodriguez <andresx7@gmail.com>
23 */
24
25#include <linux/fdtable.h>
26#include <linux/pid.h>
27#include <drm/amdgpu_drm.h>
28#include "amdgpu.h"
29
30#include "amdgpu_vm.h"
31
32enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
33{
34 switch (amdgpu_priority) {
35 case AMDGPU_CTX_PRIORITY_VERY_HIGH:
36 return AMD_SCHED_PRIORITY_HIGH_HW;
37 case AMDGPU_CTX_PRIORITY_HIGH:
38 return AMD_SCHED_PRIORITY_HIGH_SW;
39 case AMDGPU_CTX_PRIORITY_NORMAL:
40 return AMD_SCHED_PRIORITY_NORMAL;
41 case AMDGPU_CTX_PRIORITY_LOW:
42 case AMDGPU_CTX_PRIORITY_VERY_LOW:
43 return AMD_SCHED_PRIORITY_LOW;
44 case AMDGPU_CTX_PRIORITY_UNSET:
45 return AMD_SCHED_PRIORITY_UNSET;
46 default:
47 WARN(1, "Invalid context priority %d\n", amdgpu_priority);
48 return AMD_SCHED_PRIORITY_INVALID;
49 }
50}
51
52static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
53 int fd,
54 enum amd_sched_priority priority)
55{
56 struct file *filp = fcheck(fd);
57 struct drm_file *file;
58 struct pid *pid;
59 struct amdgpu_fpriv *fpriv;
60 struct amdgpu_ctx *ctx;
61 uint32_t id;
62
63 if (!filp)
64 return -EINVAL;
65
66 pid = get_pid(((struct drm_file *)filp->private_data)->pid);
67
68 mutex_lock(&adev->ddev->filelist_mutex);
69 list_for_each_entry(file, &adev->ddev->filelist, lhead) {
70 if (file->pid != pid)
71 continue;
72
73 fpriv = file->driver_priv;
74 idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
75 amdgpu_ctx_priority_override(ctx, priority);
76 }
77 mutex_unlock(&adev->ddev->filelist_mutex);
78
79 put_pid(pid);
80
81 return 0;
82}
83
84int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
85 struct drm_file *filp)
86{
87 union drm_amdgpu_sched *args = data;
88 struct amdgpu_device *adev = dev->dev_private;
89 enum amd_sched_priority priority;
90 int r;
91
92 priority = amdgpu_to_sched_priority(args->in.priority);
93 if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
94 return -EINVAL;
95
96 switch (args->in.op) {
97 case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
98 r = amdgpu_sched_process_priority_override(adev,
99 args->in.fd,
100 priority);
101 break;
102 default:
103 DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
104 r = -EINVAL;
105 break;
106 }
107
108 return r;
109}
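
Because the ioctl is registered with DRM_MASTER (see the amdgpu_kms.c hunk earlier), the intended caller is a privileged process such as a compositor that boosts every context owned by another client's fd. A rough userspace sketch; the command index DRM_AMDGPU_SCHED and the union drm_amdgpu_sched layout come from the uapi header added alongside this file and are assumptions as far as this excerpt goes:

#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

/* Sketch: override the priority of every context belonging to the process
 * that owns @target_fd. */
static int example_override_process_priority(int master_fd, int target_fd)
{
        union drm_amdgpu_sched args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
        args.in.fd = target_fd;
        args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

        return drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args,
                               sizeof(args));
}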
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 13c8dbbccaf2..b28c067d3822 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Advanced Micro Devices, Inc. 2 * Copyright 2017 Valve Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,22 +19,16 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Andres Rodriguez <andresx7@gmail.com>
22 */ 23 */
23#ifndef _ICELAND_SMC_H
24#define _ICELAND_SMC_H
25 24
26#include "smumgr.h" 25#ifndef __AMDGPU_SCHED_H__
26#define __AMDGPU_SCHED_H__
27 27
28#include <drm/drmP.h>
28 29
29int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); 30enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
30int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr); 31int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
31int iceland_init_smc_table(struct pp_hwmgr *hwmgr); 32 struct drm_file *filp);
32int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
33int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
34uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
35uint32_t iceland_get_mac_definition(uint32_t value);
36int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
37int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
38bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
39#endif
40 33
34#endif // __AMDGPU_SCHED_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586f44312f9..a4bf21f8f1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
169 * 169 *
170 * @sync: sync object to add fences from reservation object to 170 * @sync: sync object to add fences from reservation object to
171 * @resv: reservation object with embedded fence 171 * @resv: reservation object with embedded fence
172 * @shared: true if we should only sync to the exclusive fence 172 * @explicit_sync: true if we should only sync to the exclusive fence
173 * 173 *
174 * Sync to the fence 174 * Sync to the fence
175 */ 175 */
176int amdgpu_sync_resv(struct amdgpu_device *adev, 176int amdgpu_sync_resv(struct amdgpu_device *adev,
177 struct amdgpu_sync *sync, 177 struct amdgpu_sync *sync,
178 struct reservation_object *resv, 178 struct reservation_object *resv,
179 void *owner) 179 void *owner, bool explicit_sync)
180{ 180{
181 struct reservation_object_list *flist; 181 struct reservation_object_list *flist;
182 struct dma_fence *f; 182 struct dma_fence *f;
@@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
191 f = reservation_object_get_excl(resv); 191 f = reservation_object_get_excl(resv);
192 r = amdgpu_sync_fence(adev, sync, f); 192 r = amdgpu_sync_fence(adev, sync, f);
193 193
194 if (explicit_sync)
195 return r;
196
194 flist = reservation_object_get_list(resv); 197 flist = reservation_object_get_list(resv);
195 if (!flist || r) 198 if (!flist || r)
196 return r; 199 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index dc7687993317..70d7e3a279a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
45int amdgpu_sync_resv(struct amdgpu_device *adev, 45int amdgpu_sync_resv(struct amdgpu_device *adev,
46 struct amdgpu_sync *sync, 46 struct amdgpu_sync *sync,
47 struct reservation_object *resv, 47 struct reservation_object *resv,
48 void *owner); 48 void *owner,
49 bool explicit_sync);
49struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, 50struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
50 struct amdgpu_ring *ring); 51 struct amdgpu_ring *ring);
51struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 52struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 15a28578d458..51eacefadea1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -44,6 +44,7 @@
44#include <linux/debugfs.h> 44#include <linux/debugfs.h>
45#include <linux/iommu.h> 45#include <linux/iommu.h>
46#include "amdgpu.h" 46#include "amdgpu.h"
47#include "amdgpu_object.h"
47#include "amdgpu_trace.h" 48#include "amdgpu_trace.h"
48#include "bif/bif_4_1_d.h" 49#include "bif/bif_4_1_d.h"
49 50
@@ -209,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
209 placement->num_busy_placement = 1; 210 placement->num_busy_placement = 1;
210 return; 211 return;
211 } 212 }
212 abo = container_of(bo, struct amdgpu_bo, tbo); 213 abo = ttm_to_amdgpu_bo(bo);
213 switch (bo->mem.mem_type) { 214 switch (bo->mem.mem_type) {
214 case TTM_PL_VRAM: 215 case TTM_PL_VRAM:
215 if (adev->mman.buffer_funcs && 216 if (adev->mman.buffer_funcs &&
@@ -257,7 +258,7 @@ gtt:
257 258
258static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) 259static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
259{ 260{
260 struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); 261 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
261 262
262 if (amdgpu_ttm_tt_get_usermm(bo->ttm)) 263 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
263 return -EPERM; 264 return -EPERM;
@@ -289,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
289 return addr; 290 return addr;
290} 291}
291 292
292static int amdgpu_move_blit(struct ttm_buffer_object *bo, 293/**
293 bool evict, bool no_wait_gpu, 294 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
294 struct ttm_mem_reg *new_mem, 295 * corresponding to @offset. It also modifies the offset to be
295 struct ttm_mem_reg *old_mem) 296 * within the drm_mm_node returned
297 */
298static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
299 unsigned long *offset)
296{ 300{
297 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 301 struct drm_mm_node *mm_node = mem->mm_node;
298 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
299 302
300 struct drm_mm_node *old_mm, *new_mm; 303 while (*offset >= (mm_node->size << PAGE_SHIFT)) {
301 uint64_t old_start, old_size, new_start, new_size; 304 *offset -= (mm_node->size << PAGE_SHIFT);
302 unsigned long num_pages; 305 ++mm_node;
303 struct dma_fence *fence = NULL; 306 }
304 int r; 307 return mm_node;
308}
305 309
306 BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); 310/**
311 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
312 *
313 * The function copies @size bytes from {src->mem + src->offset} to
314 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
315 * move and different for a BO to BO copy.
316 *
317 * @f: Returns the last fence if multiple jobs are submitted.
318 */
319int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
320 struct amdgpu_copy_mem *src,
321 struct amdgpu_copy_mem *dst,
322 uint64_t size,
323 struct reservation_object *resv,
324 struct dma_fence **f)
325{
326 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
327 struct drm_mm_node *src_mm, *dst_mm;
328 uint64_t src_node_start, dst_node_start, src_node_size,
329 dst_node_size, src_page_offset, dst_page_offset;
330 struct dma_fence *fence = NULL;
331 int r = 0;
332 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
333 AMDGPU_GPU_PAGE_SIZE);
307 334
308 if (!ring->ready) { 335 if (!ring->ready) {
309 DRM_ERROR("Trying to move memory with ring turned off.\n"); 336 DRM_ERROR("Trying to move memory with ring turned off.\n");
310 return -EINVAL; 337 return -EINVAL;
311 } 338 }
312 339
313 old_mm = old_mem->mm_node; 340 src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
314 old_size = old_mm->size; 341 src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
315 old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); 342 src->offset;
343 src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
344 src_page_offset = src_node_start & (PAGE_SIZE - 1);
316 345
317 new_mm = new_mem->mm_node; 346 dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
318 new_size = new_mm->size; 347 dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
319 new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); 348 dst->offset;
349 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
350 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
320 351
321 num_pages = new_mem->num_pages;
322 mutex_lock(&adev->mman.gtt_window_lock); 352 mutex_lock(&adev->mman.gtt_window_lock);
323 while (num_pages) { 353
324 unsigned long cur_pages = min(min(old_size, new_size), 354 while (size) {
325 (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE); 355 unsigned long cur_size;
326 uint64_t from = old_start, to = new_start; 356 uint64_t from = src_node_start, to = dst_node_start;
327 struct dma_fence *next; 357 struct dma_fence *next;
328 358
329 if (old_mem->mem_type == TTM_PL_TT && 359 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
330 !amdgpu_gtt_mgr_is_allocated(old_mem)) { 360 * begins at an offset, then adjust the size accordingly
331 r = amdgpu_map_buffer(bo, old_mem, cur_pages, 361 */
332 old_start, 0, ring, &from); 362 cur_size = min3(min(src_node_size, dst_node_size), size,
363 GTT_MAX_BYTES);
364 if (cur_size + src_page_offset > GTT_MAX_BYTES ||
365 cur_size + dst_page_offset > GTT_MAX_BYTES)
366 cur_size -= max(src_page_offset, dst_page_offset);
367
368 /* Map only what needs to be accessed. Map src to window 0 and
369 * dst to window 1
370 */
371 if (src->mem->mem_type == TTM_PL_TT &&
372 !amdgpu_gtt_mgr_is_allocated(src->mem)) {
373 r = amdgpu_map_buffer(src->bo, src->mem,
374 PFN_UP(cur_size + src_page_offset),
375 src_node_start, 0, ring,
376 &from);
333 if (r) 377 if (r)
334 goto error; 378 goto error;
379 /* Adjust the offset because amdgpu_map_buffer returns
380 * start of mapped page
381 */
382 from += src_page_offset;
335 } 383 }
336 384
337 if (new_mem->mem_type == TTM_PL_TT && 385 if (dst->mem->mem_type == TTM_PL_TT &&
338 !amdgpu_gtt_mgr_is_allocated(new_mem)) { 386 !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
339 r = amdgpu_map_buffer(bo, new_mem, cur_pages, 387 r = amdgpu_map_buffer(dst->bo, dst->mem,
340 new_start, 1, ring, &to); 388 PFN_UP(cur_size + dst_page_offset),
389 dst_node_start, 1, ring,
390 &to);
341 if (r) 391 if (r)
342 goto error; 392 goto error;
393 to += dst_page_offset;
343 } 394 }
344 395
345 r = amdgpu_copy_buffer(ring, from, to, 396 r = amdgpu_copy_buffer(ring, from, to, cur_size,
346 cur_pages * PAGE_SIZE, 397 resv, &next, false, true);
347 bo->resv, &next, false, true);
348 if (r) 398 if (r)
349 goto error; 399 goto error;
350 400
351 dma_fence_put(fence); 401 dma_fence_put(fence);
352 fence = next; 402 fence = next;
353 403
354 num_pages -= cur_pages; 404 size -= cur_size;
355 if (!num_pages) 405 if (!size)
356 break; 406 break;
357 407
358 old_size -= cur_pages; 408 src_node_size -= cur_size;
359 if (!old_size) { 409 if (!src_node_size) {
360 old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem); 410 src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
361 old_size = old_mm->size; 411 src->mem);
412 src_node_size = (src_mm->size << PAGE_SHIFT);
362 } else { 413 } else {
363 old_start += cur_pages * PAGE_SIZE; 414 src_node_start += cur_size;
415 src_page_offset = src_node_start & (PAGE_SIZE - 1);
364 } 416 }
365 417 dst_node_size -= cur_size;
366 new_size -= cur_pages; 418 if (!dst_node_size) {
367 if (!new_size) { 419 dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
368 new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem); 420 dst->mem);
369 new_size = new_mm->size; 421 dst_node_size = (dst_mm->size << PAGE_SHIFT);
370 } else { 422 } else {
371 new_start += cur_pages * PAGE_SIZE; 423 dst_node_start += cur_size;
424 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
372 } 425 }
373 } 426 }
427error:
374 mutex_unlock(&adev->mman.gtt_window_lock); 428 mutex_unlock(&adev->mman.gtt_window_lock);
429 if (f)
430 *f = dma_fence_get(fence);
431 dma_fence_put(fence);
432 return r;
433}
434
435
436static int amdgpu_move_blit(struct ttm_buffer_object *bo,
437 bool evict, bool no_wait_gpu,
438 struct ttm_mem_reg *new_mem,
439 struct ttm_mem_reg *old_mem)
440{
441 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
442 struct amdgpu_copy_mem src, dst;
443 struct dma_fence *fence = NULL;
444 int r;
445
446 src.bo = bo;
447 dst.bo = bo;
448 src.mem = old_mem;
449 dst.mem = new_mem;
450 src.offset = 0;
451 dst.offset = 0;
452
453 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
454 new_mem->num_pages << PAGE_SHIFT,
455 bo->resv, &fence);
456 if (r)
457 goto error;
375 458
376 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); 459 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
377 dma_fence_put(fence); 460 dma_fence_put(fence);
378 return r; 461 return r;
379 462
380error: 463error:
381 mutex_unlock(&adev->mman.gtt_window_lock);
382
383 if (fence) 464 if (fence)
384 dma_fence_wait(fence, false); 465 dma_fence_wait(fence, false);
385 dma_fence_put(fence); 466 dma_fence_put(fence);
@@ -484,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
484 int r; 565 int r;
485 566
486 /* Can't move a pinned BO */ 567 /* Can't move a pinned BO */
487 abo = container_of(bo, struct amdgpu_bo, tbo); 568 abo = ttm_to_amdgpu_bo(bo);
488 if (WARN_ON_ONCE(abo->pin_count > 0)) 569 if (WARN_ON_ONCE(abo->pin_count > 0))
489 return -EINVAL; 570 return -EINVAL;
490 571
@@ -582,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
582static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, 663static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
583 unsigned long page_offset) 664 unsigned long page_offset)
584{ 665{
585 struct drm_mm_node *mm = bo->mem.mm_node; 666 struct drm_mm_node *mm;
586 uint64_t size = mm->size; 667 unsigned long offset = (page_offset << PAGE_SHIFT);
587 uint64_t offset = page_offset;
588 668
589 page_offset = do_div(offset, size); 669 mm = amdgpu_find_mm_node(&bo->mem, &offset);
590 mm += offset; 670 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
591 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset; 671 (offset >> PAGE_SHIFT);
592} 672}
593 673
594/* 674/*
@@ -1142,9 +1222,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1142 unsigned long offset, 1222 unsigned long offset,
1143 void *buf, int len, int write) 1223 void *buf, int len, int write)
1144{ 1224{
1145 struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); 1225 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1146 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); 1226 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1147 struct drm_mm_node *nodes = abo->tbo.mem.mm_node; 1227 struct drm_mm_node *nodes;
1148 uint32_t value = 0; 1228 uint32_t value = 0;
1149 int ret = 0; 1229 int ret = 0;
1150 uint64_t pos; 1230 uint64_t pos;
@@ -1153,10 +1233,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1153 if (bo->mem.mem_type != TTM_PL_VRAM) 1233 if (bo->mem.mem_type != TTM_PL_VRAM)
1154 return -EIO; 1234 return -EIO;
1155 1235
1156 while (offset >= (nodes->size << PAGE_SHIFT)) { 1236 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1157 offset -= nodes->size << PAGE_SHIFT;
1158 ++nodes;
1159 }
1160 pos = (nodes->start << PAGE_SHIFT) + offset; 1237 pos = (nodes->start << PAGE_SHIFT) + offset;
1161 1238
1162 while (len && pos < adev->mc.mc_vram_size) { 1239 while (len && pos < adev->mc.mc_vram_size) {
@@ -1255,6 +1332,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1255 /* Change the size here instead of the init above so only lpfn is affected */ 1332 /* Change the size here instead of the init above so only lpfn is affected */
1256 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); 1333 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
1257 1334
1335 /*
 1336 * The VRAM reserved for firmware must be pinned at a specific
 1337 * location in VRAM, so reserve it early.
1338 */
1339 r = amdgpu_fw_reserve_vram_init(adev);
1340 if (r) {
1341 return r;
1342 }
1343
1258 r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, 1344 r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
1259 AMDGPU_GEM_DOMAIN_VRAM, 1345 AMDGPU_GEM_DOMAIN_VRAM,
1260 &adev->stolen_vga_memory, 1346 &adev->stolen_vga_memory,
@@ -1479,7 +1565,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1479 job->vm_needs_flush = vm_needs_flush; 1565 job->vm_needs_flush = vm_needs_flush;
1480 if (resv) { 1566 if (resv) {
1481 r = amdgpu_sync_resv(adev, &job->sync, resv, 1567 r = amdgpu_sync_resv(adev, &job->sync, resv,
1482 AMDGPU_FENCE_OWNER_UNDEFINED); 1568 AMDGPU_FENCE_OWNER_UNDEFINED,
1569 false);
1483 if (r) { 1570 if (r) {
1484 DRM_ERROR("sync failed (%d).\n", r); 1571 DRM_ERROR("sync failed (%d).\n", r);
1485 goto error_free; 1572 goto error_free;
@@ -1571,7 +1658,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1571 1658
1572 if (resv) { 1659 if (resv) {
1573 r = amdgpu_sync_resv(adev, &job->sync, resv, 1660 r = amdgpu_sync_resv(adev, &job->sync, resv,
1574 AMDGPU_FENCE_OWNER_UNDEFINED); 1661 AMDGPU_FENCE_OWNER_UNDEFINED, false);
1575 if (r) { 1662 if (r) {
1576 DRM_ERROR("sync failed (%d).\n", r); 1663 DRM_ERROR("sync failed (%d).\n", r);
1577 goto error_free; 1664 goto error_free;
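
amdgpu_ttm_copy_mem_to_mem() generalizes the old blit path: source and destination may be different BOs and may start at arbitrary byte offsets. A hedged sketch of a caller doing a partial BO-to-BO copy; the wrapper and its parameters are illustrative, only the amdgpu_copy_mem layout and the function signature come from this patch:

/* Sketch: copy @size bytes from @src_bo+@src_off to @dst_bo+@dst_off via
 * the SDMA copy path and wait for the result. */
static int example_partial_copy(struct amdgpu_device *adev,
                                struct amdgpu_bo *src_bo, unsigned long src_off,
                                struct amdgpu_bo *dst_bo, unsigned long dst_off,
                                uint64_t size)
{
        struct amdgpu_copy_mem src = {
                .bo = &src_bo->tbo,
                .mem = &src_bo->tbo.mem,
                .offset = src_off,
        };
        struct amdgpu_copy_mem dst = {
                .bo = &dst_bo->tbo,
                .mem = &dst_bo->tbo.mem,
                .offset = dst_off,
        };
        struct dma_fence *fence = NULL;
        int r;

        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size,
                                       dst_bo->tbo.resv, &fence);
        if (r)
                return r;

        if (fence) {
                r = dma_fence_wait(fence, false);
                dma_fence_put(fence);
        }
        return r;
}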
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7abae6867339..abd4084982a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -58,6 +58,12 @@ struct amdgpu_mman {
58 struct amd_sched_entity entity; 58 struct amd_sched_entity entity;
59}; 59};
60 60
61struct amdgpu_copy_mem {
62 struct ttm_buffer_object *bo;
63 struct ttm_mem_reg *mem;
64 unsigned long offset;
65};
66
61extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; 67extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
62extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; 68extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
63 69
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
72 struct reservation_object *resv, 78 struct reservation_object *resv,
73 struct dma_fence **fence, bool direct_submit, 79 struct dma_fence **fence, bool direct_submit,
74 bool vm_needs_flush); 80 bool vm_needs_flush);
81int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
82 struct amdgpu_copy_mem *src,
83 struct amdgpu_copy_mem *dst,
84 uint64_t size,
85 struct reservation_object *resv,
86 struct dma_fence **f);
75int amdgpu_fill_buffer(struct amdgpu_bo *bo, 87int amdgpu_fill_buffer(struct amdgpu_bo *bo,
76 uint64_t src_data, 88 uint64_t src_data,
77 struct reservation_object *resv, 89 struct reservation_object *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..e97f80f86005 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#include "amdgpu.h" 24#include "amdgpu.h"
25#define MAX_KIQ_REG_WAIT 100000 25#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
26 26
27int amdgpu_allocate_static_csa(struct amdgpu_device *adev) 27int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
28{ 28{
@@ -114,27 +114,24 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
114uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 114uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
115{ 115{
116 signed long r; 116 signed long r;
117 uint32_t val; 117 uint32_t val, seq;
118 struct dma_fence *f;
119 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 118 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
120 struct amdgpu_ring *ring = &kiq->ring; 119 struct amdgpu_ring *ring = &kiq->ring;
121 120
122 BUG_ON(!ring->funcs->emit_rreg); 121 BUG_ON(!ring->funcs->emit_rreg);
123 122
124 mutex_lock(&kiq->ring_mutex); 123 spin_lock(&kiq->ring_lock);
125 amdgpu_ring_alloc(ring, 32); 124 amdgpu_ring_alloc(ring, 32);
126 amdgpu_ring_emit_rreg(ring, reg); 125 amdgpu_ring_emit_rreg(ring, reg);
127 amdgpu_fence_emit(ring, &f); 126 amdgpu_fence_emit_polling(ring, &seq);
128 amdgpu_ring_commit(ring); 127 amdgpu_ring_commit(ring);
129 mutex_unlock(&kiq->ring_mutex); 128 spin_unlock(&kiq->ring_lock);
130 129
131 r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT)); 130 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
132 dma_fence_put(f);
133 if (r < 1) { 131 if (r < 1) {
134 DRM_ERROR("wait for kiq fence error: %ld.\n", r); 132 DRM_ERROR("wait for kiq fence error: %ld\n", r);
135 return ~0; 133 return ~0;
136 } 134 }
137
138 val = adev->wb.wb[adev->virt.reg_val_offs]; 135 val = adev->wb.wb[adev->virt.reg_val_offs];
139 136
140 return val; 137 return val;
@@ -143,23 +140,22 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
143void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 140void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
144{ 141{
145 signed long r; 142 signed long r;
146 struct dma_fence *f; 143 uint32_t seq;
147 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 144 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
148 struct amdgpu_ring *ring = &kiq->ring; 145 struct amdgpu_ring *ring = &kiq->ring;
149 146
150 BUG_ON(!ring->funcs->emit_wreg); 147 BUG_ON(!ring->funcs->emit_wreg);
151 148
152 mutex_lock(&kiq->ring_mutex); 149 spin_lock(&kiq->ring_lock);
153 amdgpu_ring_alloc(ring, 32); 150 amdgpu_ring_alloc(ring, 32);
154 amdgpu_ring_emit_wreg(ring, reg, v); 151 amdgpu_ring_emit_wreg(ring, reg, v);
155 amdgpu_fence_emit(ring, &f); 152 amdgpu_fence_emit_polling(ring, &seq);
156 amdgpu_ring_commit(ring); 153 amdgpu_ring_commit(ring);
157 mutex_unlock(&kiq->ring_mutex); 154 spin_unlock(&kiq->ring_lock);
158 155
159 r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT)); 156 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
160 if (r < 1) 157 if (r < 1)
161 DRM_ERROR("wait for kiq fence error: %ld.\n", r); 158 DRM_ERROR("wait for kiq fence error: %ld\n", r);
162 dma_fence_put(f);
163} 159}
164 160
165/** 161/**
@@ -274,3 +270,78 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
274 (void *)&adev->virt.mm_table.cpu_addr); 270 (void *)&adev->virt.mm_table.cpu_addr);
275 adev->virt.mm_table.gpu_addr = 0; 271 adev->virt.mm_table.gpu_addr = 0;
276} 272}
273
274
275int amdgpu_virt_fw_reserve_get_checksum(void *obj,
276 unsigned long obj_size,
277 unsigned int key,
278 unsigned int chksum)
279{
280 unsigned int ret = key;
281 unsigned long i = 0;
282 unsigned char *pos;
283
284 pos = (char *)obj;
285 /* calculate checksum */
286 for (i = 0; i < obj_size; ++i)
287 ret += *(pos + i);
288 /* minus the chksum itself */
289 pos = (char *)&chksum;
290 for (i = 0; i < sizeof(chksum); ++i)
291 ret -= *(pos + i);
292 return ret;
293}
294
295void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
296{
297 uint32_t pf2vf_ver = 0;
298 uint32_t pf2vf_size = 0;
299 uint32_t checksum = 0;
300 uint32_t checkval;
301 char *str;
302
303 adev->virt.fw_reserve.p_pf2vf = NULL;
304 adev->virt.fw_reserve.p_vf2pf = NULL;
305
306 if (adev->fw_vram_usage.va != NULL) {
307 adev->virt.fw_reserve.p_pf2vf =
308 (struct amdgim_pf2vf_info_header *)(
309 adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
310 pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
311 AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
312 AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
313
314 /* pf2vf message must be in 4K */
315 if (pf2vf_size > 0 && pf2vf_size < 4096) {
316 checkval = amdgpu_virt_fw_reserve_get_checksum(
317 adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
318 adev->virt.fw_reserve.checksum_key, checksum);
319 if (checkval == checksum) {
320 adev->virt.fw_reserve.p_vf2pf =
321 ((void *)adev->virt.fw_reserve.p_pf2vf +
322 pf2vf_size);
323 memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
324 sizeof(amdgim_vf2pf_info));
325 AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
326 AMDGPU_FW_VRAM_VF2PF_VER);
327 AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
328 sizeof(amdgim_vf2pf_info));
329 AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
330 &str);
331 if (THIS_MODULE->version != NULL)
332 strcpy(str, THIS_MODULE->version);
333 else
334 strcpy(str, "N/A");
335 AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
336 0);
337 AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
338 amdgpu_virt_fw_reserve_get_checksum(
339 adev->virt.fw_reserve.p_vf2pf,
340 pf2vf_size,
341 adev->virt.fw_reserve.checksum_key, 0));
342 }
343 }
344 }
345}
346
347
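
The checksum helper is symmetric: the writer computes it with the checksum field still zero plus a shared key, and the reader recomputes it over the received block and compares. A small sketch of the validation step, mirroring what amdgpu_virt_init_data_exchange() does for the pf2vf block above; the wrapper name is made up:

/* Sketch: validate a received exchange block of @size bytes whose checksum
 * field has already been read out into @chksum. */
static bool example_fw_reserve_block_valid(void *block, unsigned long size,
                                           unsigned int key,
                                           unsigned int chksum)
{
        return amdgpu_virt_fw_reserve_get_checksum(block, size, key,
                                                   chksum) == chksum;
}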
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index e5fd0ff6b29d..b89d37fc406f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -58,6 +58,179 @@ struct amdgpu_virt_ops {
58 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); 58 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
59}; 59};
60 60
61/*
62 * Firmware Reserve Frame buffer
63 */
64struct amdgpu_virt_fw_reserve {
65 struct amdgim_pf2vf_info_header *p_pf2vf;
66 struct amdgim_vf2pf_info_header *p_vf2pf;
67 unsigned int checksum_key;
68};
69/*
70 * Defination between PF and VF
71 * Structures forcibly aligned to 4 to keep the same style as PF.
72 */
73#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
74
75#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
76 (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
77
78enum AMDGIM_FEATURE_FLAG {
79 /* GIM supports feature of Error log collecting */
80 AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
81 /* GIM supports feature of loading uCodes */
82 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
83};
84
85struct amdgim_pf2vf_info_header {
 86 /* the total structure size in bytes. */
87 uint32_t size;
88 /* version of this structure, written by the GIM */
89 uint32_t version;
90} __aligned(4);
91struct amdgim_pf2vf_info_v1 {
92 /* header contains size and version */
93 struct amdgim_pf2vf_info_header header;
94 /* max_width * max_height */
95 unsigned int uvd_enc_max_pixels_count;
96 /* 16x16 pixels/sec, codec independent */
97 unsigned int uvd_enc_max_bandwidth;
98 /* max_width * max_height */
99 unsigned int vce_enc_max_pixels_count;
100 /* 16x16 pixels/sec, codec independent */
101 unsigned int vce_enc_max_bandwidth;
102 /* MEC FW position in kb from the start of visible frame buffer */
103 unsigned int mecfw_kboffset;
104 /* The features flags of the GIM driver supports. */
105 unsigned int feature_flags;
 106 /* use private key from mailbox 2 to create checksum */
107 unsigned int checksum;
108} __aligned(4);
109
110struct amdgim_pf2vf_info_v2 {
111 /* header contains size and version */
112 struct amdgim_pf2vf_info_header header;
 113 /* use private key from mailbox 2 to create checksum */
114 uint32_t checksum;
115 /* The features flags of the GIM driver supports. */
116 uint32_t feature_flags;
117 /* max_width * max_height */
118 uint32_t uvd_enc_max_pixels_count;
119 /* 16x16 pixels/sec, codec independent */
120 uint32_t uvd_enc_max_bandwidth;
121 /* max_width * max_height */
122 uint32_t vce_enc_max_pixels_count;
123 /* 16x16 pixels/sec, codec independent */
124 uint32_t vce_enc_max_bandwidth;
125 /* MEC FW position in kb from the start of VF visible frame buffer */
126 uint64_t mecfw_kboffset;
127 /* MEC FW size in KB */
128 uint32_t mecfw_ksize;
129 /* UVD FW position in kb from the start of VF visible frame buffer */
130 uint64_t uvdfw_kboffset;
131 /* UVD FW size in KB */
132 uint32_t uvdfw_ksize;
133 /* VCE FW position in kb from the start of VF visible frame buffer */
134 uint64_t vcefw_kboffset;
135 /* VCE FW size in KB */
136 uint32_t vcefw_ksize;
137 uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
138} __aligned(4);
139
140
141struct amdgim_vf2pf_info_header {
 142 /* the total structure size in bytes. */
143 uint32_t size;
 144 /* version of this structure, written by the guest */
145 uint32_t version;
146} __aligned(4);
147
148struct amdgim_vf2pf_info_v1 {
149 /* header contains size and version */
150 struct amdgim_vf2pf_info_header header;
151 /* driver version */
152 char driver_version[64];
153 /* driver certification, 1=WHQL, 0=None */
154 unsigned int driver_cert;
155 /* guest OS type and version: need a define */
156 unsigned int os_info;
 157 /* in units of 1 MB */
158 unsigned int fb_usage;
159 /* guest gfx engine usage percentage */
160 unsigned int gfx_usage;
161 /* guest gfx engine health percentage */
162 unsigned int gfx_health;
163 /* guest compute engine usage percentage */
164 unsigned int compute_usage;
165 /* guest compute engine health percentage */
166 unsigned int compute_health;
167 /* guest vce engine usage percentage. 0xffff means N/A. */
168 unsigned int vce_enc_usage;
169 /* guest vce engine health percentage. 0xffff means N/A. */
170 unsigned int vce_enc_health;
171 /* guest uvd engine usage percentage. 0xffff means N/A. */
172 unsigned int uvd_enc_usage;
 173 /* guest uvd engine health percentage. 0xffff means N/A. */
174 unsigned int uvd_enc_health;
175 unsigned int checksum;
176} __aligned(4);
177
178struct amdgim_vf2pf_info_v2 {
179 /* header contains size and version */
180 struct amdgim_vf2pf_info_header header;
181 uint32_t checksum;
182 /* driver version */
183 uint8_t driver_version[64];
184 /* driver certification, 1=WHQL, 0=None */
185 uint32_t driver_cert;
186 /* guest OS type and version: need a define */
187 uint32_t os_info;
 188 /* in units of 1 MB */
189 uint32_t fb_usage;
190 /* guest gfx engine usage percentage */
191 uint32_t gfx_usage;
192 /* guest gfx engine health percentage */
193 uint32_t gfx_health;
194 /* guest compute engine usage percentage */
195 uint32_t compute_usage;
196 /* guest compute engine health percentage */
197 uint32_t compute_health;
198 /* guest vce engine usage percentage. 0xffff means N/A. */
199 uint32_t vce_enc_usage;
200 /* guest vce engine health percentage. 0xffff means N/A. */
201 uint32_t vce_enc_health;
202 /* guest uvd engine usage percentage. 0xffff means N/A. */
203 uint32_t uvd_enc_usage;
 204 /* guest uvd engine health percentage. 0xffff means N/A. */
205 uint32_t uvd_enc_health;
206 uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
207} __aligned(4);
208
209#define AMDGPU_FW_VRAM_VF2PF_VER 2
 210typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;
211
212#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
213 do { \
214 ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
215 } while (0)
216
217#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
218 do { \
219 (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
220 } while (0)
221
222#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
223 do { \
224 if (!adev->virt.fw_reserve.p_pf2vf) \
225 *(val) = 0; \
226 else { \
227 if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
228 *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
229 if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
230 *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
231 } \
232 } while (0)
233
61/* GPU virtualization */ 234/* GPU virtualization */
62struct amdgpu_virt { 235struct amdgpu_virt {
63 uint32_t caps; 236 uint32_t caps;
@@ -72,6 +245,7 @@ struct amdgpu_virt {
72 struct amdgpu_mm_table mm_table; 245 struct amdgpu_mm_table mm_table;
73 const struct amdgpu_virt_ops *ops; 246 const struct amdgpu_virt_ops *ops;
74 struct amdgpu_vf_error_buffer vf_errors; 247 struct amdgpu_vf_error_buffer vf_errors;
248 struct amdgpu_virt_fw_reserve fw_reserve;
75}; 249};
76 250
77#define AMDGPU_CSA_SIZE (8 * 1024) 251#define AMDGPU_CSA_SIZE (8 * 1024)
@@ -114,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
114int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job); 288int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
115int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); 289int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
116void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); 290void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
291int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
292 unsigned int key,
293 unsigned int chksum);
294void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
117 295
118#endif 296#endif
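
The VF2PF accessor macros hide the versioned struct layouts; the guest side is expected to refresh its half of the exchange region from time to time. A hedged sketch of such an update, assuming the caller already has an fb_usage figure and that resealing with a checksum computed over header.size bytes matches what the GIM expects (the init path above seals over the pf2vf size, so this detail is an assumption):

/* Sketch: publish a new framebuffer-usage value (in MB) and reseal the
 * vf2pf block with a fresh checksum. */
static void example_vf2pf_update_fb_usage(struct amdgpu_device *adev,
                                          uint32_t fb_usage_mb)
{
        uint32_t size;

        if (!adev->virt.fw_reserve.p_vf2pf)
                return;

        AMDGPU_FW_VRAM_VF2PF_WRITE(adev, fb_usage, fb_usage_mb);
        AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum, 0);
        AMDGPU_FW_VRAM_VF2PF_READ(adev, header.size, &size);
        AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                amdgpu_virt_fw_reserve_get_checksum(
                        adev->virt.fw_reserve.p_vf2pf, size,
                        adev->virt.fw_reserve.checksum_key, 0));
}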
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fee0a32ac56f..010d14195a5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -328,9 +328,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
328 AMDGPU_GEM_CREATE_SHADOW); 328 AMDGPU_GEM_CREATE_SHADOW);
329 329
330 if (vm->pte_support_ats) { 330 if (vm->pte_support_ats) {
331 init_value = AMDGPU_PTE_SYSTEM; 331 init_value = AMDGPU_PTE_DEFAULT_ATC;
332 if (level != adev->vm_manager.num_level - 1) 332 if (level != adev->vm_manager.num_level - 1)
333 init_value |= AMDGPU_PDE_PTE; 333 init_value |= AMDGPU_PDE_PTE;
334
334 } 335 }
335 336
336 /* walk over the address space and allocate the page tables */ 337 /* walk over the address space and allocate the page tables */
@@ -1034,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1034 int r; 1035 int r;
1035 1036
1036 amdgpu_sync_create(&sync); 1037 amdgpu_sync_create(&sync);
1037 amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner); 1038 amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1038 r = amdgpu_sync_wait(&sync, true); 1039 r = amdgpu_sync_wait(&sync, true);
1039 amdgpu_sync_free(&sync); 1040 amdgpu_sync_free(&sync);
1040 1041
@@ -1175,11 +1176,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
1175 amdgpu_ring_pad_ib(ring, params.ib); 1176 amdgpu_ring_pad_ib(ring, params.ib);
1176 amdgpu_sync_resv(adev, &job->sync, 1177 amdgpu_sync_resv(adev, &job->sync,
1177 parent->base.bo->tbo.resv, 1178 parent->base.bo->tbo.resv,
1178 AMDGPU_FENCE_OWNER_VM); 1179 AMDGPU_FENCE_OWNER_VM, false);
1179 if (shadow) 1180 if (shadow)
1180 amdgpu_sync_resv(adev, &job->sync, 1181 amdgpu_sync_resv(adev, &job->sync,
1181 shadow->tbo.resv, 1182 shadow->tbo.resv,
1182 AMDGPU_FENCE_OWNER_VM); 1183 AMDGPU_FENCE_OWNER_VM, false);
1183 1184
1184 WARN_ON(params.ib->length_dw > ndw); 1185 WARN_ON(params.ib->length_dw > ndw);
1185 r = amdgpu_job_submit(job, ring, &vm->entity, 1186 r = amdgpu_job_submit(job, ring, &vm->entity,
@@ -1643,7 +1644,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1643 goto error_free; 1644 goto error_free;
1644 1645
1645 r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, 1646 r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1646 owner); 1647 owner, false);
1647 if (r) 1648 if (r)
1648 goto error_free; 1649 goto error_free;
1649 1650
@@ -1698,6 +1699,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1698 struct drm_mm_node *nodes, 1699 struct drm_mm_node *nodes,
1699 struct dma_fence **fence) 1700 struct dma_fence **fence)
1700{ 1701{
1702 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1701 uint64_t pfn, start = mapping->start; 1703 uint64_t pfn, start = mapping->start;
1702 int r; 1704 int r;
1703 1705
@@ -1732,6 +1734,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1732 } 1734 }
1733 1735
1734 do { 1736 do {
1737 dma_addr_t *dma_addr = NULL;
1735 uint64_t max_entries; 1738 uint64_t max_entries;
1736 uint64_t addr, last; 1739 uint64_t addr, last;
1737 1740
@@ -1745,15 +1748,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1745 } 1748 }
1746 1749
1747 if (pages_addr) { 1750 if (pages_addr) {
1751 uint64_t count;
1752
1748 max_entries = min(max_entries, 16ull * 1024ull); 1753 max_entries = min(max_entries, 16ull * 1024ull);
1749 addr = 0; 1754 for (count = 1; count < max_entries; ++count) {
1755 uint64_t idx = pfn + count;
1756
1757 if (pages_addr[idx] !=
1758 (pages_addr[idx - 1] + PAGE_SIZE))
1759 break;
1760 }
1761
1762 if (count < min_linear_pages) {
1763 addr = pfn << PAGE_SHIFT;
1764 dma_addr = pages_addr;
1765 } else {
1766 addr = pages_addr[pfn];
1767 max_entries = count;
1768 }
1769
1750 } else if (flags & AMDGPU_PTE_VALID) { 1770 } else if (flags & AMDGPU_PTE_VALID) {
1751 addr += adev->vm_manager.vram_base_offset; 1771 addr += adev->vm_manager.vram_base_offset;
1772 addr += pfn << PAGE_SHIFT;
1752 } 1773 }
1753 addr += pfn << PAGE_SHIFT;
1754 1774
1755 last = min((uint64_t)mapping->last, start + max_entries - 1); 1775 last = min((uint64_t)mapping->last, start + max_entries - 1);
1756 r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm, 1776 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1757 start, last, flags, addr, 1777 start, last, flags, addr,
1758 fence); 1778 fence);
1759 if (r) 1779 if (r)
@@ -2017,7 +2037,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2017 list_del(&mapping->list); 2037 list_del(&mapping->list);
2018 2038
2019 if (vm->pte_support_ats) 2039 if (vm->pte_support_ats)
2020 init_pte_value = AMDGPU_PTE_SYSTEM; 2040 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2021 2041
2022 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, 2042 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
2023 mapping->start, mapping->last, 2043 mapping->start, mapping->last,
@@ -2629,7 +2649,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2629 2649
2630 if (adev->asic_type == CHIP_RAVEN) { 2650 if (adev->asic_type == CHIP_RAVEN) {
2631 vm->pte_support_ats = true; 2651 vm->pte_support_ats = true;
2632 init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; 2652 init_pde_value = AMDGPU_PTE_DEFAULT_ATC
2653 | AMDGPU_PDE_PTE;
2654
2633 } 2655 }
2634 } else 2656 } else
2635 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2657 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2737,8 +2759,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2737{ 2759{
2738 struct amdgpu_bo_va_mapping *mapping, *tmp; 2760 struct amdgpu_bo_va_mapping *mapping, *tmp;
2739 bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; 2761 bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2762 struct amdgpu_bo *root;
2740 u64 fault; 2763 u64 fault;
2741 int i; 2764 int i, r;
2742 2765
2743 /* Clear pending page faults from IH when the VM is destroyed */ 2766 /* Clear pending page faults from IH when the VM is destroyed */
2744 while (kfifo_get(&vm->faults, &fault)) 2767 while (kfifo_get(&vm->faults, &fault))
@@ -2773,7 +2796,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2773 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 2796 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2774 } 2797 }
2775 2798
2776 amdgpu_vm_free_levels(&vm->root); 2799 root = amdgpu_bo_ref(vm->root.base.bo);
2800 r = amdgpu_bo_reserve(root, true);
2801 if (r) {
2802 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2803 } else {
2804 amdgpu_vm_free_levels(&vm->root);
2805 amdgpu_bo_unreserve(root);
2806 }
2807 amdgpu_bo_unref(&root);
2777 dma_fence_put(vm->last_update); 2808 dma_fence_put(vm->last_update);
2778 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2809 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2779 amdgpu_vm_free_reserved_vmid(adev, vm, i); 2810 amdgpu_vm_free_reserved_vmid(adev, vm, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d68f39b4e5e7..aa914256b4bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -73,6 +73,16 @@ struct amdgpu_bo_list_entry;
73#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) 73#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
74#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) 74#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
75 75
76/* For Raven */
77#define AMDGPU_MTYPE_CC 2
78
79#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
80 | AMDGPU_PTE_SNOOPED \
81 | AMDGPU_PTE_EXECUTABLE \
82 | AMDGPU_PTE_READABLE \
83 | AMDGPU_PTE_WRITEABLE \
84 | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
85
76/* How to programm VM fault handling */ 86/* How to programm VM fault handling */
77#define AMDGPU_VM_FAULT_STOP_NEVER 0 87#define AMDGPU_VM_FAULT_STOP_NEVER 0
78#define AMDGPU_VM_FAULT_STOP_FIRST 1 88#define AMDGPU_VM_FAULT_STOP_FIRST 1
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 147e92b3a959..b8002ac3e536 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -20,6 +20,7 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#include <linux/kernel.h>
23#include <linux/firmware.h> 24#include <linux/firmware.h>
24#include <drm/drmP.h> 25#include <drm/drmP.h>
25#include "amdgpu.h" 26#include "amdgpu.h"
@@ -3952,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3952 adev->gfx.rlc.reg_list_format_size_bytes >> 2, 3953 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3953 unique_indices, 3954 unique_indices,
3954 &indices_count, 3955 &indices_count,
3955 sizeof(unique_indices) / sizeof(int), 3956 ARRAY_SIZE(unique_indices),
3956 indirect_start_offsets, 3957 indirect_start_offsets,
3957 &offset_count, 3958 &offset_count,
3958 sizeof(indirect_start_offsets)/sizeof(int)); 3959 ARRAY_SIZE(indirect_start_offsets));
3959 3960
3960 /* save and restore list */ 3961 /* save and restore list */
3961 WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1); 3962 WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3977,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
3977 /* starting offsets starts */ 3978 /* starting offsets starts */
3978 WREG32(mmRLC_GPM_SCRATCH_ADDR, 3979 WREG32(mmRLC_GPM_SCRATCH_ADDR,
3979 adev->gfx.rlc.starting_offsets_start); 3980 adev->gfx.rlc.starting_offsets_start);
3980 for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++) 3981 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
3981 WREG32(mmRLC_GPM_SCRATCH_DATA, 3982 WREG32(mmRLC_GPM_SCRATCH_DATA,
3982 indirect_start_offsets[i]); 3983 indirect_start_offsets[i]);
3983 3984
3984 /* unique indices */ 3985 /* unique indices */
3985 temp = mmRLC_SRM_INDEX_CNTL_ADDR_0; 3986 temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
3986 data = mmRLC_SRM_INDEX_CNTL_DATA_0; 3987 data = mmRLC_SRM_INDEX_CNTL_DATA_0;
3987 for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) { 3988 for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
3988 if (unique_indices[i] != 0) { 3989 if (unique_indices[i] != 0) {
3989 WREG32(temp + i, unique_indices[i] & 0x3FFFF); 3990 WREG32(temp + i, unique_indices[i] & 0x3FFFF);
3990 WREG32(data + i, unique_indices[i] >> 20); 3991 WREG32(data + i, unique_indices[i] >> 20);
@@ -6394,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6394 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 6395 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6395} 6396}
6396 6397
6398static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
6399 bool acquire)
6400{
6401 struct amdgpu_device *adev = ring->adev;
6402 int pipe_num, tmp, reg;
6403 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
6404
6405 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
6406
6407 /* first me only has 2 entries, GFX and HP3D */
6408 if (ring->me > 0)
6409 pipe_num -= 2;
6410
6411 reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
6412 tmp = RREG32(reg);
6413 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
6414 WREG32(reg, tmp);
6415}
6416
6417static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
6418 struct amdgpu_ring *ring,
6419 bool acquire)
6420{
6421 int i, pipe;
6422 bool reserve;
6423 struct amdgpu_ring *iring;
6424
6425 mutex_lock(&adev->gfx.pipe_reserve_mutex);
6426 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
6427 if (acquire)
6428 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6429 else
6430 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6431
6432 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
6433 /* Clear all reservations - everyone reacquires all resources */
6434 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
6435 gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
6436 true);
6437
6438 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
6439 gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
6440 true);
6441 } else {
6442 /* Lower all pipes without a current reservation */
6443 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
6444 iring = &adev->gfx.gfx_ring[i];
6445 pipe = amdgpu_gfx_queue_to_bit(adev,
6446 iring->me,
6447 iring->pipe,
6448 0);
6449 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6450 gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6451 }
6452
6453 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
6454 iring = &adev->gfx.compute_ring[i];
6455 pipe = amdgpu_gfx_queue_to_bit(adev,
6456 iring->me,
6457 iring->pipe,
6458 0);
6459 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6460 gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6461 }
6462 }
6463
6464 mutex_unlock(&adev->gfx.pipe_reserve_mutex);
6465}
6466
6467static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
6468 struct amdgpu_ring *ring,
6469 bool acquire)
6470{
6471 uint32_t pipe_priority = acquire ? 0x2 : 0x0;
6472 uint32_t queue_priority = acquire ? 0xf : 0x0;
6473
6474 mutex_lock(&adev->srbm_mutex);
6475 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6476
6477 WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
6478 WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
6479
6480 vi_srbm_select(adev, 0, 0, 0, 0);
6481 mutex_unlock(&adev->srbm_mutex);
6482}
6483static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
6484 enum amd_sched_priority priority)
6485{
6486 struct amdgpu_device *adev = ring->adev;
6487 bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
6488
6489 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6490 return;
6491
6492 gfx_v8_0_hqd_set_priority(adev, ring, acquire);
6493 gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
6494}
6495
6397static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, 6496static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
6398 u64 addr, u64 seq, 6497 u64 addr, u64 seq,
6399 unsigned flags) 6498 unsigned flags)
@@ -6839,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
6839 .test_ib = gfx_v8_0_ring_test_ib, 6938 .test_ib = gfx_v8_0_ring_test_ib,
6840 .insert_nop = amdgpu_ring_insert_nop, 6939 .insert_nop = amdgpu_ring_insert_nop,
6841 .pad_ib = amdgpu_ring_generic_pad_ib, 6940 .pad_ib = amdgpu_ring_generic_pad_ib,
6941 .set_priority = gfx_v8_0_ring_set_priority_compute,
6842}; 6942};
6843 6943
6844static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { 6944static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 99a5b3b92e8e..7f15bb2c5233 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -20,6 +20,7 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 */ 22 */
23#include <linux/kernel.h>
23#include <linux/firmware.h> 24#include <linux/firmware.h>
24#include <drm/drmP.h> 25#include <drm/drmP.h>
25#include "amdgpu.h" 26#include "amdgpu.h"
@@ -1730,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1730 adev->gfx.rlc.reg_list_format_size_bytes >> 2, 1731 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1731 unique_indirect_regs, 1732 unique_indirect_regs,
1732 &unique_indirect_reg_count, 1733 &unique_indirect_reg_count,
1733 sizeof(unique_indirect_regs)/sizeof(int), 1734 ARRAY_SIZE(unique_indirect_regs),
1734 indirect_start_offsets, 1735 indirect_start_offsets,
1735 &indirect_start_offsets_count, 1736 &indirect_start_offsets_count,
1736 sizeof(indirect_start_offsets)/sizeof(int)); 1737 ARRAY_SIZE(indirect_start_offsets));
1737 1738
1738 /* enable auto inc in case it is disabled */ 1739 /* enable auto inc in case it is disabled */
1739 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); 1740 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1770,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1770 /* write the starting offsets to RLC scratch ram */ 1771 /* write the starting offsets to RLC scratch ram */
1771 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 1772 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1772 adev->gfx.rlc.starting_offsets_start); 1773 adev->gfx.rlc.starting_offsets_start);
1773 for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++) 1774 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1774 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), 1775 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1775 indirect_start_offsets[i]); 1776 indirect_start_offsets[i]);
1776 1777
1777 /* load unique indirect regs*/ 1778 /* load unique indirect regs*/
1778 for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) { 1779 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1779 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i, 1780 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1780 unique_indirect_regs[i] & 0x3FFFF); 1781 unique_indirect_regs[i] & 0x3FFFF);
1781 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i, 1782 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2812d88a8bdd..b4906d2f30d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -183,6 +183,12 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
183 pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); 183 pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
184 return r; 184 return r;
185 } 185 }
186 /* Retrieve checksum from mailbox2 */
187 if (req == IDH_REQ_GPU_INIT_ACCESS) {
188 adev->virt.fw_reserve.checksum_key =
189 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
190 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
191 }
186 } 192 }
187 193
188 return 0; 194 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 1c006ba9d826..3ca9d114f630 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
279} 279}
280static u32 soc15_get_xclk(struct amdgpu_device *adev) 280static u32 soc15_get_xclk(struct amdgpu_device *adev)
281{ 281{
282 if (adev->asic_type == CHIP_VEGA10) 282 return adev->clock.spll.reference_freq;
283 return adev->clock.spll.reference_freq/4;
284 else
285 return adev->clock.spll.reference_freq;
286} 283}
287 284
288 285
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 60af7310a234..71299c67c517 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -268,8 +268,9 @@ err:
268 * 268 *
269 * Close up a stream for HW test or if userspace failed to do so 269 * Close up a stream for HW test or if userspace failed to do so
270 */ 270 */
271int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 271static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
272 bool direct, struct dma_fence **fence) 272 uint32_t handle,
273 bool direct, struct dma_fence **fence)
273{ 274{
274 const unsigned ib_size_dw = 16; 275 const unsigned ib_size_dw = 16;
275 struct amdgpu_job *job; 276 struct amdgpu_job *job;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 189f3b54a385..ad1f6b57884b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -961,18 +961,13 @@ static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
961 961
962static int cz_start_dpm(struct pp_hwmgr *hwmgr) 962static int cz_start_dpm(struct pp_hwmgr *hwmgr)
963{ 963{
964 int ret = 0;
965 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 964 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
966 unsigned long dpm_features = 0;
967 965
968 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; 966 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
969 dpm_features |= SCLK_DPM_MASK;
970 967
971 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 968 return smum_send_msg_to_smc_with_parameter(hwmgr,
972 PPSMC_MSG_EnableAllSmuFeatures, 969 PPSMC_MSG_EnableAllSmuFeatures,
973 dpm_features); 970 SCLK_DPM_MASK);
974
975 return ret;
976} 971}
977 972
978static int cz_stop_dpm(struct pp_hwmgr *hwmgr) 973static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -1279,27 +1274,18 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1279 1274
1280int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) 1275int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1281{ 1276{
1282 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1277 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1283 PHM_PlatformCaps_UVDPowerGating)) 1278 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1284 return smum_send_msg_to_smc(hwmgr,
1285 PPSMC_MSG_UVDPowerOFF);
1286 return 0; 1279 return 0;
1287} 1280}
1288 1281
1289int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) 1282int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1290{ 1283{
1291 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1284 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1292 PHM_PlatformCaps_UVDPowerGating)) { 1285 return smum_send_msg_to_smc_with_parameter(
1293 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1286 hwmgr,
1294 PHM_PlatformCaps_UVDDynamicPowerGating)) { 1287 PPSMC_MSG_UVDPowerON,
1295 return smum_send_msg_to_smc_with_parameter( 1288 PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
1296 hwmgr,
1297 PPSMC_MSG_UVDPowerON, 1);
1298 } else {
1299 return smum_send_msg_to_smc_with_parameter(
1300 hwmgr,
1301 PPSMC_MSG_UVDPowerON, 0);
1302 }
1303 } 1289 }
1304 1290
1305 return 0; 1291 return 0;
@@ -1313,17 +1299,16 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1313 1299
1314 if (!bgate) { 1300 if (!bgate) {
1315 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ 1301 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1316 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1302 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1317 PHM_PlatformCaps_StablePState) 1303 hwmgr->en_umd_pstate) {
1318 || hwmgr->en_umd_pstate) {
1319 cz_hwmgr->uvd_dpm.hard_min_clk = 1304 cz_hwmgr->uvd_dpm.hard_min_clk =
1320 ptable->entries[ptable->count - 1].vclk; 1305 ptable->entries[ptable->count - 1].vclk;
1321 1306
1322 smum_send_msg_to_smc_with_parameter(hwmgr, 1307 smum_send_msg_to_smc_with_parameter(hwmgr,
1323 PPSMC_MSG_SetUvdHardMin, 1308 PPSMC_MSG_SetUvdHardMin,
1324 cz_get_uvd_level(hwmgr, 1309 cz_get_uvd_level(hwmgr,
1325 cz_hwmgr->uvd_dpm.hard_min_clk, 1310 cz_hwmgr->uvd_dpm.hard_min_clk,
1326 PPSMC_MSG_SetUvdHardMin)); 1311 PPSMC_MSG_SetUvdHardMin));
1327 1312
1328 cz_enable_disable_uvd_dpm(hwmgr, true); 1313 cz_enable_disable_uvd_dpm(hwmgr, true);
1329 } else { 1314 } else {
@@ -1343,17 +1328,16 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1343 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1328 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1344 1329
1345 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ 1330 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1346 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1331 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1347 PHM_PlatformCaps_StablePState) 1332 hwmgr->en_umd_pstate) {
1348 || hwmgr->en_umd_pstate) {
1349 cz_hwmgr->vce_dpm.hard_min_clk = 1333 cz_hwmgr->vce_dpm.hard_min_clk =
1350 ptable->entries[ptable->count - 1].ecclk; 1334 ptable->entries[ptable->count - 1].ecclk;
1351 1335
1352 smum_send_msg_to_smc_with_parameter(hwmgr, 1336 smum_send_msg_to_smc_with_parameter(hwmgr,
1353 PPSMC_MSG_SetEclkHardMin, 1337 PPSMC_MSG_SetEclkHardMin,
1354 cz_get_eclk_level(hwmgr, 1338 cz_get_eclk_level(hwmgr,
1355 cz_hwmgr->vce_dpm.hard_min_clk, 1339 cz_hwmgr->vce_dpm.hard_min_clk,
1356 PPSMC_MSG_SetEclkHardMin)); 1340 PPSMC_MSG_SetEclkHardMin));
1357 } else { 1341 } else {
1358 /*Program HardMin based on the vce_arbiter.ecclk */ 1342 /*Program HardMin based on the vce_arbiter.ecclk */
1359 if (hwmgr->vce_arbiter.ecclk == 0) { 1343 if (hwmgr->vce_arbiter.ecclk == 0) {
@@ -1366,10 +1350,10 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1366 } else { 1350 } else {
1367 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1351 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1368 smum_send_msg_to_smc_with_parameter(hwmgr, 1352 smum_send_msg_to_smc_with_parameter(hwmgr,
1369 PPSMC_MSG_SetEclkHardMin, 1353 PPSMC_MSG_SetEclkHardMin,
1370 cz_get_eclk_level(hwmgr, 1354 cz_get_eclk_level(hwmgr,
1371 cz_hwmgr->vce_dpm.hard_min_clk, 1355 cz_hwmgr->vce_dpm.hard_min_clk,
1372 PPSMC_MSG_SetEclkHardMin)); 1356 PPSMC_MSG_SetEclkHardMin));
1373 } 1357 }
1374 } 1358 }
1375 return 0; 1359 return 0;
@@ -1377,8 +1361,7 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1377 1361
1378int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) 1362int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1379{ 1363{
1380 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1364 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1381 PHM_PlatformCaps_VCEPowerGating))
1382 return smum_send_msg_to_smc(hwmgr, 1365 return smum_send_msg_to_smc(hwmgr,
1383 PPSMC_MSG_VCEPowerOFF); 1366 PPSMC_MSG_VCEPowerOFF);
1384 return 0; 1367 return 0;
@@ -1386,8 +1369,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1386 1369
1387int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) 1370int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1388{ 1371{
1389 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1372 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1390 PHM_PlatformCaps_VCEPowerGating))
1391 return smum_send_msg_to_smc(hwmgr, 1373 return smum_send_msg_to_smc(hwmgr,
1392 PPSMC_MSG_VCEPowerON); 1374 PPSMC_MSG_VCEPowerON);
1393 return 0; 1375 return 0;
@@ -1871,6 +1853,33 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1871 } 1853 }
1872} 1854}
1873 1855
1856static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1857 uint32_t virtual_addr_low,
1858 uint32_t virtual_addr_hi,
1859 uint32_t mc_addr_low,
1860 uint32_t mc_addr_hi,
1861 uint32_t size)
1862{
1863 smum_send_msg_to_smc_with_parameter(hwmgr,
1864 PPSMC_MSG_DramAddrHiVirtual,
1865 mc_addr_hi);
1866 smum_send_msg_to_smc_with_parameter(hwmgr,
1867 PPSMC_MSG_DramAddrLoVirtual,
1868 mc_addr_low);
1869 smum_send_msg_to_smc_with_parameter(hwmgr,
1870 PPSMC_MSG_DramAddrHiPhysical,
1871 virtual_addr_hi);
1872 smum_send_msg_to_smc_with_parameter(hwmgr,
1873 PPSMC_MSG_DramAddrLoPhysical,
1874 virtual_addr_low);
1875
1876 smum_send_msg_to_smc_with_parameter(hwmgr,
1877 PPSMC_MSG_DramBufferSize,
1878 size);
1879 return 0;
1880}
1881
1882
1874static const struct pp_hwmgr_func cz_hwmgr_funcs = { 1883static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1875 .backend_init = cz_hwmgr_backend_init, 1884 .backend_init = cz_hwmgr_backend_init,
1876 .backend_fini = cz_hwmgr_backend_fini, 1885 .backend_fini = cz_hwmgr_backend_fini,
@@ -1894,12 +1903,14 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1894 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, 1903 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
1895 .get_clock_by_type = cz_get_clock_by_type, 1904 .get_clock_by_type = cz_get_clock_by_type,
1896 .get_max_high_clocks = cz_get_max_high_clocks, 1905 .get_max_high_clocks = cz_get_max_high_clocks,
1906 .get_temperature = cz_thermal_get_temperature,
1897 .read_sensor = cz_read_sensor, 1907 .read_sensor = cz_read_sensor,
1898 .power_off_asic = cz_power_off_asic, 1908 .power_off_asic = cz_power_off_asic,
1899 .asic_setup = cz_setup_asic_task, 1909 .asic_setup = cz_setup_asic_task,
1900 .dynamic_state_management_enable = cz_enable_dpm_tasks, 1910 .dynamic_state_management_enable = cz_enable_dpm_tasks,
1901 .power_state_set = cz_set_power_state_tasks, 1911 .power_state_set = cz_set_power_state_tasks,
1902 .dynamic_state_management_disable = cz_disable_dpm_tasks, 1912 .dynamic_state_management_disable = cz_disable_dpm_tasks,
1913 .notify_cac_buffer_info = cz_notify_cac_buffer_info,
1903}; 1914};
1904 1915
1905int cz_init_function_pointers(struct pp_hwmgr *hwmgr) 1916int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4826b2991b7e..e32f18a99074 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4645,6 +4645,47 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
4645 return 0; 4645 return 0;
4646} 4646}
4647 4647
4648static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4649 uint32_t virtual_addr_low,
4650 uint32_t virtual_addr_hi,
4651 uint32_t mc_addr_low,
4652 uint32_t mc_addr_hi,
4653 uint32_t size)
4654{
4655 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4656
4657 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4658 data->soft_regs_start +
4659 smum_get_offsetof(hwmgr,
4660 SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4661 mc_addr_hi);
4662
4663 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4664 data->soft_regs_start +
4665 smum_get_offsetof(hwmgr,
4666 SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4667 mc_addr_low);
4668
4669 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4670 data->soft_regs_start +
4671 smum_get_offsetof(hwmgr,
4672 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4673 virtual_addr_hi);
4674
4675 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4676 data->soft_regs_start +
4677 smum_get_offsetof(hwmgr,
4678 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4679 virtual_addr_low);
4680
4681 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4682 data->soft_regs_start +
4683 smum_get_offsetof(hwmgr,
4684 SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4685 size);
4686 return 0;
4687}
4688
4648static const struct pp_hwmgr_func smu7_hwmgr_funcs = { 4689static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4649 .backend_init = &smu7_hwmgr_backend_init, 4690 .backend_init = &smu7_hwmgr_backend_init,
4650 .backend_fini = &smu7_hwmgr_backend_fini, 4691 .backend_fini = &smu7_hwmgr_backend_fini,
@@ -4696,6 +4737,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4696 .avfs_control = smu7_avfs_control, 4737 .avfs_control = smu7_avfs_control,
4697 .disable_smc_firmware_ctf = smu7_thermal_disable_alert, 4738 .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
4698 .start_thermal_controller = smu7_start_thermal_controller, 4739 .start_thermal_controller = smu7_start_thermal_controller,
4740 .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
4699}; 4741};
4700 4742
4701uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, 4743uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 48de45ec0eaf..0519338e0e5e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1161,6 +1161,8 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1161{ 1161{
1162 int i; 1162 int i;
1163 1163
1164 dpm_table->count = 0;
1165
1164 for (i = 0; i < dep_table->count; i++) { 1166 for (i = 0; i < dep_table->count; i++) {
1165 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <= 1167 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1166 dep_table->entries[i].clk) { 1168 dep_table->entries[i].clk) {
@@ -1269,10 +1271,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1269 return -EINVAL); 1271 return -EINVAL);
1270 1272
1271 /* Initialize Sclk DPM table based on allow Sclk values */ 1273 /* Initialize Sclk DPM table based on allow Sclk values */
1272 data->dpm_table.soc_table.count = 0;
1273 data->dpm_table.gfx_table.count = 0;
1274 data->dpm_table.dcef_table.count = 0;
1275
1276 dpm_table = &(data->dpm_table.soc_table); 1274 dpm_table = &(data->dpm_table.soc_table);
1277 vega10_setup_default_single_dpm_table(hwmgr, 1275 vega10_setup_default_single_dpm_table(hwmgr,
1278 dpm_table, 1276 dpm_table,
@@ -4994,6 +4992,33 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4994 return 0; 4992 return 0;
4995} 4993}
4996 4994
4995static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4996 uint32_t virtual_addr_low,
4997 uint32_t virtual_addr_hi,
4998 uint32_t mc_addr_low,
4999 uint32_t mc_addr_hi,
5000 uint32_t size)
5001{
5002 smum_send_msg_to_smc_with_parameter(hwmgr,
5003 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
5004 virtual_addr_hi);
5005 smum_send_msg_to_smc_with_parameter(hwmgr,
5006 PPSMC_MSG_SetSystemVirtualDramAddrLow,
5007 virtual_addr_low);
5008 smum_send_msg_to_smc_with_parameter(hwmgr,
5009 PPSMC_MSG_DramLogSetDramAddrHigh,
5010 mc_addr_hi);
5011
5012 smum_send_msg_to_smc_with_parameter(hwmgr,
5013 PPSMC_MSG_DramLogSetDramAddrLow,
5014 mc_addr_low);
5015
5016 smum_send_msg_to_smc_with_parameter(hwmgr,
5017 PPSMC_MSG_DramLogSetDramSize,
5018 size);
5019 return 0;
5020}
5021
4997static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr, 5022static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
4998 const void *info) 5023 const void *info)
4999{ 5024{
@@ -5079,7 +5104,9 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5079 .get_mclk_od = vega10_get_mclk_od, 5104 .get_mclk_od = vega10_get_mclk_od,
5080 .set_mclk_od = vega10_set_mclk_od, 5105 .set_mclk_od = vega10_set_mclk_od,
5081 .avfs_control = vega10_avfs_enable, 5106 .avfs_control = vega10_avfs_enable,
5107 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
5082 .register_internal_thermal_interrupt = vega10_register_thermal_interrupt, 5108 .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
5109 .start_thermal_controller = vega10_start_thermal_controller,
5083}; 5110};
5084 5111
5085int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) 5112int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index e343df190375..f14c7611fad3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -291,8 +291,7 @@ static int get_mm_clock_voltage_table(
291 table_size = sizeof(uint32_t) + 291 table_size = sizeof(uint32_t) +
292 sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * 292 sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) *
293 mm_dependency_table->ucNumEntries; 293 mm_dependency_table->ucNumEntries;
294 mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) 294 mm_table = kzalloc(table_size, GFP_KERNEL);
295 kzalloc(table_size, GFP_KERNEL);
296 295
297 if (!mm_table) 296 if (!mm_table)
298 return -ENOMEM; 297 return -ENOMEM;
@@ -519,8 +518,7 @@ static int get_socclk_voltage_dependency_table(
519 sizeof(phm_ppt_v1_clock_voltage_dependency_record) * 518 sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
520 clk_dep_table->ucNumEntries; 519 clk_dep_table->ucNumEntries;
521 520
522 clk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 521 clk_table = kzalloc(table_size, GFP_KERNEL);
523 kzalloc(table_size, GFP_KERNEL);
524 522
525 if (!clk_table) 523 if (!clk_table)
526 return -ENOMEM; 524 return -ENOMEM;
@@ -554,8 +552,7 @@ static int get_mclk_voltage_dependency_table(
554 sizeof(phm_ppt_v1_clock_voltage_dependency_record) * 552 sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
555 mclk_dep_table->ucNumEntries; 553 mclk_dep_table->ucNumEntries;
556 554
557 mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 555 mclk_table = kzalloc(table_size, GFP_KERNEL);
558 kzalloc(table_size, GFP_KERNEL);
559 556
560 if (!mclk_table) 557 if (!mclk_table)
561 return -ENOMEM; 558 return -ENOMEM;
@@ -596,8 +593,7 @@ static int get_gfxclk_voltage_dependency_table(
596 sizeof(phm_ppt_v1_clock_voltage_dependency_record) * 593 sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
597 clk_dep_table->ucNumEntries; 594 clk_dep_table->ucNumEntries;
598 595
599 clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) 596 clk_table = kzalloc(table_size, GFP_KERNEL);
600 kzalloc(table_size, GFP_KERNEL);
601 597
602 if (!clk_table) 598 if (!clk_table)
603 return -ENOMEM; 599 return -ENOMEM;
@@ -663,8 +659,7 @@ static int get_pix_clk_voltage_dependency_table(
663 sizeof(phm_ppt_v1_clock_voltage_dependency_record) * 659 sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
664 clk_dep_table->ucNumEntries; 660 clk_dep_table->ucNumEntries;
665 661
666 clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) 662 clk_table = kzalloc(table_size, GFP_KERNEL);
667 kzalloc(table_size, GFP_KERNEL);
668 663
669 if (!clk_table) 664 if (!clk_table)
670 return -ENOMEM; 665 return -ENOMEM;
@@ -728,8 +723,7 @@ static int get_dcefclk_voltage_dependency_table(
728 sizeof(phm_ppt_v1_clock_voltage_dependency_record) * 723 sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
729 num_entries; 724 num_entries;
730 725
731 clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) 726 clk_table = kzalloc(table_size, GFP_KERNEL);
732 kzalloc(table_size, GFP_KERNEL);
733 727
734 if (!clk_table) 728 if (!clk_table)
735 return -ENOMEM; 729 return -ENOMEM;
@@ -772,8 +766,7 @@ static int get_pcie_table(struct pp_hwmgr *hwmgr,
772 sizeof(struct phm_ppt_v1_pcie_record) * 766 sizeof(struct phm_ppt_v1_pcie_record) *
773 atom_pcie_table->ucNumEntries; 767 atom_pcie_table->ucNumEntries;
774 768
775 pcie_table = (struct phm_ppt_v1_pcie_table *) 769 pcie_table = kzalloc(table_size, GFP_KERNEL);
776 kzalloc(table_size, GFP_KERNEL);
777 770
778 if (!pcie_table) 771 if (!pcie_table)
779 return -ENOMEM; 772 return -ENOMEM;
@@ -1026,10 +1019,9 @@ static int get_vddc_lookup_table(
1026 table_size = sizeof(uint32_t) + 1019 table_size = sizeof(uint32_t) +
1027 sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; 1020 sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
1028 1021
1029 table = (phm_ppt_v1_voltage_lookup_table *) 1022 table = kzalloc(table_size, GFP_KERNEL);
1030 kzalloc(table_size, GFP_KERNEL);
1031 1023
1032 if (NULL == table) 1024 if (table == NULL)
1033 return -ENOMEM; 1025 return -ENOMEM;
1034 1026
1035 table->count = vddc_lookup_pp_tables->ucNumEntries; 1027 table->count = vddc_lookup_pp_tables->ucNumEntries;
@@ -1138,12 +1130,12 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
1138 1130
1139 hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL); 1131 hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL);
1140 1132
1141 PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable), 1133 PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
1142 "Failed to allocate hwmgr->pptable!", return -ENOMEM); 1134 "Failed to allocate hwmgr->pptable!", return -ENOMEM);
1143 1135
1144 powerplay_table = get_powerplay_table(hwmgr); 1136 powerplay_table = get_powerplay_table(hwmgr);
1145 1137
1146 PP_ASSERT_WITH_CODE((NULL != powerplay_table), 1138 PP_ASSERT_WITH_CODE((powerplay_table != NULL),
1147 "Missing PowerPlay Table!", return -1); 1139 "Missing PowerPlay Table!", return -1);
1148 1140
1149 result = check_powerplay_tables(hwmgr, powerplay_table); 1141 result = check_powerplay_tables(hwmgr, powerplay_table);
@@ -1182,7 +1174,6 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
1182 1174
1183static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) 1175static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1184{ 1176{
1185 int result = 0;
1186 struct phm_ppt_v2_information *pp_table_info = 1177 struct phm_ppt_v2_information *pp_table_info =
1187 (struct phm_ppt_v2_information *)(hwmgr->pptable); 1178 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1188 1179
@@ -1225,7 +1216,7 @@ static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1225 kfree(hwmgr->pptable); 1216 kfree(hwmgr->pptable);
1226 hwmgr->pptable = NULL; 1217 hwmgr->pptable = NULL;
1227 1218
1228 return result; 1219 return 0;
1229} 1220}
1230 1221
1231const struct pp_table_func vega10_pptable_funcs = { 1222const struct pp_table_func vega10_pptable_funcs = {
@@ -1238,7 +1229,7 @@ int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
1238 const ATOM_Vega10_State_Array *state_arrays; 1229 const ATOM_Vega10_State_Array *state_arrays;
1239 const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); 1230 const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
1240 1231
1241 PP_ASSERT_WITH_CODE((NULL != pp_table), 1232 PP_ASSERT_WITH_CODE((pp_table != NULL),
1242 "Missing PowerPlay Table!", return -1); 1233 "Missing PowerPlay Table!", return -1);
1243 PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >= 1234 PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >=
1244 ATOM_Vega10_TABLE_REVISION_VEGA10), 1235 ATOM_Vega10_TABLE_REVISION_VEGA10),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index f34ce04cfd89..82f10bdd5f07 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -71,7 +71,8 @@ extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
71extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); 71extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
72extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr); 72extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
73extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); 73extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
74 74extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
75 struct PP_TemperatureRange *range);
75extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); 76extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
76 77
77#endif 78#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
deleted file mode 100644
index 9d391f0eca94..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
+++ /dev/null
@@ -1,2007 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _FIJI_PWRVIRUS_H_
24#define _FIJI_PWRVIRUS_H_
25
26#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
27#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
28#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
29#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
30
31struct PWR_Command_Table
32{
33 uint32_t data;
34 uint32_t reg;
35};
36typedef struct PWR_Command_Table PWR_Command_Table;
37
38struct PWR_DFY_Section {
39 uint32_t dfy_cntl;
40 uint32_t dfy_addr_hi, dfy_addr_lo;
41 uint32_t dfy_size;
42 uint32_t dfy_data[];
43};
44
45typedef struct PWR_DFY_Section PWR_DFY_Section;
46
47static const PWR_Command_Table PwrVirusTable_pre[] =
48{
49 { 0x100100b6, mmPCIE_INDEX },
50 { 0x00000000, mmPCIE_DATA },
51 { 0x100100b6, mmPCIE_INDEX },
52 { 0x0300078c, mmPCIE_DATA },
53 { 0x00000000, mmBIF_CLK_CTRL },
54 { 0x00000001, mmBIF_CLK_CTRL },
55 { 0x00000000, mmBIF_CLK_CTRL },
56 { 0x00000003, mmBIF_FB_EN },
57 { 0x00000000, mmBIF_FB_EN },
58 { 0x00000001, mmBIF_DOORBELL_APER_EN },
59 { 0x00000000, mmBIF_DOORBELL_APER_EN },
60 { 0x014000c0, mmPCIE_INDEX },
61 { 0x00000000, mmPCIE_DATA },
62 { 0x014000c0, mmPCIE_INDEX },
63 { 0x22000000, mmPCIE_DATA },
64 { 0x014000c0, mmPCIE_INDEX },
65 { 0x00000000, mmPCIE_DATA },
66 /*
67 { 0x009f0090, mmMC_VM_FB_LOCATION },
68 { 0x00000000, mmMC_CITF_CNTL },
69 { 0x00000000, mmMC_VM_FB_LOCATION },
70 { 0x009f0090, mmMC_VM_FB_LOCATION },
71 { 0x00000000, mmMC_VM_FB_LOCATION },
72 { 0x009f0090, mmMC_VM_FB_LOCATION },
73 { 0x00000000, mmMC_VM_FB_OFFSET },*/
74 { 0x00000000, mmRLC_CSIB_ADDR_LO },
75 { 0x00000000, mmRLC_CSIB_ADDR_HI },
76 { 0x00000000, mmRLC_CSIB_LENGTH },
77 /*
78 { 0x00000000, mmMC_VM_MX_L1_TLB_CNTL },
79 { 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR },
80 { 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR },
81 { 0x00000000, mmMC_VM_FB_LOCATION },
82 { 0x009f0090, mmMC_VM_FB_LOCATION },*/
83 { 0x00000000, mmVM_CONTEXT0_CNTL },
84 { 0x00000000, mmVM_CONTEXT1_CNTL },
85 /*
86 { 0x00000000, mmMC_VM_AGP_BASE },
87 { 0x00000002, mmMC_VM_AGP_BOT },
88 { 0x00000000, mmMC_VM_AGP_TOP },*/
89 { 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR },
90 { 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR },
91 { 0x00000002, mmATC_VM_APERTURE0_CNTL },
92 { 0x0000ffff, mmATC_VM_APERTURE0_CNTL2 },
93 { 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR },
94 { 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR },
95 { 0x00000000, mmATC_VM_APERTURE1_CNTL },
96 { 0x00000000, mmATC_VM_APERTURE1_CNTL2 },
97 //{ 0x00000000, mmMC_ARB_RAMCFG },
98 { 0x12011003, mmGB_ADDR_CONFIG },
99 { 0x00800010, mmGB_TILE_MODE0 },
100 { 0x00800810, mmGB_TILE_MODE1 },
101 { 0x00801010, mmGB_TILE_MODE2 },
102 { 0x00801810, mmGB_TILE_MODE3 },
103 { 0x00802810, mmGB_TILE_MODE4 },
104 { 0x00802808, mmGB_TILE_MODE5 },
105 { 0x00802814, mmGB_TILE_MODE6 },
106 { 0x00000000, mmGB_TILE_MODE7 },
107 { 0x00000004, mmGB_TILE_MODE8 },
108 { 0x02000008, mmGB_TILE_MODE9 },
109 { 0x02000010, mmGB_TILE_MODE10 },
110 { 0x06000014, mmGB_TILE_MODE11 },
111 { 0x00000000, mmGB_TILE_MODE12 },
112 { 0x02400008, mmGB_TILE_MODE13 },
113 { 0x02400010, mmGB_TILE_MODE14 },
114 { 0x02400030, mmGB_TILE_MODE15 },
115 { 0x06400014, mmGB_TILE_MODE16 },
116 { 0x00000000, mmGB_TILE_MODE17 },
117 { 0x0040000c, mmGB_TILE_MODE18 },
118 { 0x0100000c, mmGB_TILE_MODE19 },
119 { 0x0100001c, mmGB_TILE_MODE20 },
120 { 0x01000034, mmGB_TILE_MODE21 },
121 { 0x01000024, mmGB_TILE_MODE22 },
122 { 0x00000000, mmGB_TILE_MODE23 },
123 { 0x0040001c, mmGB_TILE_MODE24 },
124 { 0x01000020, mmGB_TILE_MODE25 },
125 { 0x01000038, mmGB_TILE_MODE26 },
126 { 0x02c00008, mmGB_TILE_MODE27 },
127 { 0x02c00010, mmGB_TILE_MODE28 },
128 { 0x06c00014, mmGB_TILE_MODE29 },
129 { 0x00000000, mmGB_TILE_MODE30 },
130 { 0x00000000, mmGB_TILE_MODE31 },
131 { 0x000000a8, mmGB_MACROTILE_MODE0 },
132 { 0x000000a4, mmGB_MACROTILE_MODE1 },
133 { 0x00000090, mmGB_MACROTILE_MODE2 },
134 { 0x00000090, mmGB_MACROTILE_MODE3 },
135 { 0x00000090, mmGB_MACROTILE_MODE4 },
136 { 0x00000090, mmGB_MACROTILE_MODE5 },
137 { 0x00000090, mmGB_MACROTILE_MODE6 },
138 { 0x00000000, mmGB_MACROTILE_MODE7 },
139 { 0x000000ee, mmGB_MACROTILE_MODE8 },
140 { 0x000000ea, mmGB_MACROTILE_MODE9 },
141 { 0x000000e9, mmGB_MACROTILE_MODE10 },
142 { 0x000000e5, mmGB_MACROTILE_MODE11 },
143 { 0x000000e4, mmGB_MACROTILE_MODE12 },
144 { 0x000000e0, mmGB_MACROTILE_MODE13 },
145 { 0x00000090, mmGB_MACROTILE_MODE14 },
146 { 0x00000000, mmGB_MACROTILE_MODE15 },
147 { 0x00900000, mmHDP_NONSURFACE_BASE },
148 { 0x00008000, mmHDP_NONSURFACE_INFO },
149 { 0x3fffffff, mmHDP_NONSURFACE_SIZE },
150 { 0x00000003, mmBIF_FB_EN },
151 //{ 0x00000000, mmMC_VM_FB_OFFSET },
152 { 0x00000000, mmSRBM_CNTL },
153 { 0x00020000, mmSRBM_CNTL },
154 { 0x80000000, mmATC_VMID0_PASID_MAPPING },
155 { 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS },
156 { 0x00000000, mmRLC_CNTL },
157 { 0x00000000, mmRLC_CNTL },
158 { 0x00000000, mmRLC_CNTL },
159 { 0xe0000000, mmGRBM_GFX_INDEX },
160 { 0x00000000, mmCGTS_TCC_DISABLE },
161 { 0x00000000, mmTCP_ADDR_CONFIG },
162 { 0x000000ff, mmTCP_ADDR_CONFIG },
163 { 0x76543210, mmTCP_CHAN_STEER_LO },
164 { 0xfedcba98, mmTCP_CHAN_STEER_HI },
165 { 0x00000000, mmDB_DEBUG2 },
166 { 0x00000000, mmDB_DEBUG },
167 { 0x00002b16, mmCP_QUEUE_THRESHOLDS },
168 { 0x00006030, mmCP_MEQ_THRESHOLDS },
169 { 0x01000104, mmSPI_CONFIG_CNTL_1 },
170 { 0x98184020, mmPA_SC_FIFO_SIZE },
171 { 0x00000001, mmVGT_NUM_INSTANCES },
172 { 0x00000000, mmCP_PERFMON_CNTL },
173 { 0x01180000, mmSQ_CONFIG },
174 { 0x00000000, mmVGT_CACHE_INVALIDATION },
175 { 0x00000000, mmSQ_THREAD_TRACE_BASE },
176 { 0x0000df80, mmSQ_THREAD_TRACE_MASK },
177 { 0x02249249, mmSQ_THREAD_TRACE_MODE },
178 { 0x00000000, mmPA_SC_LINE_STIPPLE_STATE },
179 { 0x00000000, mmCB_PERFCOUNTER0_SELECT1 },
180 { 0x06000100, mmCGTT_VGT_CLK_CTRL },
181 { 0x00000007, mmPA_CL_ENHANCE },
182 { 0x00000001, mmPA_SC_ENHANCE },
183 { 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS },
184 { 0x00000000, mmSRBM_GFX_CNTL },
185 { 0x00000320, mmSH_MEM_CONFIG },
186 { 0x00000010, mmSRBM_GFX_CNTL },
187 { 0x00000320, mmSH_MEM_CONFIG },
188 { 0x00000020, mmSRBM_GFX_CNTL },
189 { 0x00000320, mmSH_MEM_CONFIG },
190 { 0x00000030, mmSRBM_GFX_CNTL },
191 { 0x00000320, mmSH_MEM_CONFIG },
192 { 0x00000040, mmSRBM_GFX_CNTL },
193 { 0x00000320, mmSH_MEM_CONFIG },
194 { 0x00000050, mmSRBM_GFX_CNTL },
195 { 0x00000320, mmSH_MEM_CONFIG },
196 { 0x00000060, mmSRBM_GFX_CNTL },
197 { 0x00000320, mmSH_MEM_CONFIG },
198 { 0x00000070, mmSRBM_GFX_CNTL },
199 { 0x00000320, mmSH_MEM_CONFIG },
200 { 0x00000080, mmSRBM_GFX_CNTL },
201 { 0x00000320, mmSH_MEM_CONFIG },
202 { 0x00000090, mmSRBM_GFX_CNTL },
203 { 0x00000320, mmSH_MEM_CONFIG },
204 { 0x000000a0, mmSRBM_GFX_CNTL },
205 { 0x00000320, mmSH_MEM_CONFIG },
206 { 0x000000b0, mmSRBM_GFX_CNTL },
207 { 0x00000320, mmSH_MEM_CONFIG },
208 { 0x000000c0, mmSRBM_GFX_CNTL },
209 { 0x00000320, mmSH_MEM_CONFIG },
210 { 0x000000d0, mmSRBM_GFX_CNTL },
211 { 0x00000320, mmSH_MEM_CONFIG },
212 { 0x000000e0, mmSRBM_GFX_CNTL },
213 { 0x00000320, mmSH_MEM_CONFIG },
214 { 0x000000f0, mmSRBM_GFX_CNTL },
215 { 0x00000320, mmSH_MEM_CONFIG },
216 { 0x00000000, mmSRBM_GFX_CNTL },
217 { 0x00000000, mmGRBM_STATUS },
218 { 0x00000000, mmGRBM_STATUS },
219 { 0x00000000, mmGRBM_STATUS },
220 { 0x00000000, mmGRBM_STATUS },
221 { 0x00000000, mmGRBM_STATUS },
222 { 0x00000000, mmGRBM_STATUS },
223 { 0x00000000, mmGRBM_STATUS },
224 { 0x00000000, mmGRBM_STATUS },
225 { 0x00000000, mmGRBM_STATUS },
226 { 0x00000000, mmGRBM_STATUS },
227 { 0x00000000, mmGRBM_STATUS },
228 { 0x00000000, mmGRBM_STATUS },
229 { 0x00000000, mmGRBM_STATUS },
230 { 0x00000000, mmGRBM_STATUS },
231 { 0x00000000, mmGRBM_STATUS },
232 { 0x00000000, mmGRBM_STATUS },
233 { 0x00000000, mmGRBM_STATUS },
234 { 0x00000000, mmGRBM_STATUS },
235 { 0x00000000, mmGRBM_STATUS },
236 { 0x00000000, mmGRBM_STATUS },
237 { 0x00000000, mmGRBM_STATUS },
238 { 0x00000000, mmGRBM_STATUS },
239 { 0x00000000, mmGRBM_STATUS },
240 { 0x00000000, mmGRBM_STATUS },
241 { 0x00000000, mmGRBM_STATUS },
242 { 0x00000000, mmGRBM_STATUS },
243 { 0x00000000, mmGRBM_STATUS },
244 { 0x00000000, mmGRBM_STATUS },
245 { 0x00000000, mmGRBM_STATUS },
246 { 0x00000000, mmRLC_PG_CNTL },
247 { 0x00000000, mmGRBM_STATUS2 },
248 { 0x15000000, mmCP_ME_CNTL },
249 { 0x50000000, mmCP_MEC_CNTL },
250 { 0x00000000, mmSRBM_GFX_CNTL },
251 { 0x0000000e, mmSH_MEM_APE1_BASE },
252 { 0x0000020d, mmSH_MEM_APE1_LIMIT },
253 { 0x00000000, mmSRBM_GFX_CNTL },
254 { 0x00000000, mmSRBM_GFX_CNTL },
255 { 0x00000000, mmSH_MEM_CONFIG },
256 { 0x00000320, mmSH_MEM_CONFIG },
257 { 0x00000000, mmSRBM_GFX_CNTL },
258 { 0x00000000, mmCP_RB_VMID },
259 { 0x00000000, mmGRBM_STATUS },
260 { 0x00000000, mmRLC_CNTL },
261 { 0x00000000, mmRLC_CNTL },
262 { 0x00000000, mmRLC_SRM_CNTL },
263 { 0x00000002, mmRLC_SRM_CNTL },
264 { 0x00000000, mmCP_ME_CNTL },
265 { 0x15000000, mmCP_ME_CNTL },
266 { 0x00000000, mmCP_MEC_CNTL },
267 { 0x50000000, mmCP_MEC_CNTL },
268 { 0x80000004, mmCP_DFY_CNTL },
269 { 0x0840800a, mmCP_RB0_CNTL },
270 { 0xf30fff0f, mmTCC_CTRL },
271 { 0x00000002, mmTCC_EXE_DISABLE },
272 { 0x000000ff, mmTCP_ADDR_CONFIG },
273 { 0x540ff000, mmCP_CPC_IC_BASE_LO },
274 { 0x000000b4, mmCP_CPC_IC_BASE_HI },
275 { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
276 { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
277 { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
278 { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
279 { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
280 { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
281 { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
282 { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
283 { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
284 { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
285 { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
286 { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
287 { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
288 { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
289 { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
290 { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
291 { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
292 { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
293 { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
294 { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
295 { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
296 { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
297 { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
298 { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
299 { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
300 { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
301 { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
302 { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
303 { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
304 { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
305 { 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
306 { 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
307 { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
308 { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
309 { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
310 { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
311 { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
312 { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
313 { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
314 { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
315 { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
316 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
317 { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
318 { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
319 { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
320 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
321 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
322 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
323 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
324 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
325 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
326 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
327 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
328 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
329 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
330 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
331 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
332 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
333 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
334 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
335 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
336 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
337 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
338 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
339 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
340 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
341 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
342 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
343 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
344 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
345 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
346 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
347 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
348 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
349 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
350 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
351 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
352 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
353 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
354 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
355 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
356 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
357 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
358 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
359 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
360 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
361 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
362 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
363 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
364 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
365 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
366 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
367 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
368 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
369 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
370 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
371 { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
372 { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
373 { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
374 { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
375 { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
376 { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
377 { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
378 { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
379 { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
380 { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
381 { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
382 { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
383 { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
384 { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
385 { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
386 { 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
387 { 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
388 { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
389 { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
390 { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
391 { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
392 { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
393 { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
394 { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
395 { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
396 { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
397 { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
398 { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
399 { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
400 { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
401 { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
402 { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
403 { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
404 { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
405 { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
406 { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
407 { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
408 { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
409 { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
410 { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
411 { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
412 { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
413 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
414 { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
415 { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
416 { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
417 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
418 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
419 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
420 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
421 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
422 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
423 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
424 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
425 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
426 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
427 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
428 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
429 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
430 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
431 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
432 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
433 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
434 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
435 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
436 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
437 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
438 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
439 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
440 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
441 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
442 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
443 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
444 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
445 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
446 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
447 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
448 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
449 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
450 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
451 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
452 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
453 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
454 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
455 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
456 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
457 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
458 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
459 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
460 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
461 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
462 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
463 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
464 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
465 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
466 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
467 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
468 { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
469 { 0x00000000, 0xFFFFFFFF },
470};
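The table above is a long run of { data, register } pairs, almost all of them writes into mmCP_HYP_MEC2_UCODE_DATA, and the final { 0x00000000, 0xFFFFFFFF } entry looks like an end-of-table sentinel. The table's type declaration and the code that consumes it are not part of this hunk, so the following is only a sketch of how such a table could be replayed through the CGS register-write helper; the entry layout, field names and loop are assumptions, not the driver's actual loader.

/*
 * Illustrative sketch only.  Assumes a { data, reg } entry layout with a
 * 0xFFFFFFFF register offset as terminator, as suggested by the entries
 * above, plus <linux/types.h> and the cgs_write_register() helper from
 * the CGS layer.  Not taken from the patch itself.
 */
struct pwr_cmd_entry {		/* hypothetical; the real type is declared outside this hunk */
	uint32_t data;
	uint32_t reg;
};

static void replay_pwr_cmd_table(void *device, const struct pwr_cmd_entry *tbl)
{
	/* Write each payload word to its target register until the sentinel entry. */
	for (; tbl->reg != 0xFFFFFFFF; tbl++)
		cgs_write_register(device, tbl->reg, tbl->data);
}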
471
472static const PWR_DFY_Section pwr_virus_section1 = {
473 .dfy_cntl = 0x80000004,
474 .dfy_addr_hi = 0x000000b4,
475 .dfy_addr_lo = 0x540fe800,
476 .dfy_data = {
477 0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
478 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
479 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
480 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
481 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
482 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
483 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
484 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
485 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
486 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
487 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
488 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
489 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
490 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
491 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
492 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
493 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
494 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
495 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
496 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
497 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
498 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
499 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
500 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
501 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
502 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
503 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
504 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
505 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
506 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
507 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
508 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
509 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
510 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
511 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
512 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
513 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
514 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
515 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
516 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
517 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
518 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
519 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
520 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
521 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
522 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
523 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
524 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
525 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000,
526 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
527 0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
528 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
529 },
530 .dfy_size = 416
531};
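pwr_virus_section1 shows the layout of a PWR_DFY_Section as initialized here: a control word (.dfy_cntl), a destination address split into high and low halves (.dfy_addr_hi/.dfy_addr_lo), a payload array (.dfy_data) and the count of valid words in it (.dfy_size = 416, matching the 52 rows of 8 words above). The code that consumes these sections is outside this hunk; the sketch below only illustrates, under stated assumptions, how such a section might be streamed into the CP DFY registers. The helper name is hypothetical, and the mmCP_DFY_* offsets and cgs_write_register() are assumed from the VI register headers and the CGS layer rather than quoted from this patch.

/*
 * Hypothetical illustration: latch the control word and destination
 * address, then push the payload one dword at a time.  The register
 * names and the helper itself are assumptions, not the patch's code.
 */
static void write_dfy_section(void *device, const PWR_DFY_Section *section)
{
	uint32_t i;

	cgs_write_register(device, mmCP_DFY_CNTL, section->dfy_cntl);
	cgs_write_register(device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
	cgs_write_register(device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
	for (i = 0; i < section->dfy_size; i++)
		cgs_write_register(device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}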
532
533static const PWR_DFY_Section pwr_virus_section2 = {
534 .dfy_cntl = 0x80000004,
535 .dfy_addr_hi = 0x000000b4,
536 .dfy_addr_lo = 0x540fef00,
537 .dfy_data = {
538 0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000,
539 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
540 },
541 .dfy_size = 16
542};
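The sections target ascending low addresses (0x540fe800, 0x540fef00, 0x540ff000 for sections 1 to 3), which suggests consecutive payload regions, so a caller would presumably program them in order. A hypothetical usage of the helper sketched above, not taken from the patch:

	write_dfy_section(device, &pwr_virus_section1);
	write_dfy_section(device, &pwr_virus_section2);
	write_dfy_section(device, &pwr_virus_section3);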
543
544static const PWR_DFY_Section pwr_virus_section3 = {
545 .dfy_cntl = 0x80000004,
546 .dfy_addr_hi = 0x000000b4,
547 .dfy_addr_lo = 0x540ff000,
548 .dfy_data = {
549 0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000,
550 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001,
551 0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008,
552 0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f,
553 0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003,
554 0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000,
555 0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008,
556 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e,
557 0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029,
558 0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a,
559 0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013,
560 0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc40c0021, 0x14d00011, 0x9500fffe,
561 0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080,
562 0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f,
563 0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039,
564 0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055,
565 0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043,
566 0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000,
567 0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b,
568 0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034,
569 0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000,
570 0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001,
571 0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001,
572 0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020,
573 0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8,
574 0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc,
575 0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009,
576 0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013,
577 0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002,
578 0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f,
579 0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200,
580 0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020,
581 0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020,
582 0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020,
583 0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020,
584 0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380,
585 0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082,
586 0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005,
587 0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000,
588 0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000,
589 0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018,
590 0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080,
591 0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230,
592 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228,
593 0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc,
594 0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032,
595 0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001,
596 0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x86800000,
597 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70,
598 0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000,
599 0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b,
600 0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b,
601 0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002,
602 0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055,
603 0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028,
604 0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e,
605 0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d,
606 0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a,
607 0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279,
608 0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004,
609 0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f,
610 0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077,
611 0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8,
612 0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001,
613 0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078,
614 0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000,
615 0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f,
616 0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025,
617 0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00,
618 0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001,
619 0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277,
620 0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8,
621 0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267,
622 0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0,
623 0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2,
624 0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011,
625 0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008,
626 0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021,
627 0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282,
628 0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011,
629 0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d,
630 0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a,
631 0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013,
632 0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002,
633 0x51980020, 0x7d9d801a, 0xc81c001f, 0x1b340057, 0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a,
634 0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d,
635 0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002,
636 0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000,
637 0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019,
638 0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22,
639 0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008,
640 0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069,
641 0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249,
642 0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002,
643 0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001,
644 0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016,
645 0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0,
646 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267,
647 0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000,
648 0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028,
649 0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a,
650 0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000,
651 0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013,
652 0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036,
653 0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003,
654 0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000,
655 0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff,
656 0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008,
657 0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b,
658 0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009,
659 0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d,
660 0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057,
661 0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273,
662 0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008,
663 0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001,
664 0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007,
665 0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f,
666 0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002,
667 0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025,
668 0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d,
669 0xcec13249, 0xcd013273, 0xce813275, 0xd800007b, 0xc8380018, 0x57b00020, 0x04343108, 0xc429325d,
670 0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
671 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000,
672 0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024,
673 0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a,
674 0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027,
675 0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249,
676 0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f,
677 0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e,
678 0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200,
679 0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034,
680 0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755,
681 0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e,
682 0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265,
683 0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010,
684 0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013,
685 0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000,
686 0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000,
687 0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001,
688 0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200,
689 0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158,
690 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012,
691 0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026,
692 0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a,
693 0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000,
694 0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b,
695 0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a,
696 0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028,
697 0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e,
698 0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004,
699 0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1,
700 0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000,
701 0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a,
702 0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002,
703 0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d,
704 0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007,
705 0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 0x80000505, 0x8000016a, 0xc4380004,
706 0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000,
707 0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db,
708 0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f,
709 0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c,
710 0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013,
711 0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019,
712 0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277,
713 0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000,
714 0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a,
715 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009,
716 0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624,
717 0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273,
718 0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061,
719 0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000,
720 0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260,
721 0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a,
722 0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
723 0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013,
724 0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277,
725 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4,
726 0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001,
727 0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013,
728 0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245,
729 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350,
730 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a,
731 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026,
732 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068,
733 0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e,
734 0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b,
735 0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001,
736 0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
737 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345,
738 0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068,
739 0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000,
740 0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267,
741 0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 0x7f6b400a,
742 0x7f73400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
743 0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001,
744 0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f,
745 0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a,
746 0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004,
747 0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d,
748 0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
749 0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000,
750 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe,
751 0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213,
752 0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026,
753 0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001,
754 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013,
755 0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001,
756 0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001,
757 0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001,
758 0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a,
759 0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003,
760 0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b,
761 0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005,
762 0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254,
763 0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013,
764 0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef,
765 0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014,
766 0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013,
767 0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013,
768 0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009,
769 0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002,
770 0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001,
771 0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000,
772 0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4,
773 0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8,
774 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03,
775 0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d,
776 0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000,
777 0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e,
778 0x1b301fe8, 0x2b300400, 0x2330003f, 0xd8013260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010,
779 0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1,
780 0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008,
781 0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003,
782 0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013,
783 0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013,
784 0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000,
785 0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e,
786 0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245,
787 0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213,
788 0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000,
789 0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009,
790 0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012,
791 0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249,
792 0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f,
793 0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004,
794 0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80,
795 0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008,
796 0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267,
797 0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003,
798 0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
799 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448,
800 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000,
801 0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001,
802 0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013,
803 0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8,
804 0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000,
805 0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809,
806 0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811,
807 0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004,
808 0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8,
809 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013,
810 0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005,
811 0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013,
812 0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021,
813 0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a,
814 0xcf800008, 0x9b00021d, 0xdd410000, 0x040c0005, 0xd84802e9, 0x8c001a41, 0xc43b02f1, 0x9b800006,
815 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280,
816 0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011,
817 0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a,
818 0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254,
819 0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301,
820 0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a,
821 0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d,
822 0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001,
823 0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a,
824 0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007,
825 0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000,
826 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
827 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
828 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e,
829 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013,
830 0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e,
831 0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2,
832 0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001,
833 0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083,
834 0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff,
835 0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267,
836 0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013,
837 0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011,
838 0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006,
839 0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001,
840 0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014,
841 0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f,
842 0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001,
843 0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b,
844 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff,
845 0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000,
846 0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000,
847 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
848 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
849 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e,
850 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 0xd8400013, 0x13b80001, 0xc79d3300,
851 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009,
852 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff,
853 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010,
854 0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a,
855 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301,
856 0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a,
857 0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267,
858 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
859 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e,
860 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
861 0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011,
862 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e,
863 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
864 0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a,
865 0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000,
866 0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020,
867 0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010,
868 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020,
869 0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e,
870 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c,
871 0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004,
872 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021,
873 0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f,
874 0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff,
875 0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011,
876 0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013,
877 0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001,
878 0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032,
879 0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008,
880 0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f,
881 0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001,
882 0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013,
883 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd,
884 0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000,
885 0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002,
886 0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 0x80000aa2,
887 0x9540000a, 0xc8180011, 0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
888 0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
889 0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e,
890 0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001,
891 0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249,
892 0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1,
893 0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002,
894 0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7,
895 0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002,
896 0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace,
897 0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7,
898 0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013,
899 0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b,
900 0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000,
901 0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a,
902 0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019,
903 0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001,
904 0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008,
905 0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007,
906 0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029,
907 0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e,
908 0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005,
909 0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e,
910 0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a,
911 0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008,
912 0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a,
913 0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e,
914 0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f,
915 0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005,
916 0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016,
917 0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e,
918 0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a,
919 0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a,
920 0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100,
921 0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001,
922 0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008,
923 0x07740003, 0x04240002, 0x269c003f, 0x7e5e4004, 0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001,
924 0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a,
925 0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003,
926 0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f37000a, 0x13300017,
927 0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16,
928 0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037,
929 0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168,
930 0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
931 0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208,
932 0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
933 0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037,
934 0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002,
935 0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011,
936 0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028,
937 0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000,
938 0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074,
939 0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f,
940 0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff,
941 0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30,
942 0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074,
943 0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004,
944 0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8,
945 0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001,
946 0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f,
947 0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1,
948 0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000,
949 0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002,
950 0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013,
951 0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001,
952 0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001,
953 0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412,
954 0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004,
955 0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002,
956 0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017,
957 0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020,
958 0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013,
959 0x28200001, 0x80000ca7, 0x18dc0031, 0x95c00003, 0xc435c40b, 0x9740fffd, 0xd800002a, 0x80001b70,
960 0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0,
961 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007,
962 0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077,
963 0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a,
964 0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001,
965 0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e,
966 0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009,
967 0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001,
968 0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f,
969 0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004,
970 0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010,
971 0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70,
972 0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262,
973 0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d,
974 0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006,
975 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017,
976 0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b,
977 0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267,
978 0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a,
979 0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267,
980 0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83,
981 0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a,
982 0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000,
983 0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058,
984 0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025,
985 0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000,
986 0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001,
987 0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1,
988 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010,
989 0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001,
990 0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020,
991 0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020,
992 0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030,
993 0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005,
994 0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008,
995 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 0x54ec0020, 0x7cb4800a, 0xc4300027,
996 0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026,
997 0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002,
998 0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e,
999 0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009,
1000 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f,
1001 0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e,
1002 0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009,
1003 0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034,
1004 0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
1005 0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a,
1006 0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078,
1007 0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013,
1008 0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022,
1009 0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8,
1010 0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001,
1011 0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f,
1012 0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e,
1013 0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e,
1014 0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f,
1015 0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc,
1016 0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011,
1017 0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020,
1018 0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d,
1019 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005,
1020 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06,
1021 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002,
1022 0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001,
1023 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc,
1024 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000,
1025 0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001,
1026 0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe,
1027 0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000,
1028 0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058,
1029 0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026,
1030 0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001,
1031 0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 0x95400003,
1032 0x56240020, 0xce41c406, 0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b,
1033 0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000,
1034 0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011,
1035 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009,
1036 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004,
1037 0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40,
1038 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003,
1039 0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002,
1040 0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002,
1041 0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001,
1042 0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e,
1043 0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000,
1044 0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072,
1045 0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a,
1046 0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d,
1047 0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083,
1048 0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d,
1049 0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001,
1050 0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003,
1051 0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe,
1052 0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4,
1053 0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a,
1054 0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001,
1055 0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e,
1056 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005,
1057 0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001,
1058 0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002,
1059 0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f,
1060 0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009,
1061 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002,
1062 0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024,
1063 0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000,
1064 0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
1065 0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026,
1066 0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
1067 0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e,
1068 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009,
1069 0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001,
1070 0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000,
1071 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001,
1072 0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011,
1073 0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002,
1074 0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071,
1075 0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000,
1076 0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000,
1077 0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008,
1078 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006,
1079 0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a,
1080 0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1,
1081 0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d,
1082 0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d,
1083 0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001,
1084 0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023,
1085 0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e,
1086 0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013,
1087 0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000,
1088 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032,
1089 0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000,
1090 0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
1091 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e,
1092 0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001,
1093 0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024,
1094 0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000,
1095 0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b,
1096 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026,
1097 0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025,
1098 0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122,
1099 0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d,
1100 0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002,
1101 0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e,
1102 0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008,
1103 0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001,
1104 0x05cc0008, 0x7e0d000e, 0x95000007, 0x7e02401e, 0x06640001, 0x06640008, 0x05d80008, 0x8c00116e,
1105 0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6,
1106 0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e,
1107 0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002,
1108 0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185,
1109 0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013,
1110 0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008,
1111 0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e,
1112 0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3,
1113 0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6,
1114 0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004,
1115 0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002,
1116 0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c,
1117 0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001,
1118 0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168,
1119 0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d,
1120 0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1,
1121 0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c,
1122 0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002,
1123 0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b,
1124 0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021,
1125 0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f,
1126 0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009,
1127 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd,
1128 0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040,
1129 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f,
1130 0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010,
1131 0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034,
1132 0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079,
1133 0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001,
1134 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000,
1135 0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002,
1136 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000,
1137 0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e,
1138 0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e,
1139 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f,
1140 0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 0xc429325a, 0x1ab0c012, 0x07740001,
1141 0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011,
1142 0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e,
1143 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1,
1144 0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16,
1145 0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b,
1146 0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a,
1147 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013,
1148 0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef,
1149 0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f,
1150 0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002,
1151 0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a,
1152 0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001,
1153 0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c,
1154 0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000,
1155 0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040,
1156 0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008,
1157 0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013,
1158 0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001,
1159 0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001,
1160 0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d,
1161 0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a,
1162 0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5,
1163 0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7,
1164 0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff,
1165 0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f,
1166 0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002,
1167 0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002,
1168 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008,
1169 0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026,
1170 0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020,
1171 0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
1172 0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
1173 0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029,
1174 0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc,
1175 0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021,
1176 0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 0xccc00013,
1177 0xce013249, 0x1a307fe8, 0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256,
1178 0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011,
1179 0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009,
1180 0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008,
1181 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001,
1182 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013,
1183 0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001,
1184 0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021,
1185 0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024,
1186 0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f,
1187 0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020,
1188 0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8,
1189 0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000,
1190 0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072,
1191 0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c,
1192 0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c,
1193 0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012,
1194 0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007,
1195 0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d,
1196 0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004,
1197 0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001,
1198 0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013,
1199 0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a,
1200 0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000,
1201 0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001,
1202 0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c,
1203 0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002,
1204 0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a,
1205 0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b,
1206 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004,
1207 0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008,
1208 0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013,
1209 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a,
1210 0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a,
1211 0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008,
1212 0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020,
1213 0x7dd9c01a, 0x45dc0390, 0xc4313267, 0x04183000, 0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199,
1214 0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001,
1215 0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001,
1216 0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f,
1217 0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e,
1218 0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270,
1219 0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001,
1220 0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240,
1221 0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001,
1222 0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270,
1223 0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000,
1224 0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb,
1225 0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0,
1226 0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008,
1227 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001,
1228 0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001,
1229 0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004,
1230 0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005,
1231 0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238,
1232 0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801,
1233 0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004,
1234 0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014,
1235 0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a,
1236 0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005,
1237 0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020,
1238 0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002,
1239 0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230,
1240 0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298,
1241 0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013,
1242 0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240,
1243 0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a,
1244 0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001,
1245 0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f,
1246 0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0,
1247 0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f,
1248 0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9,
1249 0x7cd64002, 0x7e6a000e, 0x96000003, 0x7d694001, 0x800015e9, 0xc48f0230, 0xc4930240, 0x8c00163f,
1250 0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001,
1251 0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a,
1252 0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a,
1253 0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609,
1254 0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019,
1255 0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb,
1256 0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001,
1257 0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000,
1258 0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020,
1259 0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006,
1260 0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228,
1261 0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001,
1262 0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688,
1263 0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e,
1264 0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0,
1265 0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001,
1266 0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a,
1267 0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319,
1268 0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee,
1269 0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000,
1270 0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a,
1271 0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247,
1272 0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255,
1273 0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f,
1274 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4,
1275 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff,
1276 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033,
1277 0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003,
1278 0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064,
1279 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
1280 0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
1281 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d,
1282 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b,
1283 0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608,
1284 0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265,
1285 0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 0xc4193246, 0xc41d3245, 0x51980020,
1286 0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057,
1287 0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c,
1288 0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000,
1289 0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001,
1290 0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200,
1291 0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628,
1292 0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247,
1293 0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260,
1294 0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270,
1295 0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270,
1296 0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020,
1297 0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f,
1298 0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001,
1299 0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004,
1300 0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247,
1301 0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0,
1302 0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e,
1303 0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b,
1304 0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260,
1305 0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013,
1306 0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe,
1307 0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003,
1308 0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013,
1309 0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013,
1310 0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f,
1311 0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013,
1312 0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b,
1313 0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001,
1314 0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000,
1315 0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001,
1316 0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246,
1317 0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a,
1318 0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267,
1319 0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267,
1320 0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018,
1321 0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 0x04380032,
1322 0xcf80000e, 0x8c001427, 0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f,
1323 0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a,
1324 0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024,
1325 0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e,
1326 0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c,
1327 0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252,
1328 0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252,
1329 0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff,
1330 0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013,
1331 0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253,
1332 0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f,
1333 0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007,
1334 0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000,
1335 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001,
1336 0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e,
1337 0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001,
1338 0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c,
1339 0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008,
1340 0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027,
1341 0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010,
1342 0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250,
1343 0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001,
1344 0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004,
1345 0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001,
1346 0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001,
1347 0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f,
1348 0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d,
1349 0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008,
1350 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003,
1351 0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b,
1352 0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267,
1353 0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a,
1354 0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278,
1355 0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247,
1356 0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062,
1357 0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002,
1358 0x043c0102, 0xc439325b, 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002,
1359 0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe,
1360 0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b,
1361 0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003,
1362 0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001,
1363 0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082,
1364 0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f,
1365 0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf,
1366 0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb,
1367 0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a,
1368 0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012,
1369 0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e,
1370 0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001,
1371 0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078,
1372 0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280,
1373 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001,
1374 0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248,
1375 0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f,
1376 0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4,
1377 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff,
1378 0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033,
1379 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003,
1380 0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064,
1381 0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
1382 0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
1383 0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d,
1384 0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b,
1385 0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078,
1386 0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001,
1387 0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000,
1388 0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a,
1389 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020,
1390 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001,
1391 0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b,
1392 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260,
1393 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013,
1394 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe,
1395 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003,
1396 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013,
1397 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013,
1398 0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
1399 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013,
1400 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b,
1401 0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005,
1402 0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c,
1403 0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267,
1404 0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b,
1405 0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037,
1406 0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b,
1407 0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033,
1408 0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078,
1409 0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a,
1410 0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021,
1411 0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff,
1412 0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001,
1413 0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039,
1414 0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c,
1415 0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406,
1416 0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013,
1417 0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013,
1418 0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff,
1419 0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410,
1420 0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013,
1421 0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034,
1422 0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f,
1423 0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40,
1424 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000,
1425 0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601,
1426 0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000,
1427 0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000,
1428 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1429 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1430 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1431 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1432 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1433 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1434 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1435 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1436 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1437 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1438 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1439 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1440 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1441 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1442 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1443 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1444 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1445 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1446 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1447 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1448 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1449 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1450 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1451 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1452 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1453 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1454 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1455 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1456 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1457 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1458 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1459 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1460 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1461 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1462 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1463 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1464 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1465 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1466 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1467 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1468 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1469 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1470 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1471 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1472 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1473 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1474 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1475 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1476 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
1477 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1478 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1479 },
1480 .dfy_size = 7440
1481};
1482
1483static const PWR_DFY_Section pwr_virus_section4 = {
1484 .dfy_cntl = 0x80000004,
1485 .dfy_addr_hi = 0x000000b4,
1486 .dfy_addr_lo = 0x54106500,
1487 .dfy_data = {
1488 0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304,
1489 0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400,
1490 0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
1491 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1492 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1493 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1494 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
1495 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1496 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1497 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1498 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1499 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1500 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1501 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1502 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1503 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1504 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1505 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1506 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1507 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
1508 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1509 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1510 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1511 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
1512 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1513 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1514 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1515 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
1516 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1517 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1518 },
1519 .dfy_size = 240
1520};
1521
1522static const PWR_DFY_Section pwr_virus_section5 = {
1523 .dfy_cntl = 0x80000004,
1524 .dfy_addr_hi = 0x000000b4,
1525 .dfy_addr_lo = 0x54106900,
1526 .dfy_data = {
1527 0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282,
1528 0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1529 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1530 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1531 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1532 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
1533 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1534 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
1535 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1536 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1537 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1538 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1539 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1540 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1541 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
1542 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1543 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
1544 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1545 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1546 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1547 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1548 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1549 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1550 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
1551 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1552 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
1553 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1554 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1555 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1556 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1557 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1558 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1559 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
1560 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1561 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
1562 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1563 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1564 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1565 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1566 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1567 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1568 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
1569 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1570 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
1571 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1572 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
1573 0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1574 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1575 },
1576 .dfy_size = 384
1577};
1578
1579static const PWR_DFY_Section pwr_virus_section6 = {
1580 .dfy_cntl = 0x80000004,
1581 .dfy_addr_hi = 0x000000b4,
1582 .dfy_addr_lo = 0x54116f00,
1583 .dfy_data = {
1584 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1585 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000,
1586 0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff,
1587 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1588 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1589 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1590 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1591 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1592 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
1593 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
1594 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1595 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1596 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1597 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1598 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1599 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1600 0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
1601 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
1602 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1603 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1604 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1605 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1606 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1607 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1608 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1609 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1610 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1611 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1612 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1613 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1614 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1615 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1616 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1617 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000,
1618 0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff,
1619 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1620 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1621 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1622 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1623 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1624 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
1625 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
1626 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1627 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1628 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1629 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1630 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1631 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1632 0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
1633 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
1634 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1635 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1636 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1637 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1638 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1639 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1640 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1641 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1642 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1643 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1644 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1645 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1646 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1647 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1648 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1649 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000,
1650 0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff,
1651 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1652 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1653 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1654 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1655 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1656 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
1657 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
1658 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1659 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1660 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1661 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1662 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1663 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1664 0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
1665 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
1666 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1667 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1668 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1669 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1670 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1671 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1672 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1673 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1674 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1675 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1676 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1677 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1678 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1679 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1680 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1681 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000,
1682 0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff,
1683 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1684 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1685 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1686 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1687 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1688 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
1689 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
1690 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1691 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1692 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1693 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1694 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1695 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1696 0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
1697 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
1698 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1699 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1700 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1701 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1702 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1703 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1704 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1705 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1706 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1707 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1708 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1709 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1710 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1711 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1712 },
1713 .dfy_size = 1024
1714};
1715
1716static const PWR_Command_Table PwrVirusTable_post[] = {
1717 { 0x00000000, mmCP_MEC_CNTL },
1718 { 0x00000000, mmCP_MEC_CNTL },
1719 { 0x00000004, mmSRBM_GFX_CNTL },
1720 { 0x54116f00, mmCP_MQD_BASE_ADDR },
1721 { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
1722 { 0xb4540fef, mmCP_HQD_PQ_BASE },
1723 { 0x00000000, mmCP_HQD_PQ_BASE_HI },
1724 { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
1725 { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
1726 { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
1727 { 0x00010000, mmCP_HQD_VMID },
1728 { 0xc8318509, mmCP_HQD_PQ_CONTROL },
1729 { 0x00000005, mmSRBM_GFX_CNTL },
1730 { 0x54117300, mmCP_MQD_BASE_ADDR },
1731 { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
1732 { 0xb4540fef, mmCP_HQD_PQ_BASE },
1733 { 0x00000000, mmCP_HQD_PQ_BASE_HI },
1734 { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
1735 { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
1736 { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
1737 { 0x00010000, mmCP_HQD_VMID },
1738 { 0xc8318509, mmCP_HQD_PQ_CONTROL },
1739 { 0x00000006, mmSRBM_GFX_CNTL },
1740 { 0x54117700, mmCP_MQD_BASE_ADDR },
1741 { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
1742 { 0xb4540fef, mmCP_HQD_PQ_BASE },
1743 { 0x00000000, mmCP_HQD_PQ_BASE_HI },
1744 { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
1745 { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
1746 { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
1747 { 0x00010000, mmCP_HQD_VMID },
1748 { 0xc8318509, mmCP_HQD_PQ_CONTROL },
1749 { 0x00000007, mmSRBM_GFX_CNTL },
1750 { 0x54117b00, mmCP_MQD_BASE_ADDR },
1751 { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
1752 { 0xb4540fef, mmCP_HQD_PQ_BASE },
1753 { 0x00000000, mmCP_HQD_PQ_BASE_HI },
1754 { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
1755 { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
1756 { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
1757 { 0x00010000, mmCP_HQD_VMID },
1758 { 0xc8318509, mmCP_HQD_PQ_CONTROL },
1759 { 0x00000004, mmSRBM_GFX_CNTL },
1760 { 0x00000000, mmCP_HQD_ACTIVE },
1761 { 0x00000000, mmCP_HQD_PQ_RPTR },
1762 { 0x00000000, mmCP_HQD_PQ_WPTR },
1763 { 0x00000001, mmCP_HQD_ACTIVE },
1764 { 0x00000104, mmSRBM_GFX_CNTL },
1765 { 0x00000000, mmCP_HQD_ACTIVE },
1766 { 0x00000000, mmCP_HQD_PQ_RPTR },
1767 { 0x00000000, mmCP_HQD_PQ_WPTR },
1768 { 0x00000001, mmCP_HQD_ACTIVE },
1769 { 0x00000204, mmSRBM_GFX_CNTL },
1770 { 0x00000000, mmCP_HQD_ACTIVE },
1771 { 0x00000000, mmCP_HQD_PQ_RPTR },
1772 { 0x00000000, mmCP_HQD_PQ_WPTR },
1773 { 0x00000001, mmCP_HQD_ACTIVE },
1774 { 0x00000304, mmSRBM_GFX_CNTL },
1775 { 0x00000000, mmCP_HQD_ACTIVE },
1776 { 0x00000000, mmCP_HQD_PQ_RPTR },
1777 { 0x00000000, mmCP_HQD_PQ_WPTR },
1778 { 0x00000001, mmCP_HQD_ACTIVE },
1779 { 0x00000404, mmSRBM_GFX_CNTL },
1780 { 0x00000000, mmCP_HQD_ACTIVE },
1781 { 0x00000000, mmCP_HQD_PQ_RPTR },
1782 { 0x00000000, mmCP_HQD_PQ_WPTR },
1783 { 0x00000001, mmCP_HQD_ACTIVE },
1784 { 0x00000504, mmSRBM_GFX_CNTL },
1785 { 0x00000000, mmCP_HQD_ACTIVE },
1786 { 0x00000000, mmCP_HQD_PQ_RPTR },
1787 { 0x00000000, mmCP_HQD_PQ_WPTR },
1788 { 0x00000001, mmCP_HQD_ACTIVE },
1789 { 0x00000604, mmSRBM_GFX_CNTL },
1790 { 0x00000000, mmCP_HQD_ACTIVE },
1791 { 0x00000000, mmCP_HQD_PQ_RPTR },
1792 { 0x00000000, mmCP_HQD_PQ_WPTR },
1793 { 0x00000001, mmCP_HQD_ACTIVE },
1794 { 0x00000704, mmSRBM_GFX_CNTL },
1795 { 0x00000000, mmCP_HQD_ACTIVE },
1796 { 0x00000000, mmCP_HQD_PQ_RPTR },
1797 { 0x00000000, mmCP_HQD_PQ_WPTR },
1798 { 0x00000001, mmCP_HQD_ACTIVE },
1799 { 0x00000005, mmSRBM_GFX_CNTL },
1800 { 0x00000000, mmCP_HQD_ACTIVE },
1801 { 0x00000000, mmCP_HQD_PQ_RPTR },
1802 { 0x00000000, mmCP_HQD_PQ_WPTR },
1803 { 0x00000001, mmCP_HQD_ACTIVE },
1804 { 0x00000105, mmSRBM_GFX_CNTL },
1805 { 0x00000000, mmCP_HQD_ACTIVE },
1806 { 0x00000000, mmCP_HQD_PQ_RPTR },
1807 { 0x00000000, mmCP_HQD_PQ_WPTR },
1808 { 0x00000001, mmCP_HQD_ACTIVE },
1809 { 0x00000205, mmSRBM_GFX_CNTL },
1810 { 0x00000000, mmCP_HQD_ACTIVE },
1811 { 0x00000000, mmCP_HQD_PQ_RPTR },
1812 { 0x00000000, mmCP_HQD_PQ_WPTR },
1813 { 0x00000001, mmCP_HQD_ACTIVE },
1814 { 0x00000305, mmSRBM_GFX_CNTL },
1815 { 0x00000000, mmCP_HQD_ACTIVE },
1816 { 0x00000000, mmCP_HQD_PQ_RPTR },
1817 { 0x00000000, mmCP_HQD_PQ_WPTR },
1818 { 0x00000001, mmCP_HQD_ACTIVE },
1819 { 0x00000405, mmSRBM_GFX_CNTL },
1820 { 0x00000000, mmCP_HQD_ACTIVE },
1821 { 0x00000000, mmCP_HQD_PQ_RPTR },
1822 { 0x00000000, mmCP_HQD_PQ_WPTR },
1823 { 0x00000001, mmCP_HQD_ACTIVE },
1824 { 0x00000505, mmSRBM_GFX_CNTL },
1825 { 0x00000000, mmCP_HQD_ACTIVE },
1826 { 0x00000000, mmCP_HQD_PQ_RPTR },
1827 { 0x00000000, mmCP_HQD_PQ_WPTR },
1828 { 0x00000001, mmCP_HQD_ACTIVE },
1829 { 0x00000605, mmSRBM_GFX_CNTL },
1830 { 0x00000000, mmCP_HQD_ACTIVE },
1831 { 0x00000000, mmCP_HQD_PQ_RPTR },
1832 { 0x00000000, mmCP_HQD_PQ_WPTR },
1833 { 0x00000001, mmCP_HQD_ACTIVE },
1834 { 0x00000705, mmSRBM_GFX_CNTL },
1835 { 0x00000000, mmCP_HQD_ACTIVE },
1836 { 0x00000000, mmCP_HQD_PQ_RPTR },
1837 { 0x00000000, mmCP_HQD_PQ_WPTR },
1838 { 0x00000001, mmCP_HQD_ACTIVE },
1839 { 0x00000006, mmSRBM_GFX_CNTL },
1840 { 0x00000000, mmCP_HQD_ACTIVE },
1841 { 0x00000000, mmCP_HQD_PQ_RPTR },
1842 { 0x00000000, mmCP_HQD_PQ_WPTR },
1843 { 0x00000001, mmCP_HQD_ACTIVE },
1844 { 0x00000106, mmSRBM_GFX_CNTL },
1845 { 0x00000000, mmCP_HQD_ACTIVE },
1846 { 0x00000000, mmCP_HQD_PQ_RPTR },
1847 { 0x00000000, mmCP_HQD_PQ_WPTR },
1848 { 0x00000001, mmCP_HQD_ACTIVE },
1849 { 0x00000206, mmSRBM_GFX_CNTL },
1850 { 0x00000000, mmCP_HQD_ACTIVE },
1851 { 0x00000000, mmCP_HQD_PQ_RPTR },
1852 { 0x00000000, mmCP_HQD_PQ_WPTR },
1853 { 0x00000001, mmCP_HQD_ACTIVE },
1854 { 0x00000306, mmSRBM_GFX_CNTL },
1855 { 0x00000000, mmCP_HQD_ACTIVE },
1856 { 0x00000000, mmCP_HQD_PQ_RPTR },
1857 { 0x00000000, mmCP_HQD_PQ_WPTR },
1858 { 0x00000001, mmCP_HQD_ACTIVE },
1859 { 0x00000406, mmSRBM_GFX_CNTL },
1860 { 0x00000000, mmCP_HQD_ACTIVE },
1861 { 0x00000000, mmCP_HQD_PQ_RPTR },
1862 { 0x00000000, mmCP_HQD_PQ_WPTR },
1863 { 0x00000001, mmCP_HQD_ACTIVE },
1864 { 0x00000506, mmSRBM_GFX_CNTL },
1865 { 0x00000000, mmCP_HQD_ACTIVE },
1866 { 0x00000000, mmCP_HQD_PQ_RPTR },
1867 { 0x00000000, mmCP_HQD_PQ_WPTR },
1868 { 0x00000001, mmCP_HQD_ACTIVE },
1869 { 0x00000606, mmSRBM_GFX_CNTL },
1870 { 0x00000000, mmCP_HQD_ACTIVE },
1871 { 0x00000000, mmCP_HQD_PQ_RPTR },
1872 { 0x00000000, mmCP_HQD_PQ_WPTR },
1873 { 0x00000001, mmCP_HQD_ACTIVE },
1874 { 0x00000706, mmSRBM_GFX_CNTL },
1875 { 0x00000000, mmCP_HQD_ACTIVE },
1876 { 0x00000000, mmCP_HQD_PQ_RPTR },
1877 { 0x00000000, mmCP_HQD_PQ_WPTR },
1878 { 0x00000001, mmCP_HQD_ACTIVE },
1879 { 0x00000007, mmSRBM_GFX_CNTL },
1880 { 0x00000000, mmCP_HQD_ACTIVE },
1881 { 0x00000000, mmCP_HQD_PQ_RPTR },
1882 { 0x00000000, mmCP_HQD_PQ_WPTR },
1883 { 0x00000001, mmCP_HQD_ACTIVE },
1884 { 0x00000107, mmSRBM_GFX_CNTL },
1885 { 0x00000000, mmCP_HQD_ACTIVE },
1886 { 0x00000000, mmCP_HQD_PQ_RPTR },
1887 { 0x00000000, mmCP_HQD_PQ_WPTR },
1888 { 0x00000001, mmCP_HQD_ACTIVE },
1889 { 0x00000207, mmSRBM_GFX_CNTL },
1890 { 0x00000000, mmCP_HQD_ACTIVE },
1891 { 0x00000000, mmCP_HQD_PQ_RPTR },
1892 { 0x00000000, mmCP_HQD_PQ_WPTR },
1893 { 0x00000001, mmCP_HQD_ACTIVE },
1894 { 0x00000307, mmSRBM_GFX_CNTL },
1895 { 0x00000000, mmCP_HQD_ACTIVE },
1896 { 0x00000000, mmCP_HQD_PQ_RPTR },
1897 { 0x00000000, mmCP_HQD_PQ_WPTR },
1898 { 0x00000001, mmCP_HQD_ACTIVE },
1899 { 0x00000407, mmSRBM_GFX_CNTL },
1900 { 0x00000000, mmCP_HQD_ACTIVE },
1901 { 0x00000000, mmCP_HQD_PQ_RPTR },
1902 { 0x00000000, mmCP_HQD_PQ_WPTR },
1903 { 0x00000001, mmCP_HQD_ACTIVE },
1904 { 0x00000507, mmSRBM_GFX_CNTL },
1905 { 0x00000000, mmCP_HQD_ACTIVE },
1906 { 0x00000000, mmCP_HQD_PQ_RPTR },
1907 { 0x00000000, mmCP_HQD_PQ_WPTR },
1908 { 0x00000001, mmCP_HQD_ACTIVE },
1909 { 0x00000607, mmSRBM_GFX_CNTL },
1910 { 0x00000000, mmCP_HQD_ACTIVE },
1911 { 0x00000000, mmCP_HQD_PQ_RPTR },
1912 { 0x00000000, mmCP_HQD_PQ_WPTR },
1913 { 0x00000001, mmCP_HQD_ACTIVE },
1914 { 0x00000707, mmSRBM_GFX_CNTL },
1915 { 0x00000000, mmCP_HQD_ACTIVE },
1916 { 0x00000000, mmCP_HQD_PQ_RPTR },
1917 { 0x00000000, mmCP_HQD_PQ_WPTR },
1918 { 0x00000001, mmCP_HQD_ACTIVE },
1919 { 0x00000008, mmSRBM_GFX_CNTL },
1920 { 0x00000000, mmCP_HQD_ACTIVE },
1921 { 0x00000000, mmCP_HQD_PQ_RPTR },
1922 { 0x00000000, mmCP_HQD_PQ_WPTR },
1923 { 0x00000001, mmCP_HQD_ACTIVE },
1924 { 0x00000108, mmSRBM_GFX_CNTL },
1925 { 0x00000000, mmCP_HQD_ACTIVE },
1926 { 0x00000000, mmCP_HQD_PQ_RPTR },
1927 { 0x00000000, mmCP_HQD_PQ_WPTR },
1928 { 0x00000001, mmCP_HQD_ACTIVE },
1929 { 0x00000208, mmSRBM_GFX_CNTL },
1930 { 0x00000000, mmCP_HQD_ACTIVE },
1931 { 0x00000000, mmCP_HQD_PQ_RPTR },
1932 { 0x00000000, mmCP_HQD_PQ_WPTR },
1933 { 0x00000001, mmCP_HQD_ACTIVE },
1934 { 0x00000308, mmSRBM_GFX_CNTL },
1935 { 0x00000000, mmCP_HQD_ACTIVE },
1936 { 0x00000000, mmCP_HQD_PQ_RPTR },
1937 { 0x00000000, mmCP_HQD_PQ_WPTR },
1938 { 0x00000001, mmCP_HQD_ACTIVE },
1939 { 0x00000408, mmSRBM_GFX_CNTL },
1940 { 0x00000000, mmCP_HQD_ACTIVE },
1941 { 0x00000000, mmCP_HQD_PQ_RPTR },
1942 { 0x00000000, mmCP_HQD_PQ_WPTR },
1943 { 0x00000001, mmCP_HQD_ACTIVE },
1944 { 0x00000508, mmSRBM_GFX_CNTL },
1945 { 0x00000000, mmCP_HQD_ACTIVE },
1946 { 0x00000000, mmCP_HQD_PQ_RPTR },
1947 { 0x00000000, mmCP_HQD_PQ_WPTR },
1948 { 0x00000001, mmCP_HQD_ACTIVE },
1949 { 0x00000608, mmSRBM_GFX_CNTL },
1950 { 0x00000000, mmCP_HQD_ACTIVE },
1951 { 0x00000000, mmCP_HQD_PQ_RPTR },
1952 { 0x00000000, mmCP_HQD_PQ_WPTR },
1953 { 0x00000001, mmCP_HQD_ACTIVE },
1954 { 0x00000708, mmSRBM_GFX_CNTL },
1955 { 0x00000000, mmCP_HQD_ACTIVE },
1956 { 0x00000000, mmCP_HQD_PQ_RPTR },
1957 { 0x00000000, mmCP_HQD_PQ_WPTR },
1958 { 0x00000001, mmCP_HQD_ACTIVE },
1959 { 0x00000009, mmSRBM_GFX_CNTL },
1960 { 0x00000000, mmCP_HQD_ACTIVE },
1961 { 0x00000000, mmCP_HQD_PQ_RPTR },
1962 { 0x00000000, mmCP_HQD_PQ_WPTR },
1963 { 0x00000001, mmCP_HQD_ACTIVE },
1964 { 0x00000109, mmSRBM_GFX_CNTL },
1965 { 0x00000000, mmCP_HQD_ACTIVE },
1966 { 0x00000000, mmCP_HQD_PQ_RPTR },
1967 { 0x00000000, mmCP_HQD_PQ_WPTR },
1968 { 0x00000001, mmCP_HQD_ACTIVE },
1969 { 0x00000209, mmSRBM_GFX_CNTL },
1970 { 0x00000000, mmCP_HQD_ACTIVE },
1971 { 0x00000000, mmCP_HQD_PQ_RPTR },
1972 { 0x00000000, mmCP_HQD_PQ_WPTR },
1973 { 0x00000001, mmCP_HQD_ACTIVE },
1974 { 0x00000309, mmSRBM_GFX_CNTL },
1975 { 0x00000000, mmCP_HQD_ACTIVE },
1976 { 0x00000000, mmCP_HQD_PQ_RPTR },
1977 { 0x00000000, mmCP_HQD_PQ_WPTR },
1978 { 0x00000001, mmCP_HQD_ACTIVE },
1979 { 0x00000409, mmSRBM_GFX_CNTL },
1980 { 0x00000000, mmCP_HQD_ACTIVE },
1981 { 0x00000000, mmCP_HQD_PQ_RPTR },
1982 { 0x00000000, mmCP_HQD_PQ_WPTR },
1983 { 0x00000001, mmCP_HQD_ACTIVE },
1984 { 0x00000509, mmSRBM_GFX_CNTL },
1985 { 0x00000000, mmCP_HQD_ACTIVE },
1986 { 0x00000000, mmCP_HQD_PQ_RPTR },
1987 { 0x00000000, mmCP_HQD_PQ_WPTR },
1988 { 0x00000001, mmCP_HQD_ACTIVE },
1989 { 0x00000609, mmSRBM_GFX_CNTL },
1990 { 0x00000000, mmCP_HQD_ACTIVE },
1991 { 0x00000000, mmCP_HQD_PQ_RPTR },
1992 { 0x00000000, mmCP_HQD_PQ_WPTR },
1993 { 0x00000001, mmCP_HQD_ACTIVE },
1994 { 0x00000709, mmSRBM_GFX_CNTL },
1995 { 0x00000000, mmCP_HQD_ACTIVE },
1996 { 0x00000000, mmCP_HQD_PQ_RPTR },
1997 { 0x00000000, mmCP_HQD_PQ_WPTR },
1998 { 0x00000001, mmCP_HQD_ACTIVE },
1999 { 0x00000004, mmSRBM_GFX_CNTL },
2000 { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
2001 { 0x00000000, mmGRBM_STATUS },
2002 { 0x00000000, mmGRBM_STATUS },
2003 { 0x00000000, mmGRBM_STATUS },
2004 { 0x00000000, 0xffffffff },
2005};
2006
2007#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 126b44d47a99..004a40e88bde 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -363,6 +363,12 @@ struct pp_hwmgr_func {
363 int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); 363 int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
364 int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); 364 int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
365 int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); 365 int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
366 int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
367 uint32_t virtual_addr_low,
368 uint32_t virtual_addr_hi,
369 uint32_t mc_addr_low,
370 uint32_t mc_addr_hi,
371 uint32_t size);
366}; 372};
367 373
368struct pp_table_func { 374struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 7c9aba81cd6a..b1b27b2128f6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -75,6 +75,11 @@ enum SMU_MEMBER {
75 VceBootLevel, 75 VceBootLevel,
76 SamuBootLevel, 76 SamuBootLevel,
77 LowSclkInterruptThreshold, 77 LowSclkInterruptThreshold,
78 DRAM_LOG_ADDR_H,
79 DRAM_LOG_ADDR_L,
80 DRAM_LOG_PHY_ADDR_H,
81 DRAM_LOG_PHY_ADDR_L,
82 DRAM_LOG_BUFF_SIZE,
78}; 83};
79 84
80 85
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index cb070ebc7de1..d06ece4ac47d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,6 +124,8 @@ typedef uint16_t PPSMC_Result;
124#define PPSMC_MSG_NumOfDisplays 0x56 124#define PPSMC_MSG_NumOfDisplays 0x56
125#define PPSMC_MSG_ReadSerialNumTop32 0x58 125#define PPSMC_MSG_ReadSerialNumTop32 0x58
126#define PPSMC_MSG_ReadSerialNumBottom32 0x59 126#define PPSMC_MSG_ReadSerialNumBottom32 0x59
127#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x5A
128#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x5B
127#define PPSMC_MSG_RunAcgBtc 0x5C 129#define PPSMC_MSG_RunAcgBtc 0x5C
128#define PPSMC_MSG_RunAcgInClosedLoop 0x5D 130#define PPSMC_MSG_RunAcgInClosedLoop 0x5D
129#define PPSMC_MSG_RunAcgInOpenLoop 0x5E 131#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 4e2988825ff6..b24b0f203a51 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,9 +2,9 @@
2# Makefile for the 'smu manager' sub-component of powerplay. 2# Makefile for the 'smu manager' sub-component of powerplay.
3# It provides the smu management services for the driver. 3# It provides the smu management services for the driver.
4 4
5SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ 5SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
6 polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ 6 polaris10_smumgr.o iceland_smumgr.o \
7 smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o 7 smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
8 8
9AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) 9AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
10 10
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 0017b9e62404..4d672cd15785 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2266,6 +2266,16 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2266 return offsetof(SMU7_SoftRegisters, PreVBlankGap); 2266 return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2267 case VBlankTimeout: 2267 case VBlankTimeout:
2268 return offsetof(SMU7_SoftRegisters, VBlankTimeout); 2268 return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2269 case DRAM_LOG_ADDR_H:
2270 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2271 case DRAM_LOG_ADDR_L:
2272 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2273 case DRAM_LOG_PHY_ADDR_H:
2274 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2275 case DRAM_LOG_PHY_ADDR_L:
2276 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2277 case DRAM_LOG_BUFF_SIZE:
2278 return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2269 } 2279 }
2270 case SMU_Discrete_DpmTable: 2280 case SMU_Discrete_DpmTable:
2271 switch (member) { 2281 switch (member) {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
deleted file mode 100644
index b1a66b5ada4a..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ /dev/null
@@ -1,2486 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "pp_debug.h"
25#include "fiji_smc.h"
26#include "smu7_dyn_defaults.h"
27
28#include "smu7_hwmgr.h"
29#include "hardwaremanager.h"
30#include "ppatomctrl.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "fiji_smumgr.h"
34#include "pppcielanes.h"
35#include "smu7_ppsmc.h"
36#include "smu73.h"
37#include "smu/smu_7_1_3_d.h"
38#include "smu/smu_7_1_3_sh_mask.h"
39#include "gmc/gmc_8_1_d.h"
40#include "gmc/gmc_8_1_sh_mask.h"
41#include "bif/bif_5_0_d.h"
42#include "bif/bif_5_0_sh_mask.h"
43#include "dce/dce_10_0_d.h"
44#include "dce/dce_10_0_sh_mask.h"
45#include "smu7_smumgr.h"
46
47#define VOLTAGE_SCALE 4
48#define POWERTUNE_DEFAULT_SET_MAX 1
49#define VOLTAGE_VID_OFFSET_SCALE1 625
50#define VOLTAGE_VID_OFFSET_SCALE2 100
51#define VDDC_VDDCI_DELTA 300
52#define MC_CG_ARB_FREQ_F1 0x0b
53
54/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
55 * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
56 */
57static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
58 {600, 1050, 3, 0}, {600, 1050, 6, 1} };
59
60/* [FF, SS] type, [] 4 voltage ranges, and
61 * [Floor Freq, Boundary Freq, VID min , VID max]
62 */
63static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
64 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
65 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
66
67/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
68 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
69 */
70static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
71 {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
72
73static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
74 /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
75 {1, 0xF, 0xFD,
76 /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
77 0x19, 5, 45}
78};
79
80/* PPGen has the gain setting generated in x * 100 unit
81 * This function is to convert the unit to x * 4096(0x1000) unit.
82 * This is the unit expected by SMC firmware
83 */
84static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
85 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
86 uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
87{
88 uint32_t i;
89 uint16_t vddci;
90 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
91 *voltage = *mvdd = 0;
92
93
94 /* clock - voltage dependency table is empty table */
95 if (dep_table->count == 0)
96 return -EINVAL;
97
98 for (i = 0; i < dep_table->count; i++) {
99 /* find first sclk bigger than request */
100 if (dep_table->entries[i].clk >= clock) {
101 *voltage |= (dep_table->entries[i].vddc *
102 VOLTAGE_SCALE) << VDDC_SHIFT;
103 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
104 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
105 VOLTAGE_SCALE) << VDDCI_SHIFT;
106 else if (dep_table->entries[i].vddci)
107 *voltage |= (dep_table->entries[i].vddci *
108 VOLTAGE_SCALE) << VDDCI_SHIFT;
109 else {
110 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
111 (dep_table->entries[i].vddc -
112 VDDC_VDDCI_DELTA));
113 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
114 }
115
116 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
117 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
118 VOLTAGE_SCALE;
119 else if (dep_table->entries[i].mvdd)
120 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
121 VOLTAGE_SCALE;
122
123 *voltage |= 1 << PHASES_SHIFT;
124 return 0;
125 }
126 }
127
128 /* sclk is bigger than max sclk in the dependence table */
129 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
130
131 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
132 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
133 VOLTAGE_SCALE) << VDDCI_SHIFT;
134 else if (dep_table->entries[i-1].vddci) {
135 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
136 (dep_table->entries[i].vddc -
137 VDDC_VDDCI_DELTA));
138 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
139 }
140
141 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
142 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
143 else if (dep_table->entries[i].mvdd)
144 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
145
146 return 0;
147}
148
149
150static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
151{
152 uint32_t tmp;
153 tmp = raw_setting * 4096 / 100;
154 return (uint16_t)tmp;
155}
156
157static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
158{
159 switch (line) {
160 case SMU7_I2CLineID_DDC1:
161 *scl = SMU7_I2C_DDC1CLK;
162 *sda = SMU7_I2C_DDC1DATA;
163 break;
164 case SMU7_I2CLineID_DDC2:
165 *scl = SMU7_I2C_DDC2CLK;
166 *sda = SMU7_I2C_DDC2DATA;
167 break;
168 case SMU7_I2CLineID_DDC3:
169 *scl = SMU7_I2C_DDC3CLK;
170 *sda = SMU7_I2C_DDC3DATA;
171 break;
172 case SMU7_I2CLineID_DDC4:
173 *scl = SMU7_I2C_DDC4CLK;
174 *sda = SMU7_I2C_DDC4DATA;
175 break;
176 case SMU7_I2CLineID_DDC5:
177 *scl = SMU7_I2C_DDC5CLK;
178 *sda = SMU7_I2C_DDC5DATA;
179 break;
180 case SMU7_I2CLineID_DDC6:
181 *scl = SMU7_I2C_DDC6CLK;
182 *sda = SMU7_I2C_DDC6DATA;
183 break;
184 case SMU7_I2CLineID_SCLSDA:
185 *scl = SMU7_I2C_SCL;
186 *sda = SMU7_I2C_SDA;
187 break;
188 case SMU7_I2CLineID_DDCVGA:
189 *scl = SMU7_I2C_DDCVGACLK;
190 *sda = SMU7_I2C_DDCVGADATA;
191 break;
192 default:
193 *scl = 0;
194 *sda = 0;
195 break;
196 }
197}
198
199static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
200{
201 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
202 struct phm_ppt_v1_information *table_info =
203 (struct phm_ppt_v1_information *)(hwmgr->pptable);
204
205 if (table_info &&
206 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
207 table_info->cac_dtp_table->usPowerTuneDataSetID)
208 smu_data->power_tune_defaults =
209 &fiji_power_tune_data_set_array
210 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
211 else
212 smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
213
214}
215
216static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
217{
218
219 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
220 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
221
222 SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
223
224 struct phm_ppt_v1_information *table_info =
225 (struct phm_ppt_v1_information *)(hwmgr->pptable);
226 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
227 struct pp_advance_fan_control_parameters *fan_table =
228 &hwmgr->thermal_controller.advanceFanControlParameters;
229 uint8_t uc_scl, uc_sda;
230
231 /* TDP number of fraction bits are changed from 8 to 7 for Fiji
232 * as requested by SMC team
233 */
234 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
235 (uint16_t)(cac_dtp_table->usTDP * 128));
236 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
237 (uint16_t)(cac_dtp_table->usTDP * 128));
238
239 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
240 "Target Operating Temp is out of Range!",
241 );
242
243 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
244 dpm_table->GpuTjHyst = 8;
245
246 dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
247
248 /* The following are for new Fiji Multi-input fan/thermal control */
249 dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
250 cac_dtp_table->usTargetOperatingTemp * 256);
251 dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
252 cac_dtp_table->usTemperatureLimitHotspot * 256);
253 dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
254 cac_dtp_table->usTemperatureLimitLiquid1 * 256);
255 dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
256 cac_dtp_table->usTemperatureLimitLiquid2 * 256);
257 dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
258 cac_dtp_table->usTemperatureLimitVrVddc * 256);
259 dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
260 cac_dtp_table->usTemperatureLimitVrMvdd * 256);
261 dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
262 cac_dtp_table->usTemperatureLimitPlx * 256);
263
264 dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
265 scale_fan_gain_settings(fan_table->usFanGainEdge));
266 dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
267 scale_fan_gain_settings(fan_table->usFanGainHotspot));
268 dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
269 scale_fan_gain_settings(fan_table->usFanGainLiquid));
270 dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
271 scale_fan_gain_settings(fan_table->usFanGainVrVddc));
272 dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
273 scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
274 dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
275 scale_fan_gain_settings(fan_table->usFanGainPlx));
276 dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
277 scale_fan_gain_settings(fan_table->usFanGainHbm));
278
279 dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
280 dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
281 dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
282 dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
283
284 get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
285 dpm_table->Liquid_I2C_LineSCL = uc_scl;
286 dpm_table->Liquid_I2C_LineSDA = uc_sda;
287
288 get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
289 dpm_table->Vr_I2C_LineSCL = uc_scl;
290 dpm_table->Vr_I2C_LineSDA = uc_sda;
291
292 get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
293 dpm_table->Plx_I2C_LineSCL = uc_scl;
294 dpm_table->Plx_I2C_LineSDA = uc_sda;
295
296 return 0;
297}
298
299
300static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
301{
302 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
303 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
304
305 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
306 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
307 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
308 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
309
310 return 0;
311}
312
313
314static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
315{
316 uint16_t tdc_limit;
317 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
318 struct phm_ppt_v1_information *table_info =
319 (struct phm_ppt_v1_information *)(hwmgr->pptable);
320 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
321
322 /* TDC number of fraction bits are changed from 8 to 7
323 * for Fiji as requested by SMC team
324 */
325 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
326 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
327 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
328 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
329 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
330 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
331
332 return 0;
333}
334
335static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
336{
337 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
338 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
339 uint32_t temp;
340
341 if (smu7_read_smc_sram_dword(hwmgr,
342 fuse_table_offset +
343 offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
344 (uint32_t *)&temp, SMC_RAM_END))
345 PP_ASSERT_WITH_CODE(false,
346 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
347 return -EINVAL);
348 else {
349 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
350 smu_data->power_tune_table.LPMLTemperatureMin =
351 (uint8_t)((temp >> 16) & 0xff);
352 smu_data->power_tune_table.LPMLTemperatureMax =
353 (uint8_t)((temp >> 8) & 0xff);
354 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
355 }
356 return 0;
357}
358
359static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
360{
361 int i;
362 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
363
364 /* Currently not used. Set all to zero. */
365 for (i = 0; i < 16; i++)
366 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
367
368 return 0;
369}
370
371static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
372{
373 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
374
375 if ((hwmgr->thermal_controller.advanceFanControlParameters.
376 usFanOutputSensitivity & (1 << 15)) ||
377 0 == hwmgr->thermal_controller.advanceFanControlParameters.
378 usFanOutputSensitivity)
379 hwmgr->thermal_controller.advanceFanControlParameters.
380 usFanOutputSensitivity = hwmgr->thermal_controller.
381 advanceFanControlParameters.usDefaultFanOutputSensitivity;
382
383 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
384 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
385 advanceFanControlParameters.usFanOutputSensitivity);
386 return 0;
387}
388
389static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
390{
391 int i;
392 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
393
394 /* Currently not used. Set all to zero. */
395 for (i = 0; i < 16; i++)
396 smu_data->power_tune_table.GnbLPML[i] = 0;
397
398 return 0;
399}
400
401static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
402{
403 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
404 struct phm_ppt_v1_information *table_info =
405 (struct phm_ppt_v1_information *)(hwmgr->pptable);
406 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
407 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
408 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
409
410 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
411 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
412
413 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
414 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
415 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
416 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
417
418 return 0;
419}
420
421static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
422{
423 uint32_t pm_fuse_table_offset;
424 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
425
426 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
427 PHM_PlatformCaps_PowerContainment)) {
428 if (smu7_read_smc_sram_dword(hwmgr,
429 SMU7_FIRMWARE_HEADER_LOCATION +
430 offsetof(SMU73_Firmware_Header, PmFuseTable),
431 &pm_fuse_table_offset, SMC_RAM_END))
432 PP_ASSERT_WITH_CODE(false,
433 "Attempt to get pm_fuse_table_offset Failed!",
434 return -EINVAL);
435
436 /* DW6 */
437 if (fiji_populate_svi_load_line(hwmgr))
438 PP_ASSERT_WITH_CODE(false,
439 "Attempt to populate SviLoadLine Failed!",
440 return -EINVAL);
441 /* DW7 */
442 if (fiji_populate_tdc_limit(hwmgr))
443 PP_ASSERT_WITH_CODE(false,
444 "Attempt to populate TDCLimit Failed!", return -EINVAL);
445 /* DW8 */
446 if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
447 PP_ASSERT_WITH_CODE(false,
448 "Attempt to populate TdcWaterfallCtl, "
449 "LPMLTemperature Min and Max Failed!",
450 return -EINVAL);
451
452 /* DW9-DW12 */
453 if (0 != fiji_populate_temperature_scaler(hwmgr))
454 PP_ASSERT_WITH_CODE(false,
455 "Attempt to populate LPMLTemperatureScaler Failed!",
456 return -EINVAL);
457
458 /* DW13-DW14 */
459 if (fiji_populate_fuzzy_fan(hwmgr))
460 PP_ASSERT_WITH_CODE(false,
461 "Attempt to populate Fuzzy Fan Control parameters Failed!",
462 return -EINVAL);
463
464 /* DW15-DW18 */
465 if (fiji_populate_gnb_lpml(hwmgr))
466 PP_ASSERT_WITH_CODE(false,
467 "Attempt to populate GnbLPML Failed!",
468 return -EINVAL);
469
470 /* DW20 */
471 if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
472 PP_ASSERT_WITH_CODE(false,
473 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
474 "Sidd Failed!", return -EINVAL);
475
476 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
477 (uint8_t *)&smu_data->power_tune_table,
478 sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
479 PP_ASSERT_WITH_CODE(false,
480 "Attempt to download PmFuseTable Failed!",
481 return -EINVAL);
482 }
483 return 0;
484}
485
486/**
487* Preparation of vddc and vddgfx CAC tables for SMC.
488*
489* @param hwmgr the address of the hardware manager
490* @param table the SMC DPM table structure to be populated
491* @return always 0
492*/
493static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
494 struct SMU73_Discrete_DpmTable *table)
495{
496 uint32_t count;
497 uint8_t index;
498 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
499 struct phm_ppt_v1_information *table_info =
500 (struct phm_ppt_v1_information *)(hwmgr->pptable);
501 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
502 table_info->vddc_lookup_table;
503	/* table is already swapped, so in order to use the value from it,
504 * we need to swap it back.
505 * We are populating vddc CAC data to BapmVddc table
506 * in split and merged mode
507 */
508
509 for (count = 0; count < lookup_table->count; count++) {
510 index = phm_get_voltage_index(lookup_table,
511 data->vddc_voltage_table.entries[count].value);
512 table->BapmVddcVidLoSidd[count] =
513 convert_to_vid(lookup_table->entries[index].us_cac_low);
514 table->BapmVddcVidHiSidd[count] =
515 convert_to_vid(lookup_table->entries[index].us_cac_high);
516 }
517
518 return 0;
519}
520
521/**
522* Preparation of voltage tables for SMC.
523*
524* @param hwmgr the address of the hardware manager
525* @param table the SMC DPM table structure to be populated
526* @return always 0
527*/
528
529static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
530 struct SMU73_Discrete_DpmTable *table)
531{
532 int result;
533
534 result = fiji_populate_cac_table(hwmgr, table);
535 PP_ASSERT_WITH_CODE(0 == result,
536 "can not populate CAC voltage tables to SMC",
537 return -EINVAL);
538
539 return 0;
540}
541
542static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
543 struct SMU73_Discrete_Ulv *state)
544{
545 int result = 0;
546
547 struct phm_ppt_v1_information *table_info =
548 (struct phm_ppt_v1_information *)(hwmgr->pptable);
549
550 state->CcPwrDynRm = 0;
551 state->CcPwrDynRm1 = 0;
552
553 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
554 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
555 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
556
557 state->VddcPhase = 1;
558
559 if (!result) {
560 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
561 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
562 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
563 }
564 return result;
565}
566
567static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
568 struct SMU73_Discrete_DpmTable *table)
569{
570 return fiji_populate_ulv_level(hwmgr, &table->Ulv);
571}
572
573static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
574 struct SMU73_Discrete_DpmTable *table)
575{
576 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
577 struct smu7_dpm_table *dpm_table = &data->dpm_table;
578 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
579 int i;
580
581 /* Index (dpm_table->pcie_speed_table.count)
582 * is reserved for PCIE boot level. */
583 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
584 table->LinkLevel[i].PcieGenSpeed =
585 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
586 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
587 dpm_table->pcie_speed_table.dpm_levels[i].param1);
588 table->LinkLevel[i].EnabledForActivity = 1;
589 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
590 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
591 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
592 }
593
594 smu_data->smc_state_table.LinkLevelCount =
595 (uint8_t)dpm_table->pcie_speed_table.count;
596 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
597 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
598
599 return 0;
600}
601
602
603/**
604* Calculates the SCLK dividers using the provided engine clock
605*
606* @param hwmgr the address of the hardware manager
607* @param clock the engine clock to use to populate the structure
608* @param sclk the SMC SCLK structure to be populated
609*/
610static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
611 uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
612{
613 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
614 struct pp_atomctrl_clock_dividers_vi dividers;
615 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
616 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
617 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
618 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
619 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
620 uint32_t ref_clock;
621 uint32_t ref_divider;
622 uint32_t fbdiv;
623 int result;
624
625 /* get the engine clock dividers for this clock value */
626 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
627
628 PP_ASSERT_WITH_CODE(result == 0,
629 "Error retrieving Engine Clock dividers from VBIOS.",
630 return result);
631
632 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
633 ref_clock = atomctrl_get_reference_clock(hwmgr);
634 ref_divider = 1 + dividers.uc_pll_ref_div;
635
636 /* low 14 bits is fraction and high 12 bits is divider */
637 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
638
639 /* SPLL_FUNC_CNTL setup */
640 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
641 SPLL_REF_DIV, dividers.uc_pll_ref_div);
642 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
643 SPLL_PDIV_A, dividers.uc_pll_post_div);
644
645 /* SPLL_FUNC_CNTL_3 setup*/
646 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
647 SPLL_FB_DIV, fbdiv);
648
649 /* set to use fractional accumulation*/
650 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
651 SPLL_DITHEN, 1);
652
653 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
654 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
655 struct pp_atomctrl_internal_ss_info ssInfo;
656
657 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
658 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
659 vco_freq, &ssInfo)) {
660 /*
661 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
662 * ss_info.speed_spectrum_rate -- in unit of khz
663 *
664 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
665 */
666 uint32_t clk_s = ref_clock * 5 /
667 (ref_divider * ssInfo.speed_spectrum_rate);
668 /* clkv = 2 * D * fbdiv / NS */
669 uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
670 fbdiv / (clk_s * 10000);
671
672 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
673 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
674 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
675 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
676 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
677 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
678 }
679 }
680
681 sclk->SclkFrequency = clock;
682 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
683 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
684 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
685 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
686 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
687
688 return 0;
689}
690
691/**
692* Populates single SMC SCLK structure using the provided engine clock
693*
694* @param hwmgr the address of the hardware manager
695* @param clock the engine clock to use to populate the structure
696* @param sclk the SMC SCLK structure to be populated
697*/
698
699static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
700 uint32_t clock, uint16_t sclk_al_threshold,
701 struct SMU73_Discrete_GraphicsLevel *level)
702{
703 int result;
704 /* PP_Clocks minClocks; */
705 uint32_t threshold, mvdd;
706 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
707 struct phm_ppt_v1_information *table_info =
708 (struct phm_ppt_v1_information *)(hwmgr->pptable);
709
710 result = fiji_calculate_sclk_params(hwmgr, clock, level);
711
712 /* populate graphics levels */
713 result = fiji_get_dependency_volt_by_clk(hwmgr,
714 table_info->vdd_dep_on_sclk, clock,
715 (uint32_t *)(&level->MinVoltage), &mvdd);
716 PP_ASSERT_WITH_CODE((0 == result),
717 "can not find VDDC voltage value for "
718 "VDDC engine clock dependency table",
719 return result);
720
721 level->SclkFrequency = clock;
722 level->ActivityLevel = sclk_al_threshold;
723 level->CcPwrDynRm = 0;
724 level->CcPwrDynRm1 = 0;
725 level->EnabledForActivity = 0;
726 level->EnabledForThrottle = 1;
727 level->UpHyst = 10;
728 level->DownHyst = 0;
729 level->VoltageDownHyst = 0;
730 level->PowerThrottle = 0;
731
732 threshold = clock * data->fast_watermark_threshold / 100;
733
734 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
735
736 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
737 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
738 hwmgr->display_config.min_core_set_clock_in_sr);
739
740
741 /* Default to slow, highest DPM level will be
742 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
743 */
744 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
745
746 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
747 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
748 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
749 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
750 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
751 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
752 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
753 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
754 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
755
756 return 0;
757}
758/**
759* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
760*
761* @param hwmgr the address of the hardware manager
762*/
763int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
764{
765 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
766 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
767
768 struct smu7_dpm_table *dpm_table = &data->dpm_table;
769 struct phm_ppt_v1_information *table_info =
770 (struct phm_ppt_v1_information *)(hwmgr->pptable);
771 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
772 uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
773 int result = 0;
774 uint32_t array = smu_data->smu7_data.dpm_table_start +
775 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
776 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
777 SMU73_MAX_LEVELS_GRAPHICS;
778 struct SMU73_Discrete_GraphicsLevel *levels =
779 smu_data->smc_state_table.GraphicsLevel;
780 uint32_t i, max_entry;
781 uint8_t hightest_pcie_level_enabled = 0,
782 lowest_pcie_level_enabled = 0,
783 mid_pcie_level_enabled = 0,
784 count = 0;
785
786 for (i = 0; i < dpm_table->sclk_table.count; i++) {
787 result = fiji_populate_single_graphic_level(hwmgr,
788 dpm_table->sclk_table.dpm_levels[i].value,
789 (uint16_t)smu_data->activity_target[i],
790 &levels[i]);
791 if (result)
792 return result;
793
794 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
795 if (i > 1)
796 levels[i].DeepSleepDivId = 0;
797 }
798
799 /* Only enable level 0 for now.*/
800 levels[0].EnabledForActivity = 1;
801
802 /* set highest level watermark to high */
803 levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
804 PPSMC_DISPLAY_WATERMARK_HIGH;
805
806 smu_data->smc_state_table.GraphicsDpmLevelCount =
807 (uint8_t)dpm_table->sclk_table.count;
808 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
809 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
810
811 if (pcie_table != NULL) {
812 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
813 "There must be 1 or more PCIE levels defined in PPTable.",
814 return -EINVAL);
815 max_entry = pcie_entry_cnt - 1;
816 for (i = 0; i < dpm_table->sclk_table.count; i++)
817 levels[i].pcieDpmLevel =
818 (uint8_t) ((i < max_entry) ? i : max_entry);
819 } else {
820 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
821 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
822 (1 << (hightest_pcie_level_enabled + 1))) != 0))
823 hightest_pcie_level_enabled++;
824
825 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
826 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
827 (1 << lowest_pcie_level_enabled)) == 0))
828 lowest_pcie_level_enabled++;
829
830 while ((count < hightest_pcie_level_enabled) &&
831 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
832 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
833 count++;
834
835 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
836 hightest_pcie_level_enabled ?
837 (lowest_pcie_level_enabled + 1 + count) :
838 hightest_pcie_level_enabled;
839
840 /* set pcieDpmLevel to hightest_pcie_level_enabled */
841 for (i = 2; i < dpm_table->sclk_table.count; i++)
842 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
843
844 /* set pcieDpmLevel to lowest_pcie_level_enabled */
845 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
846
847 /* set pcieDpmLevel to mid_pcie_level_enabled */
848 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
849 }
850	/* level count is sent to the SMC once at SMC table init and never changes */
851 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
852 (uint32_t)array_size, SMC_RAM_END);
853
854 return result;
855}
856
857
858/**
859 * MCLK Frequency Ratio
860 * SEQ_CG_RESP Bit[31:24] - 0x0
861 * Bit[27:24] - DDR3 Frequency ratio
862 * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
863 * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
864 * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
865 * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
866 * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
867 * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
868 * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
869 * 400 < 0x7 <= 450MHz, 800 < 0xF
870 */
871static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
872{
873 if (mem_clock <= 10000)
874 return 0x0;
875 if (mem_clock <= 15000)
876 return 0x1;
877 if (mem_clock <= 20000)
878 return 0x2;
879 if (mem_clock <= 25000)
880 return 0x3;
881 if (mem_clock <= 30000)
882 return 0x4;
883 if (mem_clock <= 35000)
884 return 0x5;
885 if (mem_clock <= 40000)
886 return 0x6;
887 if (mem_clock <= 45000)
888 return 0x7;
889 if (mem_clock <= 50000)
890 return 0x8;
891 if (mem_clock <= 55000)
892 return 0x9;
893 if (mem_clock <= 60000)
894 return 0xa;
895 if (mem_clock <= 65000)
896 return 0xb;
897 if (mem_clock <= 70000)
898 return 0xc;
899 if (mem_clock <= 75000)
900 return 0xd;
901 if (mem_clock <= 80000)
902 return 0xe;
903 /* mem_clock > 800MHz */
904 return 0xf;
905}
906
907/**
908* Populates the SMC MCLK structure using the provided memory clock
909*
910* @param hwmgr the address of the hardware manager
911* @param clock the memory clock to use to populate the structure
912* @param sclk the SMC SCLK structure to be populated
913*/
914static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
915 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
916{
917 struct pp_atomctrl_memory_clock_param mem_param;
918 int result;
919
920 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
921 PP_ASSERT_WITH_CODE((0 == result),
922 "Failed to get Memory PLL Dividers.",
923 );
924
925	/* Save the result data to output memory level structure */
926 mclk->MclkFrequency = clock;
927 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
928 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
929
930 return result;
931}
932
933static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
934 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
935{
936 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
937 struct phm_ppt_v1_information *table_info =
938 (struct phm_ppt_v1_information *)(hwmgr->pptable);
939 int result = 0;
940 uint32_t mclk_stutter_mode_threshold = 60000;
941
942 if (table_info->vdd_dep_on_mclk) {
943 result = fiji_get_dependency_volt_by_clk(hwmgr,
944 table_info->vdd_dep_on_mclk, clock,
945 (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
946 PP_ASSERT_WITH_CODE((0 == result),
947 "can not find MinVddc voltage value from memory "
948 "VDDC voltage dependency table", return result);
949 }
950
951 mem_level->EnabledForThrottle = 1;
952 mem_level->EnabledForActivity = 0;
953 mem_level->UpHyst = 0;
954 mem_level->DownHyst = 100;
955 mem_level->VoltageDownHyst = 0;
956 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
957 mem_level->StutterEnable = false;
958
959 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
960
961	/* enable stutter mode if all the following conditions apply
962 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
963 * &(data->DisplayTiming.numExistingDisplays));
964 */
965 data->display_timing.num_existing_displays = 1;
966
967 if (mclk_stutter_mode_threshold &&
968 (clock <= mclk_stutter_mode_threshold) &&
969 (!data->is_uvd_enabled) &&
970 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
971 STUTTER_ENABLE) & 0x1))
972 mem_level->StutterEnable = true;
973
974 result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
975 if (!result) {
976 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
977 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
978 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
979 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
980 }
981 return result;
982}
983
984/**
985* Populates all SMC MCLK level structures based on the trimmed allowed DPM memory clock states.
986*
987* @param hwmgr the address of the hardware manager
988*/
989int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
990{
991 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
992 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
993 struct smu7_dpm_table *dpm_table = &data->dpm_table;
994 int result;
995 /* populate MCLK dpm table to SMU7 */
996 uint32_t array = smu_data->smu7_data.dpm_table_start +
997 offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
998 uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
999 SMU73_MAX_LEVELS_MEMORY;
1000 struct SMU73_Discrete_MemoryLevel *levels =
1001 smu_data->smc_state_table.MemoryLevel;
1002 uint32_t i;
1003
1004 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1005 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1006 "can not populate memory level as memory clock is zero",
1007 return -EINVAL);
1008 result = fiji_populate_single_memory_level(hwmgr,
1009 dpm_table->mclk_table.dpm_levels[i].value,
1010 &levels[i]);
1011 if (result)
1012 return result;
1013 }
1014
1015 /* Only enable level 0 for now. */
1016 levels[0].EnabledForActivity = 1;
1017
1018	/* In order to prevent MC activity in stutter mode from pushing DPM up,
1019	 * the UVD change complements this by putting the MCLK in
1020	 * a higher state by default such that we are not affected by
1021	 * the up threshold or MCLK DPM latency.
1022 */
1023 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
1024 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1025
1026 smu_data->smc_state_table.MemoryDpmLevelCount =
1027 (uint8_t)dpm_table->mclk_table.count;
1028 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
1029 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1030 /* set highest level watermark to high */
1031 levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
1032 PPSMC_DISPLAY_WATERMARK_HIGH;
1033
1034	/* the level count is sent to the SMC once at SMC table init and never changes */
1035 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1036 (uint32_t)array_size, SMC_RAM_END);
1037
1038 return result;
1039}
1040
1041
1042/**
1043* Populates the SMC MVDD structure using the provided memory clock.
1044*
1045* @param hwmgr the address of the hardware manager
1046* @param mclk the MCLK value used to decide whether MVDD should be high or low.
1047* @param smio_pat the SMIO pattern structure to be populated
1048*/
1049static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1050 uint32_t mclk, SMIO_Pattern *smio_pat)
1051{
1052 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1053 struct phm_ppt_v1_information *table_info =
1054 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1055 uint32_t i = 0;
1056
1057 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1058		/* find the MVDD entry whose clock is at least the requested clock */
1059 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1060 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1061 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1062 break;
1063 }
1064 }
1065 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1066 "MVDD Voltage is outside the supported range.",
1067 return -EINVAL);
1068 } else
1069 return -EINVAL;
1070
1071 return 0;
1072}
1073
1074static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1075 SMU73_Discrete_DpmTable *table)
1076{
1077 int result = 0;
1078 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1079 struct phm_ppt_v1_information *table_info =
1080 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1081 struct pp_atomctrl_clock_dividers_vi dividers;
1082 SMIO_Pattern vol_level;
1083 uint32_t mvdd;
1084 uint16_t us_mvdd;
1085 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1086 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1087
1088 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1089
1090 if (!data->sclk_dpm_key_disabled) {
1091 /* Get MinVoltage and Frequency from DPM0,
1092 * already converted to SMC_UL */
1093 table->ACPILevel.SclkFrequency =
1094 data->dpm_table.sclk_table.dpm_levels[0].value;
1095 result = fiji_get_dependency_volt_by_clk(hwmgr,
1096 table_info->vdd_dep_on_sclk,
1097 table->ACPILevel.SclkFrequency,
1098 (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
1099 PP_ASSERT_WITH_CODE((0 == result),
1100 "Cannot find ACPI VDDC voltage value " \
1101 "in Clock Dependency Table",
1102 );
1103 } else {
1104 table->ACPILevel.SclkFrequency =
1105 data->vbios_boot_state.sclk_bootup_value;
1106 table->ACPILevel.MinVoltage =
1107 data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
1108 }
1109
1110 /* get the engine clock dividers for this clock value */
1111 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1112 table->ACPILevel.SclkFrequency, &dividers);
1113 PP_ASSERT_WITH_CODE(result == 0,
1114 "Error retrieving Engine Clock dividers from VBIOS.",
1115 return result);
1116
1117 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1118 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1119 table->ACPILevel.DeepSleepDivId = 0;
1120
1121 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1122 SPLL_PWRON, 0);
1123 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1124 SPLL_RESET, 1);
1125 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
1126 SCLK_MUX_SEL, 4);
1127
1128 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1129 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1130 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1131 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1132 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1133 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1134 table->ACPILevel.CcPwrDynRm = 0;
1135 table->ACPILevel.CcPwrDynRm1 = 0;
1136
1137 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1138 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1139 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1140 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1141 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1142 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1143 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1144 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1145 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1146 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1147 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1148
1149 if (!data->mclk_dpm_key_disabled) {
1150 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1151 table->MemoryACPILevel.MclkFrequency =
1152 data->dpm_table.mclk_table.dpm_levels[0].value;
1153 result = fiji_get_dependency_volt_by_clk(hwmgr,
1154 table_info->vdd_dep_on_mclk,
1155 table->MemoryACPILevel.MclkFrequency,
1156 (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
1157 PP_ASSERT_WITH_CODE((0 == result),
1158 "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
1159 );
1160 } else {
1161 table->MemoryACPILevel.MclkFrequency =
1162 data->vbios_boot_state.mclk_bootup_value;
1163 table->MemoryACPILevel.MinVoltage =
1164 data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
1165 }
1166
1167 us_mvdd = 0;
1168 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1169 (data->mclk_dpm_key_disabled))
1170 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1171 else {
1172 if (!fiji_populate_mvdd_value(hwmgr,
1173 data->dpm_table.mclk_table.dpm_levels[0].value,
1174 &vol_level))
1175 us_mvdd = vol_level.Voltage;
1176 }
1177
1178 table->MemoryACPILevel.MinMvdd =
1179 PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
1180
1181 table->MemoryACPILevel.EnabledForThrottle = 0;
1182 table->MemoryACPILevel.EnabledForActivity = 0;
1183 table->MemoryACPILevel.UpHyst = 0;
1184 table->MemoryACPILevel.DownHyst = 100;
1185 table->MemoryACPILevel.VoltageDownHyst = 0;
1186 table->MemoryACPILevel.ActivityLevel =
1187 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1188
1189 table->MemoryACPILevel.StutterEnable = false;
1190 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1191 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1192
1193 return result;
1194}
1195
1196static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1197 SMU73_Discrete_DpmTable *table)
1198{
1199 int result = -EINVAL;
1200 uint8_t count;
1201 struct pp_atomctrl_clock_dividers_vi dividers;
1202 struct phm_ppt_v1_information *table_info =
1203 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1204 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1205 table_info->mm_dep_table;
1206
1207 table->VceLevelCount = (uint8_t)(mm_table->count);
1208 table->VceBootLevel = 0;
1209
1210 for (count = 0; count < table->VceLevelCount; count++) {
1211 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1212 table->VceLevel[count].MinVoltage = 0;
1213 table->VceLevel[count].MinVoltage |=
1214 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1215 table->VceLevel[count].MinVoltage |=
1216 ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
1217 VOLTAGE_SCALE) << VDDCI_SHIFT;
1218 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1219
1220		/* retrieve divider value for VBIOS */
1221 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1222 table->VceLevel[count].Frequency, &dividers);
1223 PP_ASSERT_WITH_CODE((0 == result),
1224 "can not find divide id for VCE engine clock",
1225 return result);
1226
1227 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1228
1229 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1230 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1231 }
1232 return result;
1233}
1234
1235static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1236 SMU73_Discrete_DpmTable *table)
1237{
1238 int result = -EINVAL;
1239 uint8_t count;
1240 struct pp_atomctrl_clock_dividers_vi dividers;
1241 struct phm_ppt_v1_information *table_info =
1242 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1243 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1244 table_info->mm_dep_table;
1245
1246 table->AcpLevelCount = (uint8_t)(mm_table->count);
1247 table->AcpBootLevel = 0;
1248
1249 for (count = 0; count < table->AcpLevelCount; count++) {
1250 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
1251		table->AcpLevel[count].MinVoltage = (mm_table->entries[count].vddc *
1252 VOLTAGE_SCALE) << VDDC_SHIFT;
1253 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1254 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1255 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1256
1257 /* retrieve divider value for VBIOS */
1258 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1259 table->AcpLevel[count].Frequency, &dividers);
1260 PP_ASSERT_WITH_CODE((0 == result),
1261 "can not find divide id for engine clock", return result);
1262
1263 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1264
1265 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1266 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
1267 }
1268 return result;
1269}
1270
1271static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1272 SMU73_Discrete_DpmTable *table)
1273{
1274 int result = -EINVAL;
1275 uint8_t count;
1276 struct pp_atomctrl_clock_dividers_vi dividers;
1277 struct phm_ppt_v1_information *table_info =
1278 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1279 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1280 table_info->mm_dep_table;
1281
1282 table->SamuBootLevel = 0;
1283 table->SamuLevelCount = (uint8_t)(mm_table->count);
1284
1285 for (count = 0; count < table->SamuLevelCount; count++) {
1286 /* not sure whether we need evclk or not */
1287 table->SamuLevel[count].MinVoltage = 0;
1288 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1289 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1290 VOLTAGE_SCALE) << VDDC_SHIFT;
1291 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1292 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1293 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1294
1295 /* retrieve divider value for VBIOS */
1296 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1297 table->SamuLevel[count].Frequency, &dividers);
1298 PP_ASSERT_WITH_CODE((0 == result),
1299 "can not find divide id for samu clock", return result);
1300
1301 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1302
1303 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1304 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1305 }
1306 return result;
1307}
1308
1309static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1310 int32_t eng_clock, int32_t mem_clock,
1311 struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
1312{
1313 uint32_t dram_timing;
1314 uint32_t dram_timing2;
1315 uint32_t burstTime;
1316	uint32_t state, trrds, trrdl;
1317 int result;
1318
1319 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1320 eng_clock, mem_clock);
1321 PP_ASSERT_WITH_CODE(result == 0,
1322 "Error calling VBIOS to set DRAM_TIMING.", return result);
1323
1324 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1325 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1326 burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
1327
1328 state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
1329 trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
1330 trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
1331
1332 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1333 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1334 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1335 arb_regs->TRRDS = (uint8_t)trrds;
1336 arb_regs->TRRDL = (uint8_t)trrdl;
1337
1338 return 0;
1339}
1340
1341static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1342{
1343 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1344 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1345 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
1346 uint32_t i, j;
1347 int result = 0;
1348
1349 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1350 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1351 result = fiji_populate_memory_timing_parameters(hwmgr,
1352 data->dpm_table.sclk_table.dpm_levels[i].value,
1353 data->dpm_table.mclk_table.dpm_levels[j].value,
1354 &arb_regs.entries[i][j]);
1355 if (result)
1356 break;
1357 }
1358 }
1359
1360 if (!result)
1361 result = smu7_copy_bytes_to_smc(
1362 hwmgr,
1363 smu_data->smu7_data.arb_table_start,
1364 (uint8_t *)&arb_regs,
1365 sizeof(SMU73_Discrete_MCArbDramTimingTable),
1366 SMC_RAM_END);
1367 return result;
1368}
1369
1370static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1371 struct SMU73_Discrete_DpmTable *table)
1372{
1373 int result = -EINVAL;
1374 uint8_t count;
1375 struct pp_atomctrl_clock_dividers_vi dividers;
1376 struct phm_ppt_v1_information *table_info =
1377 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1378 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1379 table_info->mm_dep_table;
1380
1381 table->UvdLevelCount = (uint8_t)(mm_table->count);
1382 table->UvdBootLevel = 0;
1383
1384 for (count = 0; count < table->UvdLevelCount; count++) {
1385 table->UvdLevel[count].MinVoltage = 0;
1386 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1387 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1388 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1389 VOLTAGE_SCALE) << VDDC_SHIFT;
1390 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1391 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1392 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1393
1394 /* retrieve divider value for VBIOS */
1395 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1396 table->UvdLevel[count].VclkFrequency, &dividers);
1397 PP_ASSERT_WITH_CODE((0 == result),
1398 "can not find divide id for Vclk clock", return result);
1399
1400 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1401
1402 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1403 table->UvdLevel[count].DclkFrequency, &dividers);
1404 PP_ASSERT_WITH_CODE((0 == result),
1405 "can not find divide id for Dclk clock", return result);
1406
1407 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1408
1409 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1410 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1411 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1412
1413 }
1414 return result;
1415}
1416
1417static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1418 struct SMU73_Discrete_DpmTable *table)
1419{
1420 int result = 0;
1421 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1422
1423 table->GraphicsBootLevel = 0;
1424 table->MemoryBootLevel = 0;
1425
1426 /* find boot level from dpm table */
1427 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1428 data->vbios_boot_state.sclk_bootup_value,
1429 (uint32_t *)&(table->GraphicsBootLevel));
1430
1431 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1432 data->vbios_boot_state.mclk_bootup_value,
1433 (uint32_t *)&(table->MemoryBootLevel));
1434
1435 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1436 VOLTAGE_SCALE;
1437 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1438 VOLTAGE_SCALE;
1439 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1440 VOLTAGE_SCALE;
1441
1442 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1443 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1444 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1445
1446 return 0;
1447}
1448
1449static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1450{
1451 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1452 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1453 struct phm_ppt_v1_information *table_info =
1454 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1455 uint8_t count, level;
1456
1457 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1458 for (level = 0; level < count; level++) {
1459 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1460 data->vbios_boot_state.sclk_bootup_value) {
1461 smu_data->smc_state_table.GraphicsBootLevel = level;
1462 break;
1463 }
1464 }
1465
1466 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1467 for (level = 0; level < count; level++) {
1468 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1469 data->vbios_boot_state.mclk_bootup_value) {
1470 smu_data->smc_state_table.MemoryBootLevel = level;
1471 break;
1472 }
1473 }
1474
1475 return 0;
1476}
1477
1478static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1479{
1480 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1481 volt_with_cks, value;
1482 uint16_t clock_freq_u16;
1483 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1484 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1485 volt_offset = 0;
1486 struct phm_ppt_v1_information *table_info =
1487 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1488 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1489 table_info->vdd_dep_on_sclk;
1490
1491 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1492
1493	/* Read SMU_EFUSE to calculate RO and determine whether
1494	 * the part is SS or FF; if RO >= 1660MHz, the part is FF.
1495 */
1496 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1497 ixSMU_EFUSE_0 + (146 * 4));
1498 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1499 ixSMU_EFUSE_0 + (148 * 4));
1500 efuse &= 0xFF000000;
1501 efuse = efuse >> 24;
1502 efuse2 &= 0xF;
1503
1504 if (efuse2 == 1)
1505 ro = (2300 - 1350) * efuse / 255 + 1350;
1506 else
1507 ro = (2500 - 1000) * efuse / 255 + 1000;
1508
1509 if (ro >= 1660)
1510 type = 0;
1511 else
1512 type = 1;
1513
1514 /* Populate Stretch amount */
1515 smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
1516
1517 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1518 for (i = 0; i < sclk_table->count; i++) {
1519 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1520 sclk_table->entries[i].cks_enable << i;
1521 volt_without_cks = (uint32_t)((14041 *
1522 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1523 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1524 volt_with_cks = (uint32_t)((13946 *
1525 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1526 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1527 if (volt_without_cks >= volt_with_cks)
1528 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1529 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1530 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1531 }
1532
1533 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1534 STRETCH_ENABLE, 0x0);
1535 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1536 masterReset, 0x1);
1537 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1538 staticEnable, 0x1);
1539 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1540 masterReset, 0x0);
1541
1542 /* Populate CKS Lookup Table */
1543 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1544 stretch_amount2 = 0;
1545 else if (stretch_amount == 3 || stretch_amount == 4)
1546 stretch_amount2 = 1;
1547 else {
1548 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1549 PHM_PlatformCaps_ClockStretcher);
1550 PP_ASSERT_WITH_CODE(false,
1551 "Stretch Amount in PPTable not supported\n",
1552 return -EINVAL);
1553 }
1554
1555 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1556 ixPWR_CKS_CNTL);
1557 value &= 0xFFC2FF87;
1558 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1559 fiji_clock_stretcher_lookup_table[stretch_amount2][0];
1560 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1561 fiji_clock_stretcher_lookup_table[stretch_amount2][1];
1562 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
1563 GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
1564 SclkFrequency) / 100);
1565 if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
1566 clock_freq_u16 &&
1567 fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
1568 clock_freq_u16) {
1569 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1570 value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1571 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1572 value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1573 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1574 value |= (fiji_clock_stretch_amount_conversion
1575 [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
1576 [stretch_amount]) << 3;
1577 }
1578 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1579 CKS_LOOKUPTableEntry[0].minFreq);
1580 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1581 CKS_LOOKUPTableEntry[0].maxFreq);
1582 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1583 fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1584 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1585 (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1586
1587 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1588 ixPWR_CKS_CNTL, value);
1589
1590 /* Populate DDT Lookup Table */
1591 for (i = 0; i < 4; i++) {
1592 /* Assign the minimum and maximum VID stored
1593 * in the last row of Clock Stretcher Voltage Table.
1594 */
1595 smu_data->smc_state_table.ClockStretcherDataTable.
1596 ClockStretcherDataTableEntry[i].minVID =
1597 (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
1598 smu_data->smc_state_table.ClockStretcherDataTable.
1599 ClockStretcherDataTableEntry[i].maxVID =
1600 (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
1601 /* Loop through each SCLK and check the frequency
1602 * to see if it lies within the frequency for clock stretcher.
1603 */
1604 for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
1605 cks_setting = 0;
1606 clock_freq = PP_SMC_TO_HOST_UL(
1607 smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
1608 /* Check the allowed frequency against the sclk level[j].
1609 * Sclk's endianness has already been converted,
1610			 * and it is in 10 kHz units,
1611			 * as opposed to the data table, which is in MHz units.
1612 */
1613 if (clock_freq >=
1614 (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
1615 cks_setting |= 0x2;
1616 if (clock_freq <
1617 (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
1618 cks_setting |= 0x1;
1619 }
1620 smu_data->smc_state_table.ClockStretcherDataTable.
1621 ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
1622 }
1623 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
1624 ClockStretcherDataTable.
1625 ClockStretcherDataTableEntry[i].setting);
1626 }
1627
1628 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1629 value &= 0xFFFFFFFE;
1630 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1631
1632 return 0;
1633}
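Two pieces of the function above are easy to miss, so here is a standalone sketch with a worked value (assumptions, not driver code): the efuse byte is mapped linearly to a ring-oscillator value (e.g. efuse = 128 with efuse2 = 1 gives ro = (2300 - 1350) * 128 / 255 + 1350 = 1826, an FF part), and each ClockStretcherDataTable row packs a 2-bit code per graphics DPM level.

#include <stdint.h>

/* Sketch only: 2-bit code per SCLK level, packed into one 16-bit setting.
 * bit 1: sclk >= row minimum, bit 0: additionally below the row maximum.
 * SCLKs are in 10 kHz units, the DDT row limits in MHz (hence the * 100).
 */
static uint16_t cks_row_setting_sketch(const uint32_t *sclk_10khz, int levels,
				       uint32_t row_min_mhz, uint32_t row_max_mhz)
{
	uint16_t setting = 0;
	int j;

	for (j = 0; j < levels && j < 8; j++) {
		uint8_t code = 0;

		if (sclk_10khz[j] >= row_min_mhz * 100) {
			code |= 0x2;
			if (sclk_10khz[j] < row_max_mhz * 100)
				code |= 0x1;
		}
		setting |= (uint16_t)code << (j * 2);
	}
	return setting;
}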
1634
1635/**
1636* Populates the SMC VRConfig field in DPM table.
1637*
1638* @param hwmgr the address of the hardware manager
1639* @param table the SMC DPM table structure to be populated
1640* @return always 0
1641*/
1642static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
1643 struct SMU73_Discrete_DpmTable *table)
1644{
1645 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1646 uint16_t config;
1647
1648 config = VR_MERGED_WITH_VDDC;
1649 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1650
1651 /* Set Vddc Voltage Controller */
1652 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1653 config = VR_SVI2_PLANE_1;
1654 table->VRConfig |= config;
1655 } else {
1656 PP_ASSERT_WITH_CODE(false,
1657 "VDDC should be on SVI2 control in merged mode!",
1658 );
1659 }
1660 /* Set Vddci Voltage Controller */
1661 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1662 config = VR_SVI2_PLANE_2; /* only in merged mode */
1663 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1664 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1665 config = VR_SMIO_PATTERN_1;
1666 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1667 } else {
1668 config = VR_STATIC_VOLTAGE;
1669 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1670 }
1671 /* Set Mvdd Voltage Controller */
1672 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1673 config = VR_SVI2_PLANE_2;
1674 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1675 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1676 config = VR_SMIO_PATTERN_2;
1677 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1678 } else {
1679 config = VR_STATIC_VOLTAGE;
1680 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1681 }
1682
1683 return 0;
1684}
1685
1686static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
1687{
1688 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1689 uint32_t tmp;
1690 int result;
1691
1692 /* This is a read-modify-write on the first byte of the ARB table.
1693 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
1694 * is the field 'current'.
1695	 * This solution is ugly, but we never write the whole table,
1696	 * only individual fields in it.
1697 * In reality this field should not be in that structure
1698 * but in a soft register.
1699 */
1700 result = smu7_read_smc_sram_dword(hwmgr,
1701 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1702
1703 if (result)
1704 return result;
1705
1706 tmp &= 0x00FFFFFF;
1707 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1708
1709 return smu7_write_smc_sram_dword(hwmgr,
1710 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1711}
1712
1713static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
1714{
1715 struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1716 struct SMU73_Discrete_GraphicsLevel *levels =
1717 data->smc_state_table.GraphicsLevel;
1718 unsigned min_level = 1;
1719
1720 hwmgr->default_gfx_power_profile.activity_threshold =
1721 be16_to_cpu(levels[0].ActivityLevel);
1722 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1723 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1724 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1725
1726 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1727 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1728
1729 /* Workaround compute SDMA instability: disable lowest SCLK
1730 * DPM level. Optimize compute power profile: Use only highest
1731 * 2 power levels (if more than 2 are available), Hysteresis:
1732 * 0ms up, 5ms down
1733 */
1734 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1735 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1736 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1737 min_level = 1;
1738 else
1739 min_level = 0;
1740 hwmgr->default_compute_power_profile.min_sclk =
1741 be32_to_cpu(levels[min_level].SclkFrequency);
1742 hwmgr->default_compute_power_profile.up_hyst = 0;
1743 hwmgr->default_compute_power_profile.down_hyst = 5;
1744
1745 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1746 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1747
1748 return 0;
1749}
1750
1751static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
1752{
1753 pp_atomctrl_voltage_table param_led_dpm;
1754 int result = 0;
1755 u32 mask = 0;
1756
1757 result = atomctrl_get_voltage_table_v3(hwmgr,
1758 VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
1759 &param_led_dpm);
1760 if (result == 0) {
1761 int i, j;
1762 u32 tmp = param_led_dpm.mask_low;
1763
1764 for (i = 0, j = 0; i < 32; i++) {
1765 if (tmp & 1) {
1766 mask |= (i << (8 * j));
1767 if (++j >= 3)
1768 break;
1769 }
1770 tmp >>= 1;
1771 }
1772 }
1773 if (mask)
1774 smum_send_msg_to_smc_with_parameter(hwmgr,
1775 PPSMC_MSG_LedConfig,
1776 mask);
1777 return 0;
1778}
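A hedged worked example of the mask packing above (not driver code): the loop records the positions of the first three set bits of the LED GPIO LUT mask, one per byte of the SMC parameter.

#include <stdint.h>

/* Sketch only: mask_low = 0x70 (bits 4, 5 and 6 set) packs to 0x00060504. */
static uint32_t led_mask_sketch(uint32_t mask_low)
{
	uint32_t mask = 0;
	int i, j;

	for (i = 0, j = 0; i < 32; i++) {
		if (mask_low & 1) {
			mask |= (uint32_t)i << (8 * j);
			if (++j >= 3)
				break;
		}
		mask_low >>= 1;
	}
	return mask;
}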
1779
1780/**
1781* Initializes the SMC table and uploads it
1782*
1783* @param hwmgr the address of the powerplay hardware manager.
1784* @return 0 on success; otherwise an error code
1786*/
1787int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
1788{
1789 int result;
1790 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1791 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1792 struct phm_ppt_v1_information *table_info =
1793 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1794 struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1795 uint8_t i;
1796 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1797
1798 fiji_initialize_power_tune_defaults(hwmgr);
1799
1800 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1801 fiji_populate_smc_voltage_tables(hwmgr, table);
1802
1803 table->SystemFlags = 0;
1804
1805 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1806 PHM_PlatformCaps_AutomaticDCTransition))
1807 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1808
1809 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1810 PHM_PlatformCaps_StepVddc))
1811 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1812
1813 if (data->is_memory_gddr5)
1814 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1815
1816 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
1817 result = fiji_populate_ulv_state(hwmgr, table);
1818 PP_ASSERT_WITH_CODE(0 == result,
1819 "Failed to initialize ULV state!", return result);
1820 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1821 ixCG_ULV_PARAMETER, 0x40035);
1822 }
1823
1824 result = fiji_populate_smc_link_level(hwmgr, table);
1825 PP_ASSERT_WITH_CODE(0 == result,
1826 "Failed to initialize Link Level!", return result);
1827
1828 result = fiji_populate_all_graphic_levels(hwmgr);
1829 PP_ASSERT_WITH_CODE(0 == result,
1830 "Failed to initialize Graphics Level!", return result);
1831
1832 result = fiji_populate_all_memory_levels(hwmgr);
1833 PP_ASSERT_WITH_CODE(0 == result,
1834 "Failed to initialize Memory Level!", return result);
1835
1836 result = fiji_populate_smc_acpi_level(hwmgr, table);
1837 PP_ASSERT_WITH_CODE(0 == result,
1838 "Failed to initialize ACPI Level!", return result);
1839
1840 result = fiji_populate_smc_vce_level(hwmgr, table);
1841 PP_ASSERT_WITH_CODE(0 == result,
1842 "Failed to initialize VCE Level!", return result);
1843
1844 result = fiji_populate_smc_acp_level(hwmgr, table);
1845 PP_ASSERT_WITH_CODE(0 == result,
1846 "Failed to initialize ACP Level!", return result);
1847
1848 result = fiji_populate_smc_samu_level(hwmgr, table);
1849 PP_ASSERT_WITH_CODE(0 == result,
1850 "Failed to initialize SAMU Level!", return result);
1851
1852 /* Since only the initial state is completely set up at this point
1853 * (the other states are just copies of the boot state) we only
1854 * need to populate the ARB settings for the initial state.
1855 */
1856 result = fiji_program_memory_timing_parameters(hwmgr);
1857 PP_ASSERT_WITH_CODE(0 == result,
1858 "Failed to Write ARB settings for the initial state.", return result);
1859
1860 result = fiji_populate_smc_uvd_level(hwmgr, table);
1861 PP_ASSERT_WITH_CODE(0 == result,
1862 "Failed to initialize UVD Level!", return result);
1863
1864 result = fiji_populate_smc_boot_level(hwmgr, table);
1865 PP_ASSERT_WITH_CODE(0 == result,
1866 "Failed to initialize Boot Level!", return result);
1867
1868 result = fiji_populate_smc_initailial_state(hwmgr);
1869 PP_ASSERT_WITH_CODE(0 == result,
1870 "Failed to initialize Boot State!", return result);
1871
1872 result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
1873 PP_ASSERT_WITH_CODE(0 == result,
1874 "Failed to populate BAPM Parameters!", return result);
1875
1876 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1877 PHM_PlatformCaps_ClockStretcher)) {
1878 result = fiji_populate_clock_stretcher_data_table(hwmgr);
1879 PP_ASSERT_WITH_CODE(0 == result,
1880 "Failed to populate Clock Stretcher Data Table!",
1881 return result);
1882 }
1883
1884 table->GraphicsVoltageChangeEnable = 1;
1885 table->GraphicsThermThrottleEnable = 1;
1886 table->GraphicsInterval = 1;
1887 table->VoltageInterval = 1;
1888 table->ThermalInterval = 1;
1889 table->TemperatureLimitHigh =
1890 table_info->cac_dtp_table->usTargetOperatingTemp *
1891 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1892 table->TemperatureLimitLow =
1893 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1894 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1895 table->MemoryVoltageChangeEnable = 1;
1896 table->MemoryInterval = 1;
1897 table->VoltageResponseTime = 0;
1898 table->PhaseResponseTime = 0;
1899 table->MemoryThermThrottleEnable = 1;
1900	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3 */
1901 table->PCIeGenInterval = 1;
1902 table->VRConfig = 0;
1903
1904 result = fiji_populate_vr_config(hwmgr, table);
1905 PP_ASSERT_WITH_CODE(0 == result,
1906 "Failed to populate VRConfig setting!", return result);
1907
1908 table->ThermGpio = 17;
1909 table->SclkStepSize = 0x4000;
1910
1911 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1912 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
1913 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1914 PHM_PlatformCaps_RegulatorHot);
1915 } else {
1916 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
1917 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1918 PHM_PlatformCaps_RegulatorHot);
1919 }
1920
1921 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
1922 &gpio_pin)) {
1923 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
1924 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1925 PHM_PlatformCaps_AutomaticDCTransition);
1926 } else {
1927 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
1928 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1929 PHM_PlatformCaps_AutomaticDCTransition);
1930 }
1931
1932 /* Thermal Output GPIO */
1933 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
1934 &gpio_pin)) {
1935 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1936 PHM_PlatformCaps_ThermalOutGPIO);
1937
1938 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
1939
1940		/* For polarity, read GPIOPAD_A with the assigned GPIO pin,
1941		 * since VBIOS will program this register to set 'inactive state';
1942		 * the driver can then determine 'active state' from this and
1943		 * program the SMU with the correct polarity.
1944 */
1945 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
1946 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
1947 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
1948
1949 /* if required, combine VRHot/PCC with thermal out GPIO */
1950 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1951 PHM_PlatformCaps_RegulatorHot) &&
1952 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1953 PHM_PlatformCaps_CombinePCCWithThermalSignal))
1954 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
1955 } else {
1956 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1957 PHM_PlatformCaps_ThermalOutGPIO);
1958 table->ThermOutGpio = 17;
1959 table->ThermOutPolarity = 1;
1960 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
1961 }
1962
1963 for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
1964 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
1965
1966 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1967 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
1968 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
1969 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
1970 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1971 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1972 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1973 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1974 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1975
1976 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
1977 result = smu7_copy_bytes_to_smc(hwmgr,
1978 smu_data->smu7_data.dpm_table_start +
1979 offsetof(SMU73_Discrete_DpmTable, SystemFlags),
1980 (uint8_t *)&(table->SystemFlags),
1981 sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
1982 SMC_RAM_END);
1983 PP_ASSERT_WITH_CODE(0 == result,
1984 "Failed to upload dpm data to SMC memory!", return result);
1985
1986 result = fiji_init_arb_table_index(hwmgr);
1987 PP_ASSERT_WITH_CODE(0 == result,
1988 "Failed to upload arb data to SMC memory!", return result);
1989
1990 result = fiji_populate_pm_fuses(hwmgr);
1991 PP_ASSERT_WITH_CODE(0 == result,
1992 "Failed to populate PM fuses to SMC memory!", return result);
1993
1994 result = fiji_setup_dpm_led_config(hwmgr);
1995 PP_ASSERT_WITH_CODE(0 == result,
1996 "Failed to setup dpm led config", return result);
1997
1998 fiji_save_default_power_profile(hwmgr);
1999
2000 return 0;
2001}
2002
2003/**
2004* Set up the fan table to control the fan using the SMC.
2005* @param hwmgr the address of the powerplay hardware manager.
2006* @return always 0
2011*/
2012int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2013{
2014 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2015
2016 SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2017 uint32_t duty100;
2018 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2019 uint16_t fdo_min, slope1, slope2;
2020 uint32_t reference_clock;
2021 int res;
2022 uint64_t tmp64;
2023
2024 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2025 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2026 PHM_PlatformCaps_MicrocodeFanControl);
2027 return 0;
2028 }
2029
2030 if (smu_data->smu7_data.fan_table_start == 0) {
2031 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2032 PHM_PlatformCaps_MicrocodeFanControl);
2033 return 0;
2034 }
2035
2036 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2037 CG_FDO_CTRL1, FMAX_DUTY100);
2038
2039 if (duty100 == 0) {
2040 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2041 PHM_PlatformCaps_MicrocodeFanControl);
2042 return 0;
2043 }
2044
2045 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
2046 usPWMMin * duty100;
2047 do_div(tmp64, 10000);
2048 fdo_min = (uint16_t)tmp64;
2049
2050 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2051 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2052 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2053 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2054
2055 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2056 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2057 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2058 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2059
2060 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2061 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2062
2063 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
2064 thermal_controller.advanceFanControlParameters.usTMin) / 100);
2065 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
2066 thermal_controller.advanceFanControlParameters.usTMed) / 100);
2067 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
2068 thermal_controller.advanceFanControlParameters.usTMax) / 100);
2069
2070 fan_table.Slope1 = cpu_to_be16(slope1);
2071 fan_table.Slope2 = cpu_to_be16(slope2);
2072
2073 fan_table.FdoMin = cpu_to_be16(fdo_min);
2074
2075 fan_table.HystDown = cpu_to_be16(hwmgr->
2076 thermal_controller.advanceFanControlParameters.ucTHyst);
2077
2078 fan_table.HystUp = cpu_to_be16(1);
2079
2080 fan_table.HystSlope = cpu_to_be16(1);
2081
2082 fan_table.TempRespLim = cpu_to_be16(5);
2083
2084 reference_clock = smu7_get_xclk(hwmgr);
2085
2086 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2087 thermal_controller.advanceFanControlParameters.ulCycleDelay *
2088 reference_clock) / 1600);
2089
2090 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2091
2092 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2093 hwmgr->device, CGS_IND_REG__SMC,
2094 CG_MULT_THERMAL_CTRL, TEMP_SEL);
2095
2096 res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
2097 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
2098 SMC_RAM_END);
2099
2100 if (!res && hwmgr->thermal_controller.
2101 advanceFanControlParameters.ucMinimumPWMLimit)
2102 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2103 PPSMC_MSG_SetFanMinPwm,
2104 hwmgr->thermal_controller.
2105 advanceFanControlParameters.ucMinimumPWMLimit);
2106
2107 if (!res && hwmgr->thermal_controller.
2108 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
2109 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2110 PPSMC_MSG_SetFanSclkTarget,
2111 hwmgr->thermal_controller.
2112 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2113
2114 if (res)
2115 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2116 PHM_PlatformCaps_MicrocodeFanControl);
2117
2118 return 0;
2119}
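For readers unfamiliar with the fan-table units, a short hedged note and sketch (assuming the usual powerplay convention that usPWM* values are in hundredths of a percent and usT* values in hundredths of a degree, which is what the /10000 and /100 divisors above imply):

#include <stdint.h>

/* Sketch only: fdo_min is usPWMMin scaled onto the 0..duty100 range,
 * e.g. duty100 = 255 and usPWMMin = 3000 (30.00 %) -> 255 * 3000 / 10000 = 76.
 * The "(50 + x) / 100" pattern above is round-to-nearest division by 100,
 * e.g. usTMin = 4550 (45.50 C) -> (50 + 4550) / 100 = 46.
 */
static uint16_t fdo_min_sketch(uint32_t duty100, uint32_t pwm_min_centi_pct)
{
	return (uint16_t)(duty100 * pwm_min_centi_pct / 10000);
}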
2120
2121
2122int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2123{
2124 int ret;
2125 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2126
2127 if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
2128 return 0;
2129
2130 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2131
2132 if (!ret)
2133 /* If this param is not changed, this function could fire unnecessarily */
2134 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
2135
2136 return ret;
2137}
2138
2139static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2140{
2141 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2142
2143 if (data->need_update_smu7_dpm_table &
2144			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2145 return fiji_program_memory_timing_parameters(hwmgr);
2146
2147 return 0;
2148}
2149
2150int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2151{
2152 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2153 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2154
2155 int result = 0;
2156 uint32_t low_sclk_interrupt_threshold = 0;
2157
2158 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2159 PHM_PlatformCaps_SclkThrottleLowNotification)
2160 && (hwmgr->gfx_arbiter.sclk_threshold !=
2161 data->low_sclk_interrupt_threshold)) {
2162 data->low_sclk_interrupt_threshold =
2163 hwmgr->gfx_arbiter.sclk_threshold;
2164 low_sclk_interrupt_threshold =
2165 data->low_sclk_interrupt_threshold;
2166
2167 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2168
2169 result = smu7_copy_bytes_to_smc(
2170 hwmgr,
2171 smu_data->smu7_data.dpm_table_start +
2172 offsetof(SMU73_Discrete_DpmTable,
2173 LowSclkInterruptThreshold),
2174 (uint8_t *)&low_sclk_interrupt_threshold,
2175 sizeof(uint32_t),
2176 SMC_RAM_END);
2177 }
2178 result = fiji_program_mem_timing_parameters(hwmgr);
2179 PP_ASSERT_WITH_CODE((result == 0),
2180 "Failed to program memory timing parameters!",
2181 );
2182 return result;
2183}
2184
2185uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
2186{
2187 switch (type) {
2188 case SMU_SoftRegisters:
2189 switch (member) {
2190 case HandshakeDisables:
2191 return offsetof(SMU73_SoftRegisters, HandshakeDisables);
2192 case VoltageChangeTimeout:
2193 return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
2194 case AverageGraphicsActivity:
2195 return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
2196 case PreVBlankGap:
2197 return offsetof(SMU73_SoftRegisters, PreVBlankGap);
2198 case VBlankTimeout:
2199 return offsetof(SMU73_SoftRegisters, VBlankTimeout);
2200 case UcodeLoadStatus:
2201 return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
2202 }
2203 case SMU_Discrete_DpmTable:
2204 switch (member) {
2205 case UvdBootLevel:
2206 return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
2207 case VceBootLevel:
2208 return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2209 case SamuBootLevel:
2210 return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2211 case LowSclkInterruptThreshold:
2212 return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
2213 }
2214 }
2215 pr_warn("can't get the offset of type %x member %x\n", type, member);
2216 return 0;
2217}
2218
2219uint32_t fiji_get_mac_definition(uint32_t value)
2220{
2221 switch (value) {
2222 case SMU_MAX_LEVELS_GRAPHICS:
2223 return SMU73_MAX_LEVELS_GRAPHICS;
2224 case SMU_MAX_LEVELS_MEMORY:
2225 return SMU73_MAX_LEVELS_MEMORY;
2226 case SMU_MAX_LEVELS_LINK:
2227 return SMU73_MAX_LEVELS_LINK;
2228 case SMU_MAX_ENTRIES_SMIO:
2229 return SMU73_MAX_ENTRIES_SMIO;
2230 case SMU_MAX_LEVELS_VDDC:
2231 return SMU73_MAX_LEVELS_VDDC;
2232 case SMU_MAX_LEVELS_VDDGFX:
2233 return SMU73_MAX_LEVELS_VDDGFX;
2234 case SMU_MAX_LEVELS_VDDCI:
2235 return SMU73_MAX_LEVELS_VDDCI;
2236 case SMU_MAX_LEVELS_MVDD:
2237 return SMU73_MAX_LEVELS_MVDD;
2238 }
2239
2240 pr_warn("can't get the mac of %x\n", value);
2241 return 0;
2242}
2243
2244
2245static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2246{
2247 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2248 uint32_t mm_boot_level_offset, mm_boot_level_value;
2249 struct phm_ppt_v1_information *table_info =
2250 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2251
2252 smu_data->smc_state_table.UvdBootLevel = 0;
2253 if (table_info->mm_dep_table->count > 0)
2254 smu_data->smc_state_table.UvdBootLevel =
2255 (uint8_t) (table_info->mm_dep_table->count - 1);
2256 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
2257 UvdBootLevel);
2258 mm_boot_level_offset /= 4;
2259 mm_boot_level_offset *= 4;
2260 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2261 CGS_IND_REG__SMC, mm_boot_level_offset);
2262 mm_boot_level_value &= 0x00FFFFFF;
2263 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2264 cgs_write_ind_register(hwmgr->device,
2265 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2266
2267 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2268 PHM_PlatformCaps_UVDDPM) ||
2269 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2270 PHM_PlatformCaps_StablePState))
2271 smum_send_msg_to_smc_with_parameter(hwmgr,
2272 PPSMC_MSG_UVDDPM_SetEnabledMask,
2273 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2274 return 0;
2275}
2276
2277static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2278{
2279 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2280 uint32_t mm_boot_level_offset, mm_boot_level_value;
2281 struct phm_ppt_v1_information *table_info =
2282 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2283
2284 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2285 PHM_PlatformCaps_StablePState))
2286 smu_data->smc_state_table.VceBootLevel =
2287 (uint8_t) (table_info->mm_dep_table->count - 1);
2288 else
2289 smu_data->smc_state_table.VceBootLevel = 0;
2290
2291 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2292 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2293 mm_boot_level_offset /= 4;
2294 mm_boot_level_offset *= 4;
2295 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2296 CGS_IND_REG__SMC, mm_boot_level_offset);
2297 mm_boot_level_value &= 0xFF00FFFF;
2298 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2299 cgs_write_ind_register(hwmgr->device,
2300 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2301
2302 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2303 smum_send_msg_to_smc_with_parameter(hwmgr,
2304 PPSMC_MSG_VCEDPM_SetEnabledMask,
2305 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2306 return 0;
2307}
2308
2309static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2310{
2311 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2312 uint32_t mm_boot_level_offset, mm_boot_level_value;
2313
2314
2315 smu_data->smc_state_table.SamuBootLevel = 0;
2316 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2317 offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2318
2319 mm_boot_level_offset /= 4;
2320 mm_boot_level_offset *= 4;
2321 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2322 CGS_IND_REG__SMC, mm_boot_level_offset);
2323 mm_boot_level_value &= 0xFFFFFF00;
2324 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2325 cgs_write_ind_register(hwmgr->device,
2326 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2327
2328 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2329 PHM_PlatformCaps_StablePState))
2330 smum_send_msg_to_smc_with_parameter(hwmgr,
2331 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2332 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2333 return 0;
2334}
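The three boot-level updates above share one pattern worth spelling out (sketch only, not driver code): the field's byte offset is rounded down to a dword boundary, the dword is read back, and only the byte holding the boot level is replaced; the hard-coded masks correspond to byte 3 (UvdBootLevel), byte 2 (VceBootLevel) and byte 0 (SamuBootLevel) of that dword.

#include <stdint.h>

/* Sketch only: generic form of the masked byte update used above. */
static uint32_t patch_byte_sketch(uint32_t dword, unsigned int byte_in_dword,
				  uint8_t boot_level)
{
	uint32_t shift = 8 * byte_in_dword;

	dword &= ~((uint32_t)0xff << shift);    /* clear the target byte */
	dword |= (uint32_t)boot_level << shift; /* insert the boot level */
	return dword;
}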
2335
2336int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2337{
2338 switch (type) {
2339 case SMU_UVD_TABLE:
2340 fiji_update_uvd_smc_table(hwmgr);
2341 break;
2342 case SMU_VCE_TABLE:
2343 fiji_update_vce_smc_table(hwmgr);
2344 break;
2345 case SMU_SAMU_TABLE:
2346 fiji_update_samu_smc_table(hwmgr);
2347 break;
2348 default:
2349 break;
2350 }
2351 return 0;
2352}
2353
2354
2355/**
2356* Get the location of various tables inside the FW image.
2357*
2358* @param hwmgr the address of the powerplay hardware manager.
2359* @return 0 on success, -1 if any required header field could not be read
2360*/
2361int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
2362{
2363 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2364 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2365 uint32_t tmp;
2366 int result;
2367 bool error = false;
2368
2369 result = smu7_read_smc_sram_dword(hwmgr,
2370 SMU7_FIRMWARE_HEADER_LOCATION +
2371 offsetof(SMU73_Firmware_Header, DpmTable),
2372 &tmp, SMC_RAM_END);
2373
2374 if (0 == result)
2375 smu_data->smu7_data.dpm_table_start = tmp;
2376
2377 error |= (0 != result);
2378
2379 result = smu7_read_smc_sram_dword(hwmgr,
2380 SMU7_FIRMWARE_HEADER_LOCATION +
2381 offsetof(SMU73_Firmware_Header, SoftRegisters),
2382 &tmp, SMC_RAM_END);
2383
2384 if (!result) {
2385 data->soft_regs_start = tmp;
2386 smu_data->smu7_data.soft_regs_start = tmp;
2387 }
2388
2389 error |= (0 != result);
2390
2391 result = smu7_read_smc_sram_dword(hwmgr,
2392 SMU7_FIRMWARE_HEADER_LOCATION +
2393 offsetof(SMU73_Firmware_Header, mcRegisterTable),
2394 &tmp, SMC_RAM_END);
2395
2396 if (!result)
2397 smu_data->smu7_data.mc_reg_table_start = tmp;
2398
2399 result = smu7_read_smc_sram_dword(hwmgr,
2400 SMU7_FIRMWARE_HEADER_LOCATION +
2401 offsetof(SMU73_Firmware_Header, FanTable),
2402 &tmp, SMC_RAM_END);
2403
2404 if (!result)
2405 smu_data->smu7_data.fan_table_start = tmp;
2406
2407 error |= (0 != result);
2408
2409 result = smu7_read_smc_sram_dword(hwmgr,
2410 SMU7_FIRMWARE_HEADER_LOCATION +
2411 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
2412 &tmp, SMC_RAM_END);
2413
2414 if (!result)
2415 smu_data->smu7_data.arb_table_start = tmp;
2416
2417 error |= (0 != result);
2418
2419 result = smu7_read_smc_sram_dword(hwmgr,
2420 SMU7_FIRMWARE_HEADER_LOCATION +
2421 offsetof(SMU73_Firmware_Header, Version),
2422 &tmp, SMC_RAM_END);
2423
2424 if (!result)
2425 hwmgr->microcode_version_info.SMC = tmp;
2426
2427 error |= (0 != result);
2428
2429 return error ? -1 : 0;
2430}
2431
2432int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2433{
2434
2435 /* Program additional LP registers
2436 * that are no longer programmed by VBIOS
2437 */
2438 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
2439 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2440 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
2441 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2442 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
2443 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2444 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
2445 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2446 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
2447 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2448 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
2449 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2450 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
2451 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2452
2453 return 0;
2454}
2455
2456bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
2457{
2458 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2459 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2460 ? true : false;
2461}
2462
2463int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2464 struct amd_pp_profile *request)
2465{
2466 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
2467 (hwmgr->smu_backend);
2468 struct SMU73_Discrete_GraphicsLevel *levels =
2469 smu_data->smc_state_table.GraphicsLevel;
2470 uint32_t array = smu_data->smu7_data.dpm_table_start +
2471 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
2472 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
2473 SMU73_MAX_LEVELS_GRAPHICS;
2474 uint32_t i;
2475
2476 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2477 levels[i].ActivityLevel =
2478 cpu_to_be16(request->activity_threshold);
2479 levels[i].EnabledForActivity = 1;
2480 levels[i].UpHyst = request->up_hyst;
2481 levels[i].DownHyst = request->down_hyst;
2482 }
2483
2484 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2485 array_size, SMC_RAM_END);
2486}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
deleted file mode 100644
index d9c72d992e30..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef FIJI_SMC_H
24#define FIJI_SMC_H
25
26#include "smumgr.h"
27#include "smu73.h"
28
29struct fiji_pt_defaults {
30 uint8_t SviLoadLineEn;
31 uint8_t SviLoadLineVddC;
32 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
33 uint8_t TDC_MAWt;
34 uint8_t TdcWaterfallCtl;
35 uint8_t DTEAmbientTempBase;
36};
37
38int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
39int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
40int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
41int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
42int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
43int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
44uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
45uint32_t fiji_get_mac_definition(uint32_t value);
46int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
47int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
48bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
49int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
50 struct amd_pp_profile *request);
51int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
52#endif
53
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 592a89aff12b..f572beff197f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -23,6 +23,7 @@
23 23
24#include "pp_debug.h" 24#include "pp_debug.h"
25#include "smumgr.h" 25#include "smumgr.h"
26#include "smu7_dyn_defaults.h"
26#include "smu73.h" 27#include "smu73.h"
27#include "smu_ucode_xfer_vi.h" 28#include "smu_ucode_xfer_vi.h"
28#include "fiji_smumgr.h" 29#include "fiji_smumgr.h"
@@ -37,14 +38,54 @@
37#include "gca/gfx_8_0_d.h" 38#include "gca/gfx_8_0_d.h"
38#include "bif/bif_5_0_d.h" 39#include "bif/bif_5_0_d.h"
39#include "bif/bif_5_0_sh_mask.h" 40#include "bif/bif_5_0_sh_mask.h"
40#include "fiji_pwrvirus.h" 41#include "dce/dce_10_0_d.h"
41#include "fiji_smc.h" 42#include "dce/dce_10_0_sh_mask.h"
43#include "hardwaremanager.h"
44#include "cgs_common.h"
45#include "atombios.h"
46#include "pppcielanes.h"
47#include "hwmgr.h"
48#include "smu7_hwmgr.h"
49
42 50
43#define AVFS_EN_MSB 1568 51#define AVFS_EN_MSB 1568
44#define AVFS_EN_LSB 1568 52#define AVFS_EN_LSB 1568
45 53
46#define FIJI_SMC_SIZE 0x20000 54#define FIJI_SMC_SIZE 0x20000
47 55
56#define VOLTAGE_SCALE 4
57#define POWERTUNE_DEFAULT_SET_MAX 1
58#define VOLTAGE_VID_OFFSET_SCALE1 625
59#define VOLTAGE_VID_OFFSET_SCALE2 100
60#define VDDC_VDDCI_DELTA 300
61#define MC_CG_ARB_FREQ_F1 0x0b
62
63/* [2.5%, ~2.5%] Whether the clock stretch is a multiple of 2.5%
64 * or not, and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
65 */
66static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
67 {600, 1050, 3, 0}, {600, 1050, 6, 1} };
68
69/* [FF, SS] type, [] 4 voltage ranges, and
70 * [Floor Freq, Boundary Freq, VID min , VID max]
71 */
72static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
73 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
74 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
75
76/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
77 * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
78 */
79static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
80 {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
81
82static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
83 /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
84 {1, 0xF, 0xFD,
85 /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
86 0x19, 5, 45}
87};
88
48static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { 89static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
49 /* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */ 90 /* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */
50 /* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ 91 /* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
@@ -159,46 +200,6 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
159 return result; 200 return result;
160} 201}
161 202
162static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
163{
164 int i;
165 uint32_t reg, data;
166
167 for (i = 0; i < size; i++) {
168 reg = pvirus->reg;
169 data = pvirus->data;
170 if (reg != 0xffffffff)
171 cgs_write_register(hwmgr->device, reg, data);
172 else
173 break;
174 pvirus++;
175 }
176}
177
178static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
179{
180 int i;
181 cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
182 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
183 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
184 for (i = 0; i < section->dfy_size; i++)
185 cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
186}
187
188static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr)
189{
190 execute_pwr_table(hwmgr, PwrVirusTable_pre, ARRAY_SIZE(PwrVirusTable_pre));
191 execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
192 execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
193 execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
194 execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
195 execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
196 execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
197 execute_pwr_table(hwmgr, PwrVirusTable_post, ARRAY_SIZE(PwrVirusTable_post));
198
199 return 0;
200}
201
202static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) 203static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
203{ 204{
204 int result = 0; 205 int result = 0;
@@ -277,7 +278,7 @@ static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
277 " table over to SMU", 278 " table over to SMU",
278 return -EINVAL;); 279 return -EINVAL;);
279 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; 280 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
280 PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr), 281 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
281 "[AVFS][fiji_avfs_event_mgr] Could not setup " 282 "[AVFS][fiji_avfs_event_mgr] Could not setup "
282 "Pwr Virus for AVFS ", 283 "Pwr Virus for AVFS ",
283 return -EINVAL;); 284 return -EINVAL;);
@@ -365,13 +366,6 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
365 return false; 366 return false;
366} 367}
367 368
368/**
369* Write a 32bit value to the SMC SRAM space.
370* ALL PARAMETERS ARE IN HOST BYTE ORDER.
371* @param smumgr the address of the powerplay hardware manager.
372* @param smc_addr the address in the SMC RAM to access.
373* @param value to write to the SMC SRAM.
374*/
375static int fiji_smu_init(struct pp_hwmgr *hwmgr) 369static int fiji_smu_init(struct pp_hwmgr *hwmgr)
376{ 370{
377 int i; 371 int i;
@@ -393,6 +387,2334 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
393 return 0; 387 return 0;
394} 388}
395 389
390static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
391 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
392 uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
393{
394 uint32_t i;
395 uint16_t vddci;
396 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
397 *voltage = *mvdd = 0;
398
399
400 /* clock - voltage dependency table is empty table */
401 if (dep_table->count == 0)
402 return -EINVAL;
403
404 for (i = 0; i < dep_table->count; i++) {
405 /* find the first sclk greater than or equal to the request */
406 if (dep_table->entries[i].clk >= clock) {
407 *voltage |= (dep_table->entries[i].vddc *
408 VOLTAGE_SCALE) << VDDC_SHIFT;
409 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
410 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
411 VOLTAGE_SCALE) << VDDCI_SHIFT;
412 else if (dep_table->entries[i].vddci)
413 *voltage |= (dep_table->entries[i].vddci *
414 VOLTAGE_SCALE) << VDDCI_SHIFT;
415 else {
416 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
417 (dep_table->entries[i].vddc -
418 VDDC_VDDCI_DELTA));
419 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
420 }
421
422 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
423 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
424 VOLTAGE_SCALE;
425 else if (dep_table->entries[i].mvdd)
426 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
427 VOLTAGE_SCALE;
428
429 *voltage |= 1 << PHASES_SHIFT;
430 return 0;
431 }
432 }
433
434 /* sclk is bigger than the max sclk in the dependency table */
435 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
436
437 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
438 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
439 VOLTAGE_SCALE) << VDDCI_SHIFT;
440 else if (dep_table->entries[i-1].vddci) {
441 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
442 (dep_table->entries[i - 1].vddc -
443 VDDC_VDDCI_DELTA));
444 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
445 }
446
447 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
448 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
449 else if (dep_table->entries[i - 1].mvdd)
450 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
451
452 return 0;
453}
454
455
456static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
457{
458 uint32_t tmp;
459 tmp = raw_setting * 4096 / 100;
460 return (uint16_t)tmp;
461}
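scale_fan_gain_settings() above packs a fan-gain percentage into the 4096-based fixed-point format used by the SMU73 fan gains. A minimal standalone sketch of the same arithmetic, not part of this patch, with an illustrative 50% input:

        #include <stdint.h>
        #include <stdio.h>

        /* Same 4096/100 scaling as scale_fan_gain_settings() above. */
        static uint16_t scale_fan_gain(uint16_t raw_setting)
        {
                return (uint16_t)((uint32_t)raw_setting * 4096 / 100);
        }

        int main(void)
        {
                /* 50% gain -> 50 * 4096 / 100 = 2048 */
                printf("%u\n", (unsigned)scale_fan_gain(50));
                return 0;
        }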
462
463static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
464{
465 switch (line) {
466 case SMU7_I2CLineID_DDC1:
467 *scl = SMU7_I2C_DDC1CLK;
468 *sda = SMU7_I2C_DDC1DATA;
469 break;
470 case SMU7_I2CLineID_DDC2:
471 *scl = SMU7_I2C_DDC2CLK;
472 *sda = SMU7_I2C_DDC2DATA;
473 break;
474 case SMU7_I2CLineID_DDC3:
475 *scl = SMU7_I2C_DDC3CLK;
476 *sda = SMU7_I2C_DDC3DATA;
477 break;
478 case SMU7_I2CLineID_DDC4:
479 *scl = SMU7_I2C_DDC4CLK;
480 *sda = SMU7_I2C_DDC4DATA;
481 break;
482 case SMU7_I2CLineID_DDC5:
483 *scl = SMU7_I2C_DDC5CLK;
484 *sda = SMU7_I2C_DDC5DATA;
485 break;
486 case SMU7_I2CLineID_DDC6:
487 *scl = SMU7_I2C_DDC6CLK;
488 *sda = SMU7_I2C_DDC6DATA;
489 break;
490 case SMU7_I2CLineID_SCLSDA:
491 *scl = SMU7_I2C_SCL;
492 *sda = SMU7_I2C_SDA;
493 break;
494 case SMU7_I2CLineID_DDCVGA:
495 *scl = SMU7_I2C_DDCVGACLK;
496 *sda = SMU7_I2C_DDCVGADATA;
497 break;
498 default:
499 *scl = 0;
500 *sda = 0;
501 break;
502 }
503}
504
505static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
506{
507 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
508 struct phm_ppt_v1_information *table_info =
509 (struct phm_ppt_v1_information *)(hwmgr->pptable);
510
511 if (table_info &&
512 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
513 table_info->cac_dtp_table->usPowerTuneDataSetID)
514 smu_data->power_tune_defaults =
515 &fiji_power_tune_data_set_array
516 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
517 else
518 smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
519
520}
521
522static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
523{
524
525 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
526 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
527
528 SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
529
530 struct phm_ppt_v1_information *table_info =
531 (struct phm_ppt_v1_information *)(hwmgr->pptable);
532 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
533 struct pp_advance_fan_control_parameters *fan_table =
534 &hwmgr->thermal_controller.advanceFanControlParameters;
535 uint8_t uc_scl, uc_sda;
536
537 /* The TDP number of fraction bits is changed from 8 to 7 for Fiji
538 * as requested by SMC team
539 */
540 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
541 (uint16_t)(cac_dtp_table->usTDP * 128));
542 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
543 (uint16_t)(cac_dtp_table->usTDP * 128));
544
545 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
546 "Target Operating Temp is out of Range!",
547 );
548
549 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
550 dpm_table->GpuTjHyst = 8;
551
552 dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
553
554 /* The following are for new Fiji Multi-input fan/thermal control */
555 dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
556 cac_dtp_table->usTargetOperatingTemp * 256);
557 dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
558 cac_dtp_table->usTemperatureLimitHotspot * 256);
559 dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
560 cac_dtp_table->usTemperatureLimitLiquid1 * 256);
561 dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
562 cac_dtp_table->usTemperatureLimitLiquid2 * 256);
563 dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
564 cac_dtp_table->usTemperatureLimitVrVddc * 256);
565 dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
566 cac_dtp_table->usTemperatureLimitVrMvdd * 256);
567 dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
568 cac_dtp_table->usTemperatureLimitPlx * 256);
569
570 dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
571 scale_fan_gain_settings(fan_table->usFanGainEdge));
572 dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
573 scale_fan_gain_settings(fan_table->usFanGainHotspot));
574 dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
575 scale_fan_gain_settings(fan_table->usFanGainLiquid));
576 dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
577 scale_fan_gain_settings(fan_table->usFanGainVrVddc));
578 dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
579 scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
580 dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
581 scale_fan_gain_settings(fan_table->usFanGainPlx));
582 dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
583 scale_fan_gain_settings(fan_table->usFanGainHbm));
584
585 dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
586 dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
587 dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
588 dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
589
590 get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
591 dpm_table->Liquid_I2C_LineSCL = uc_scl;
592 dpm_table->Liquid_I2C_LineSDA = uc_sda;
593
594 get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
595 dpm_table->Vr_I2C_LineSCL = uc_scl;
596 dpm_table->Vr_I2C_LineSDA = uc_sda;
597
598 get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
599 dpm_table->Plx_I2C_LineSCL = uc_scl;
600 dpm_table->Plx_I2C_LineSDA = uc_sda;
601
602 return 0;
603}
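For reference, the scaling in fiji_populate_bapm_parameters_in_dpm_table() encodes PPTable values as fixed point before the host-to-SMC byte swap: usTDP * 128 gives the 7 fractional bits noted in the comment, and the temperature limits * 256 give 8 fractional bits. A standalone illustration with made-up PPTable values, not part of this patch:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t us_tdp = 150;          /* hypothetical 150 W from the PPTable */
                uint16_t us_temp_limit = 90;    /* hypothetical 90 C edge limit */

                /* 7 fractional bits: 150 * 128 = 19200 */
                uint16_t default_tdp = (uint16_t)(us_tdp * 128);
                /* 8 fractional bits: 90 * 256 = 23040 */
                uint16_t temp_limit_edge = (uint16_t)(us_temp_limit * 256);

                printf("DefaultTdp=%u TemperatureLimitEdge=%u\n",
                       (unsigned)default_tdp, (unsigned)temp_limit_edge);
                return 0;
        }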
604
605
606static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
607{
608 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
609 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
610
611 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
612 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
613 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
614 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
615
616 return 0;
617}
618
619
620static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
621{
622 uint16_t tdc_limit;
623 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
624 struct phm_ppt_v1_information *table_info =
625 (struct phm_ppt_v1_information *)(hwmgr->pptable);
626 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
627
628 /* The TDC number of fraction bits is changed from 8 to 7
629 * for Fiji as requested by SMC team
630 */
631 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
632 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
633 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
634 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
635 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
636 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
637
638 return 0;
639}
640
641static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
642{
643 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
644 const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
645 uint32_t temp;
646
647 if (smu7_read_smc_sram_dword(hwmgr,
648 fuse_table_offset +
649 offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
650 (uint32_t *)&temp, SMC_RAM_END))
651 PP_ASSERT_WITH_CODE(false,
652 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
653 return -EINVAL);
654 else {
655 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
656 smu_data->power_tune_table.LPMLTemperatureMin =
657 (uint8_t)((temp >> 16) & 0xff);
658 smu_data->power_tune_table.LPMLTemperatureMax =
659 (uint8_t)((temp >> 8) & 0xff);
660 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
661 }
662 return 0;
663}
664
665static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
666{
667 int i;
668 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
669
670 /* Currently not used. Set all to zero. */
671 for (i = 0; i < 16; i++)
672 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
673
674 return 0;
675}
676
677static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
678{
679 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
680
681 if ((hwmgr->thermal_controller.advanceFanControlParameters.
682 usFanOutputSensitivity & (1 << 15)) ||
683 0 == hwmgr->thermal_controller.advanceFanControlParameters.
684 usFanOutputSensitivity)
685 hwmgr->thermal_controller.advanceFanControlParameters.
686 usFanOutputSensitivity = hwmgr->thermal_controller.
687 advanceFanControlParameters.usDefaultFanOutputSensitivity;
688
689 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
690 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
691 advanceFanControlParameters.usFanOutputSensitivity);
692 return 0;
693}
694
695static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
696{
697 int i;
698 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
699
700 /* Currently not used. Set all to zero. */
701 for (i = 0; i < 16; i++)
702 smu_data->power_tune_table.GnbLPML[i] = 0;
703
704 return 0;
705}
706
707static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
708{
709 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
710 struct phm_ppt_v1_information *table_info =
711 (struct phm_ppt_v1_information *)(hwmgr->pptable);
712 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
713 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
714 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
715
716 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
717 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
718
719 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
720 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
721 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
722 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
723
724 return 0;
725}
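Note that the HiSidd/LoSidd expressions above perform the integer division by 100 before multiplying by 256, so any remainder of the division is dropped. A standalone sketch of the arithmetic exactly as written, with a purely illustrative input value:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t us_high_cac_leakage = 250;  /* hypothetical PPTable value */

                /* Same expression order as the driver: divide first, then scale by 256. */
                uint16_t hi_sidd = (uint16_t)(us_high_cac_leakage / 100 * 256);

                printf("HiSidd=%u\n", (unsigned)hi_sidd); /* 250/100 = 2, 2*256 = 512 */
                return 0;
        }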
726
727static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
728{
729 uint32_t pm_fuse_table_offset;
730 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
731
732 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
733 PHM_PlatformCaps_PowerContainment)) {
734 if (smu7_read_smc_sram_dword(hwmgr,
735 SMU7_FIRMWARE_HEADER_LOCATION +
736 offsetof(SMU73_Firmware_Header, PmFuseTable),
737 &pm_fuse_table_offset, SMC_RAM_END))
738 PP_ASSERT_WITH_CODE(false,
739 "Attempt to get pm_fuse_table_offset Failed!",
740 return -EINVAL);
741
742 /* DW6 */
743 if (fiji_populate_svi_load_line(hwmgr))
744 PP_ASSERT_WITH_CODE(false,
745 "Attempt to populate SviLoadLine Failed!",
746 return -EINVAL);
747 /* DW7 */
748 if (fiji_populate_tdc_limit(hwmgr))
749 PP_ASSERT_WITH_CODE(false,
750 "Attempt to populate TDCLimit Failed!", return -EINVAL);
751 /* DW8 */
752 if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
753 PP_ASSERT_WITH_CODE(false,
754 "Attempt to populate TdcWaterfallCtl, "
755 "LPMLTemperature Min and Max Failed!",
756 return -EINVAL);
757
758 /* DW9-DW12 */
759 if (0 != fiji_populate_temperature_scaler(hwmgr))
760 PP_ASSERT_WITH_CODE(false,
761 "Attempt to populate LPMLTemperatureScaler Failed!",
762 return -EINVAL);
763
764 /* DW13-DW14 */
765 if (fiji_populate_fuzzy_fan(hwmgr))
766 PP_ASSERT_WITH_CODE(false,
767 "Attempt to populate Fuzzy Fan Control parameters Failed!",
768 return -EINVAL);
769
770 /* DW15-DW18 */
771 if (fiji_populate_gnb_lpml(hwmgr))
772 PP_ASSERT_WITH_CODE(false,
773 "Attempt to populate GnbLPML Failed!",
774 return -EINVAL);
775
776 /* DW20 */
777 if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
778 PP_ASSERT_WITH_CODE(false,
779 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
780 "Sidd Failed!", return -EINVAL);
781
782 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
783 (uint8_t *)&smu_data->power_tune_table,
784 sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
785 PP_ASSERT_WITH_CODE(false,
786 "Attempt to download PmFuseTable Failed!",
787 return -EINVAL);
788 }
789 return 0;
790}
791
792static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
793 struct SMU73_Discrete_DpmTable *table)
794{
795 uint32_t count;
796 uint8_t index;
797 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
798 struct phm_ppt_v1_information *table_info =
799 (struct phm_ppt_v1_information *)(hwmgr->pptable);
800 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
801 table_info->vddc_lookup_table;
802 /* the table is already swapped, so in order to use a value from it,
803 * we need to swap it back.
804 * We are populating vddc CAC data to BapmVddc table
805 * in split and merged mode
806 */
807
808 for (count = 0; count < lookup_table->count; count++) {
809 index = phm_get_voltage_index(lookup_table,
810 data->vddc_voltage_table.entries[count].value);
811 table->BapmVddcVidLoSidd[count] =
812 convert_to_vid(lookup_table->entries[index].us_cac_low);
813 table->BapmVddcVidHiSidd[count] =
814 convert_to_vid(lookup_table->entries[index].us_cac_high);
815 }
816
817 return 0;
818}
819
820static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
821 struct SMU73_Discrete_DpmTable *table)
822{
823 int result;
824
825 result = fiji_populate_cac_table(hwmgr, table);
826 PP_ASSERT_WITH_CODE(0 == result,
827 "can not populate CAC voltage tables to SMC",
828 return -EINVAL);
829
830 return 0;
831}
832
833static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
834 struct SMU73_Discrete_Ulv *state)
835{
836 int result = 0;
837
838 struct phm_ppt_v1_information *table_info =
839 (struct phm_ppt_v1_information *)(hwmgr->pptable);
840
841 state->CcPwrDynRm = 0;
842 state->CcPwrDynRm1 = 0;
843
844 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
845 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
846 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
847
848 state->VddcPhase = 1;
849
850 if (!result) {
851 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
852 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
853 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
854 }
855 return result;
856}
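fiji_populate_ulv_level() converts the ULV voltage offset to a VID code with the 100/625 scale pair defined at the top of this file, i.e. one VID step per 6.25 mV assuming the PPTable offset is in mV (that unit is an assumption of this sketch, which is not part of the patch):

        #include <stdint.h>
        #include <stdio.h>

        #define VOLTAGE_VID_OFFSET_SCALE1 625
        #define VOLTAGE_VID_OFFSET_SCALE2 100

        int main(void)
        {
                uint16_t ulv_voltage_offset = 50;    /* hypothetical 50 mV offset */

                /* 50 * 100 / 625 = 8 VID steps of 6.25 mV each */
                uint8_t vid = (uint8_t)(ulv_voltage_offset *
                                        VOLTAGE_VID_OFFSET_SCALE2 /
                                        VOLTAGE_VID_OFFSET_SCALE1);

                printf("VddcOffsetVid=%u\n", (unsigned)vid);
                return 0;
        }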
857
858static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
859 struct SMU73_Discrete_DpmTable *table)
860{
861 return fiji_populate_ulv_level(hwmgr, &table->Ulv);
862}
863
864static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
865 struct SMU73_Discrete_DpmTable *table)
866{
867 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
868 struct smu7_dpm_table *dpm_table = &data->dpm_table;
869 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
870 int i;
871
872 /* Index (dpm_table->pcie_speed_table.count)
873 * is reserved for PCIE boot level. */
874 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
875 table->LinkLevel[i].PcieGenSpeed =
876 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
877 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
878 dpm_table->pcie_speed_table.dpm_levels[i].param1);
879 table->LinkLevel[i].EnabledForActivity = 1;
880 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
881 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
882 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
883 }
884
885 smu_data->smc_state_table.LinkLevelCount =
886 (uint8_t)dpm_table->pcie_speed_table.count;
887 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
888 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
889
890 return 0;
891}
892
893static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
894 uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
895{
896 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
897 struct pp_atomctrl_clock_dividers_vi dividers;
898 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
899 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
900 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
901 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
902 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
903 uint32_t ref_clock;
904 uint32_t ref_divider;
905 uint32_t fbdiv;
906 int result;
907
908 /* get the engine clock dividers for this clock value */
909 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
910
911 PP_ASSERT_WITH_CODE(result == 0,
912 "Error retrieving Engine Clock dividers from VBIOS.",
913 return result);
914
915 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
916 ref_clock = atomctrl_get_reference_clock(hwmgr);
917 ref_divider = 1 + dividers.uc_pll_ref_div;
918
919 /* the low 14 bits are the fraction and the high 12 bits are the divider */
920 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
921
922 /* SPLL_FUNC_CNTL setup */
923 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
924 SPLL_REF_DIV, dividers.uc_pll_ref_div);
925 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
926 SPLL_PDIV_A, dividers.uc_pll_post_div);
927
928 /* SPLL_FUNC_CNTL_3 setup*/
929 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
930 SPLL_FB_DIV, fbdiv);
931
932 /* set to use fractional accumulation*/
933 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
934 SPLL_DITHEN, 1);
935
936 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
937 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
938 struct pp_atomctrl_internal_ss_info ssInfo;
939
940 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
941 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
942 vco_freq, &ssInfo)) {
943 /*
944 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
945 * ss_info.speed_spectrum_rate -- in unit of khz
946 *
947 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
948 */
949 uint32_t clk_s = ref_clock * 5 /
950 (ref_divider * ssInfo.speed_spectrum_rate);
951 /* clkv = 2 * D * fbdiv / NS */
952 uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
953 fbdiv / (clk_s * 10000);
954
955 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
956 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
957 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
958 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
959 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
960 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
961 }
962 }
963
964 sclk->SclkFrequency = clock;
965 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
966 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
967 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
968 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
969 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
970
971 return 0;
972}
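As the comments in fiji_calculate_sclk_params() describe, the feedback divider returned by the VBIOS is a 26-bit field with the divider in the high 12 bits and the fraction in the low 14 bits (hence the 0x3FFFFFF mask). A small standalone decode of that packing, using an arbitrarily chosen example value, not part of this patch:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* hypothetical packed value: integer part 100, fraction 0x2000 (0.5) */
                uint32_t fbdiv = ((100u << 14) | 0x2000u) & 0x3FFFFFF;

                uint32_t fb_int  = fbdiv >> 14;       /* high 12 bits: divider */
                uint32_t fb_frac = fbdiv & 0x3FFF;    /* low 14 bits: fraction / 16384 */

                printf("fbdiv = %u + %u/16384\n", (unsigned)fb_int, (unsigned)fb_frac);
                return 0;
        }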
973
974static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
975 uint32_t clock, uint16_t sclk_al_threshold,
976 struct SMU73_Discrete_GraphicsLevel *level)
977{
978 int result;
979 /* PP_Clocks minClocks; */
980 uint32_t threshold, mvdd;
981 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
982 struct phm_ppt_v1_information *table_info =
983 (struct phm_ppt_v1_information *)(hwmgr->pptable);
984
985 result = fiji_calculate_sclk_params(hwmgr, clock, level);
986
987 /* populate graphics levels */
988 result = fiji_get_dependency_volt_by_clk(hwmgr,
989 table_info->vdd_dep_on_sclk, clock,
990 (uint32_t *)(&level->MinVoltage), &mvdd);
991 PP_ASSERT_WITH_CODE((0 == result),
992 "can not find VDDC voltage value for "
993 "VDDC engine clock dependency table",
994 return result);
995
996 level->SclkFrequency = clock;
997 level->ActivityLevel = sclk_al_threshold;
998 level->CcPwrDynRm = 0;
999 level->CcPwrDynRm1 = 0;
1000 level->EnabledForActivity = 0;
1001 level->EnabledForThrottle = 1;
1002 level->UpHyst = 10;
1003 level->DownHyst = 0;
1004 level->VoltageDownHyst = 0;
1005 level->PowerThrottle = 0;
1006
1007 threshold = clock * data->fast_watermark_threshold / 100;
1008
1009 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
1010
1011 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
1012 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
1013 hwmgr->display_config.min_core_set_clock_in_sr);
1014
1015
1016 /* Default to slow; the highest DPM level will be
1017 * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
1018 */
1019 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1020
1021 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
1022 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
1023 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
1024 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
1025 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
1026 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
1027 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
1028 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
1029 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
1030
1031 return 0;
1032}
1033
1034static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1035{
1036 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1037 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1038
1039 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1040 struct phm_ppt_v1_information *table_info =
1041 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1042 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1043 uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
1044 int result = 0;
1045 uint32_t array = smu_data->smu7_data.dpm_table_start +
1046 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
1047 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
1048 SMU73_MAX_LEVELS_GRAPHICS;
1049 struct SMU73_Discrete_GraphicsLevel *levels =
1050 smu_data->smc_state_table.GraphicsLevel;
1051 uint32_t i, max_entry;
1052 uint8_t hightest_pcie_level_enabled = 0,
1053 lowest_pcie_level_enabled = 0,
1054 mid_pcie_level_enabled = 0,
1055 count = 0;
1056
1057 for (i = 0; i < dpm_table->sclk_table.count; i++) {
1058 result = fiji_populate_single_graphic_level(hwmgr,
1059 dpm_table->sclk_table.dpm_levels[i].value,
1060 (uint16_t)smu_data->activity_target[i],
1061 &levels[i]);
1062 if (result)
1063 return result;
1064
1065 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
1066 if (i > 1)
1067 levels[i].DeepSleepDivId = 0;
1068 }
1069
1070 /* Only enable level 0 for now.*/
1071 levels[0].EnabledForActivity = 1;
1072
1073 /* set highest level watermark to high */
1074 levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
1075 PPSMC_DISPLAY_WATERMARK_HIGH;
1076
1077 smu_data->smc_state_table.GraphicsDpmLevelCount =
1078 (uint8_t)dpm_table->sclk_table.count;
1079 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
1080 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
1081
1082 if (pcie_table != NULL) {
1083 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
1084 "There must be 1 or more PCIE levels defined in PPTable.",
1085 return -EINVAL);
1086 max_entry = pcie_entry_cnt - 1;
1087 for (i = 0; i < dpm_table->sclk_table.count; i++)
1088 levels[i].pcieDpmLevel =
1089 (uint8_t) ((i < max_entry) ? i : max_entry);
1090 } else {
1091 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1092 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1093 (1 << (hightest_pcie_level_enabled + 1))) != 0))
1094 hightest_pcie_level_enabled++;
1095
1096 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1097 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1098 (1 << lowest_pcie_level_enabled)) == 0))
1099 lowest_pcie_level_enabled++;
1100
1101 while ((count < hightest_pcie_level_enabled) &&
1102 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1103 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
1104 count++;
1105
1106 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
1107 hightest_pcie_level_enabled ?
1108 (lowest_pcie_level_enabled + 1 + count) :
1109 hightest_pcie_level_enabled;
1110
1111 /* set pcieDpmLevel to hightest_pcie_level_enabled */
1112 for (i = 2; i < dpm_table->sclk_table.count; i++)
1113 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
1114
1115 /* set pcieDpmLevel to lowest_pcie_level_enabled */
1116 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
1117
1118 /* set pcieDpmLevel to mid_pcie_level_enabled */
1119 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
1120 }
1121 /* The level count is sent to the SMC once at SMC table init and never changes */
1122 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1123 (uint32_t)array_size, SMC_RAM_END);
1124
1125 return result;
1126}
1127
1128
1129/**
1130 * MCLK Frequency Ratio
1131 * SEQ_CG_RESP Bit[31:24] - 0x0
1132 * Bit[27:24] - DDR3 Frequency ratio
1133 * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
1134 * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
1135 * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
1136 * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
1137 * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
1138 * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
1139 * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
1140 * 400 < 0x7 <= 450MHz, 800 < 0xF
1141 */
1142static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
1143{
1144 if (mem_clock <= 10000)
1145 return 0x0;
1146 if (mem_clock <= 15000)
1147 return 0x1;
1148 if (mem_clock <= 20000)
1149 return 0x2;
1150 if (mem_clock <= 25000)
1151 return 0x3;
1152 if (mem_clock <= 30000)
1153 return 0x4;
1154 if (mem_clock <= 35000)
1155 return 0x5;
1156 if (mem_clock <= 40000)
1157 return 0x6;
1158 if (mem_clock <= 45000)
1159 return 0x7;
1160 if (mem_clock <= 50000)
1161 return 0x8;
1162 if (mem_clock <= 55000)
1163 return 0x9;
1164 if (mem_clock <= 60000)
1165 return 0xa;
1166 if (mem_clock <= 65000)
1167 return 0xb;
1168 if (mem_clock <= 70000)
1169 return 0xc;
1170 if (mem_clock <= 75000)
1171 return 0xd;
1172 if (mem_clock <= 80000)
1173 return 0xe;
1174 /* mem_clock > 800MHz */
1175 return 0xf;
1176}
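The thresholds in fiji_get_mclk_frequency_ratio() line up with the MHz ranges in the comment only if the memory clock is given in 10 kHz units (60000 == 600 MHz), consistent with how clock values are carried elsewhere in powerplay. A compact standalone restatement of the same bucketing for one illustrative value, not part of this patch:

        #include <stdint.h>
        #include <stdio.h>

        /* Equivalent bucketing to fiji_get_mclk_frequency_ratio():
         * <= 100 MHz -> 0x0, > 800 MHz -> 0xF, 50 MHz wide buckets in between. */
        static uint8_t mclk_frequency_ratio(uint32_t mem_clock)
        {
                if (mem_clock <= 10000)          /* <= 100 MHz */
                        return 0x0;
                if (mem_clock > 80000)           /* > 800 MHz */
                        return 0xf;
                /* 5000 * 10 kHz = 50 MHz wide buckets, rounded up */
                return (uint8_t)((mem_clock - 10000 + 4999) / 5000);
        }

        int main(void)
        {
                /* 60000 * 10 kHz = 600 MHz -> ratio 0xA (550-600 MHz bucket) */
                printf("0x%X\n", (unsigned)mclk_frequency_ratio(60000));
                return 0;
        }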
1177
1178static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
1179 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
1180{
1181 struct pp_atomctrl_memory_clock_param mem_param;
1182 int result;
1183
1184 result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
1185 PP_ASSERT_WITH_CODE((0 == result),
1186 "Failed to get Memory PLL Dividers.",
1187 );
1188
1189 /* Save the result data to the output memory level structure */
1190 mclk->MclkFrequency = clock;
1191 mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
1192 mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
1193
1194 return result;
1195}
1196
1197static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1198 uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
1199{
1200 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1201 struct phm_ppt_v1_information *table_info =
1202 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1203 int result = 0;
1204 uint32_t mclk_stutter_mode_threshold = 60000;
1205
1206 if (table_info->vdd_dep_on_mclk) {
1207 result = fiji_get_dependency_volt_by_clk(hwmgr,
1208 table_info->vdd_dep_on_mclk, clock,
1209 (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
1210 PP_ASSERT_WITH_CODE((0 == result),
1211 "can not find MinVddc voltage value from memory "
1212 "VDDC voltage dependency table", return result);
1213 }
1214
1215 mem_level->EnabledForThrottle = 1;
1216 mem_level->EnabledForActivity = 0;
1217 mem_level->UpHyst = 0;
1218 mem_level->DownHyst = 100;
1219 mem_level->VoltageDownHyst = 0;
1220 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1221 mem_level->StutterEnable = false;
1222
1223 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1224
1225 /* enable stutter mode if all of the following conditions apply
1226 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
1227 * &(data->DisplayTiming.numExistingDisplays));
1228 */
1229 data->display_timing.num_existing_displays = 1;
1230
1231 if (mclk_stutter_mode_threshold &&
1232 (clock <= mclk_stutter_mode_threshold) &&
1233 (!data->is_uvd_enabled) &&
1234 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
1235 STUTTER_ENABLE) & 0x1))
1236 mem_level->StutterEnable = true;
1237
1238 result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
1239 if (!result) {
1240 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
1241 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
1242 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
1243 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
1244 }
1245 return result;
1246}
1247
1248static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1249{
1250 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1251 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1252 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1253 int result;
1254 /* populate MCLK dpm table to SMU7 */
1255 uint32_t array = smu_data->smu7_data.dpm_table_start +
1256 offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
1257 uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
1258 SMU73_MAX_LEVELS_MEMORY;
1259 struct SMU73_Discrete_MemoryLevel *levels =
1260 smu_data->smc_state_table.MemoryLevel;
1261 uint32_t i;
1262
1263 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1264 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1265 "can not populate memory level as memory clock is zero",
1266 return -EINVAL);
1267 result = fiji_populate_single_memory_level(hwmgr,
1268 dpm_table->mclk_table.dpm_levels[i].value,
1269 &levels[i]);
1270 if (result)
1271 return result;
1272 }
1273
1274 /* Only enable level 0 for now. */
1275 levels[0].EnabledForActivity = 1;
1276
1277 /* This is to prevent MC activity in stutter mode from pushing DPM up;
1278 * the UVD change complements this by putting the MCLK in
1279 * a higher state by default such that we are not affected by
1280 * the up threshold or MCLK DPM latency.
1281 */
1282 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
1283 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1284
1285 smu_data->smc_state_table.MemoryDpmLevelCount =
1286 (uint8_t)dpm_table->mclk_table.count;
1287 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
1288 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1289 /* set highest level watermark to high */
1290 levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
1291 PPSMC_DISPLAY_WATERMARK_HIGH;
1292
1293 /* The level count is sent to the SMC once at SMC table init and never changes */
1294 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1295 (uint32_t)array_size, SMC_RAM_END);
1296
1297 return result;
1298}
1299
1300static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1301 uint32_t mclk, SMIO_Pattern *smio_pat)
1302{
1303 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1304 struct phm_ppt_v1_information *table_info =
1305 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1306 uint32_t i = 0;
1307
1308 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1309 /* find the first mvdd entry whose clock is at least the requested clock */
1310 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1311 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1312 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1313 break;
1314 }
1315 }
1316 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1317 "MVDD Voltage is outside the supported range.",
1318 return -EINVAL);
1319 } else
1320 return -EINVAL;
1321
1322 return 0;
1323}
1324
1325static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1326 SMU73_Discrete_DpmTable *table)
1327{
1328 int result = 0;
1329 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1330 struct phm_ppt_v1_information *table_info =
1331 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1332 struct pp_atomctrl_clock_dividers_vi dividers;
1333 SMIO_Pattern vol_level;
1334 uint32_t mvdd;
1335 uint16_t us_mvdd;
1336 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1337 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1338
1339 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1340
1341 if (!data->sclk_dpm_key_disabled) {
1342 /* Get MinVoltage and Frequency from DPM0,
1343 * already converted to SMC_UL */
1344 table->ACPILevel.SclkFrequency =
1345 data->dpm_table.sclk_table.dpm_levels[0].value;
1346 result = fiji_get_dependency_volt_by_clk(hwmgr,
1347 table_info->vdd_dep_on_sclk,
1348 table->ACPILevel.SclkFrequency,
1349 (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
1350 PP_ASSERT_WITH_CODE((0 == result),
1351 "Cannot find ACPI VDDC voltage value " \
1352 "in Clock Dependency Table",
1353 );
1354 } else {
1355 table->ACPILevel.SclkFrequency =
1356 data->vbios_boot_state.sclk_bootup_value;
1357 table->ACPILevel.MinVoltage =
1358 data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
1359 }
1360
1361 /* get the engine clock dividers for this clock value */
1362 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1363 table->ACPILevel.SclkFrequency, &dividers);
1364 PP_ASSERT_WITH_CODE(result == 0,
1365 "Error retrieving Engine Clock dividers from VBIOS.",
1366 return result);
1367
1368 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1369 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1370 table->ACPILevel.DeepSleepDivId = 0;
1371
1372 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1373 SPLL_PWRON, 0);
1374 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1375 SPLL_RESET, 1);
1376 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
1377 SCLK_MUX_SEL, 4);
1378
1379 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1380 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1381 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1382 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1383 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1384 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1385 table->ACPILevel.CcPwrDynRm = 0;
1386 table->ACPILevel.CcPwrDynRm1 = 0;
1387
1388 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1389 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1390 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1391 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1392 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1393 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1394 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1395 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1396 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1397 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1398 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1399
1400 if (!data->mclk_dpm_key_disabled) {
1401 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1402 table->MemoryACPILevel.MclkFrequency =
1403 data->dpm_table.mclk_table.dpm_levels[0].value;
1404 result = fiji_get_dependency_volt_by_clk(hwmgr,
1405 table_info->vdd_dep_on_mclk,
1406 table->MemoryACPILevel.MclkFrequency,
1407 (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
1408 PP_ASSERT_WITH_CODE((0 == result),
1409 "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
1410 );
1411 } else {
1412 table->MemoryACPILevel.MclkFrequency =
1413 data->vbios_boot_state.mclk_bootup_value;
1414 table->MemoryACPILevel.MinVoltage =
1415 data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
1416 }
1417
1418 us_mvdd = 0;
1419 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1420 (data->mclk_dpm_key_disabled))
1421 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1422 else {
1423 if (!fiji_populate_mvdd_value(hwmgr,
1424 data->dpm_table.mclk_table.dpm_levels[0].value,
1425 &vol_level))
1426 us_mvdd = vol_level.Voltage;
1427 }
1428
1429 table->MemoryACPILevel.MinMvdd =
1430 PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
1431
1432 table->MemoryACPILevel.EnabledForThrottle = 0;
1433 table->MemoryACPILevel.EnabledForActivity = 0;
1434 table->MemoryACPILevel.UpHyst = 0;
1435 table->MemoryACPILevel.DownHyst = 100;
1436 table->MemoryACPILevel.VoltageDownHyst = 0;
1437 table->MemoryACPILevel.ActivityLevel =
1438 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1439
1440 table->MemoryACPILevel.StutterEnable = false;
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1443
1444 return result;
1445}
1446
1447static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1448 SMU73_Discrete_DpmTable *table)
1449{
1450 int result = -EINVAL;
1451 uint8_t count;
1452 struct pp_atomctrl_clock_dividers_vi dividers;
1453 struct phm_ppt_v1_information *table_info =
1454 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1455 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1456 table_info->mm_dep_table;
1457
1458 table->VceLevelCount = (uint8_t)(mm_table->count);
1459 table->VceBootLevel = 0;
1460
1461 for (count = 0; count < table->VceLevelCount; count++) {
1462 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1463 table->VceLevel[count].MinVoltage = 0;
1464 table->VceLevel[count].MinVoltage |=
1465 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1466 table->VceLevel[count].MinVoltage |=
1467 ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
1468 VOLTAGE_SCALE) << VDDCI_SHIFT;
1469 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1470
1471 /*retrieve divider value for VBIOS */
1472 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1473 table->VceLevel[count].Frequency, &dividers);
1474 PP_ASSERT_WITH_CODE((0 == result),
1475 "can not find divide id for VCE engine clock",
1476 return result);
1477
1478 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1479
1480 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1481 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1482 }
1483 return result;
1484}
1485
1486static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1487 SMU73_Discrete_DpmTable *table)
1488{
1489 int result = -EINVAL;
1490 uint8_t count;
1491 struct pp_atomctrl_clock_dividers_vi dividers;
1492 struct phm_ppt_v1_information *table_info =
1493 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1494 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1495 table_info->mm_dep_table;
1496
1497 table->AcpLevelCount = (uint8_t)(mm_table->count);
1498 table->AcpBootLevel = 0;
1499
1500 for (count = 0; count < table->AcpLevelCount; count++) {
1501 table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
1502 table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1503 VOLTAGE_SCALE) << VDDC_SHIFT;
1504 table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1505 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1506 table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1507
1508 /* retrieve divider value for VBIOS */
1509 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1510 table->AcpLevel[count].Frequency, &dividers);
1511 PP_ASSERT_WITH_CODE((0 == result),
1512 "can not find divide id for engine clock", return result);
1513
1514 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1515
1516 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1517 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
1518 }
1519 return result;
1520}
1521
1522static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1523 SMU73_Discrete_DpmTable *table)
1524{
1525 int result = -EINVAL;
1526 uint8_t count;
1527 struct pp_atomctrl_clock_dividers_vi dividers;
1528 struct phm_ppt_v1_information *table_info =
1529 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1530 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1531 table_info->mm_dep_table;
1532
1533 table->SamuBootLevel = 0;
1534 table->SamuLevelCount = (uint8_t)(mm_table->count);
1535
1536 for (count = 0; count < table->SamuLevelCount; count++) {
1537 /* not sure whether we need evclk or not */
1538 table->SamuLevel[count].MinVoltage = 0;
1539 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1540 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1541 VOLTAGE_SCALE) << VDDC_SHIFT;
1542 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1543 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1544 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1545
1546 /* retrieve divider value for VBIOS */
1547 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1548 table->SamuLevel[count].Frequency, &dividers);
1549 PP_ASSERT_WITH_CODE((0 == result),
1550 "can not find divide id for samu clock", return result);
1551
1552 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1553
1554 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1555 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1556 }
1557 return result;
1558}
1559
1560static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1561 int32_t eng_clock, int32_t mem_clock,
1562 struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
1563{
1564 uint32_t dram_timing;
1565 uint32_t dram_timing2;
1566 uint32_t burstTime;
1567 ULONG state, trrds, trrdl;
1568 int result;
1569
1570 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1571 eng_clock, mem_clock);
1572 PP_ASSERT_WITH_CODE(result == 0,
1573 "Error calling VBIOS to set DRAM_TIMING.", return result);
1574
1575 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1576 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1577 burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
1578
1579 state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
1580 trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
1581 trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
1582
1583 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1584 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1585 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1586 arb_regs->TRRDS = (uint8_t)trrds;
1587 arb_regs->TRRDL = (uint8_t)trrdl;
1588
1589 return 0;
1590}
1591
1592static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1593{
1594 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1595 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1596 struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
1597 uint32_t i, j;
1598 int result = 0;
1599
1600 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1601 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1602 result = fiji_populate_memory_timing_parameters(hwmgr,
1603 data->dpm_table.sclk_table.dpm_levels[i].value,
1604 data->dpm_table.mclk_table.dpm_levels[j].value,
1605 &arb_regs.entries[i][j]);
1606 if (result)
1607 break;
1608 }
1609 }
1610
1611 if (!result)
1612 result = smu7_copy_bytes_to_smc(
1613 hwmgr,
1614 smu_data->smu7_data.arb_table_start,
1615 (uint8_t *)&arb_regs,
1616 sizeof(SMU73_Discrete_MCArbDramTimingTable),
1617 SMC_RAM_END);
1618 return result;
1619}
1620
1621static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1622 struct SMU73_Discrete_DpmTable *table)
1623{
1624 int result = -EINVAL;
1625 uint8_t count;
1626 struct pp_atomctrl_clock_dividers_vi dividers;
1627 struct phm_ppt_v1_information *table_info =
1628 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1629 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1630 table_info->mm_dep_table;
1631
1632 table->UvdLevelCount = (uint8_t)(mm_table->count);
1633 table->UvdBootLevel = 0;
1634
1635 for (count = 0; count < table->UvdLevelCount; count++) {
1636 table->UvdLevel[count].MinVoltage = 0;
1637 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1638 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1639 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1640 VOLTAGE_SCALE) << VDDC_SHIFT;
1641 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1642 VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1643 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1644
1645 /* retrieve divider value for VBIOS */
1646 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1647 table->UvdLevel[count].VclkFrequency, &dividers);
1648 PP_ASSERT_WITH_CODE((0 == result),
1649				"cannot find divider id for Vclk", return result);
1650
1651 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1652
1653 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1654 table->UvdLevel[count].DclkFrequency, &dividers);
1655 PP_ASSERT_WITH_CODE((0 == result),
1656				"cannot find divider id for Dclk", return result);
1657
1658 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1659
1660 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1661 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1662 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1663
1664 }
1665 return result;
1666}
1667
1668static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1669 struct SMU73_Discrete_DpmTable *table)
1670{
1671 int result = 0;
1672 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1673
1674 table->GraphicsBootLevel = 0;
1675 table->MemoryBootLevel = 0;
1676
1677 /* find boot level from dpm table */
1678 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1679 data->vbios_boot_state.sclk_bootup_value,
1680 (uint32_t *)&(table->GraphicsBootLevel));
1681
1682 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1683 data->vbios_boot_state.mclk_bootup_value,
1684 (uint32_t *)&(table->MemoryBootLevel));
1685
1686 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1687 VOLTAGE_SCALE;
1688 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1689 VOLTAGE_SCALE;
1690 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1691 VOLTAGE_SCALE;
1692
1693 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1694 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1695 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1696
1697 return 0;
1698}
1699
1700static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1701{
1702 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1703 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1704 struct phm_ppt_v1_information *table_info =
1705 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1706 uint8_t count, level;
1707
1708 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1709 for (level = 0; level < count; level++) {
1710 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1711 data->vbios_boot_state.sclk_bootup_value) {
1712 smu_data->smc_state_table.GraphicsBootLevel = level;
1713 break;
1714 }
1715 }
1716
1717 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1718 for (level = 0; level < count; level++) {
1719 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1720 data->vbios_boot_state.mclk_bootup_value) {
1721 smu_data->smc_state_table.MemoryBootLevel = level;
1722 break;
1723 }
1724 }
1725
1726 return 0;
1727}
1728
1729static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1730{
1731 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1732 volt_with_cks, value;
1733 uint16_t clock_freq_u16;
1734 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1735 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1736 volt_offset = 0;
1737 struct phm_ppt_v1_information *table_info =
1738 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1739 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1740 table_info->vdd_dep_on_sclk;
1741
1742 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1743
1744	/* Read SMU_EFUSE to calculate RO and determine whether the part is
1745	 * SS or FF: if RO >= 1660 MHz, the part is FF.
1746 */
1747 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1748 ixSMU_EFUSE_0 + (146 * 4));
1749 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1750 ixSMU_EFUSE_0 + (148 * 4));
1751 efuse &= 0xFF000000;
1752 efuse = efuse >> 24;
1753 efuse2 &= 0xF;
1754
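	/* The 8-bit fuse value maps linearly onto the RO range selected by
	 * efuse2, e.g. efuse = 128 with efuse2 == 1 gives
	 * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 (integer math),
	 * i.e. an FF part.
	 */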
1755 if (efuse2 == 1)
1756 ro = (2300 - 1350) * efuse / 255 + 1350;
1757 else
1758 ro = (2500 - 1000) * efuse / 255 + 1000;
1759
1760 if (ro >= 1660)
1761 type = 0;
1762 else
1763 type = 1;
1764
1765 /* Populate Stretch amount */
1766 smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
1767
1768 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1769 for (i = 0; i < sclk_table->count; i++) {
1770 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1771 sclk_table->entries[i].cks_enable << i;
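		/* The fitted voltage curves below expect sclk in MHz;
		 * entries[i].clk is in 10 kHz units, so clk/100 converts it.
		 */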
1772 volt_without_cks = (uint32_t)((14041 *
1773 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1774 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1775 volt_with_cks = (uint32_t)((13946 *
1776 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1777 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1778 if (volt_without_cks >= volt_with_cks)
1779 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1780 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1781 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1782 }
1783
1784 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1785 STRETCH_ENABLE, 0x0);
1786 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1787 masterReset, 0x1);
1788 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1789 staticEnable, 0x1);
1790 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1791 masterReset, 0x0);
1792
1793 /* Populate CKS Lookup Table */
1794 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1795 stretch_amount2 = 0;
1796 else if (stretch_amount == 3 || stretch_amount == 4)
1797 stretch_amount2 = 1;
1798 else {
1799 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1800 PHM_PlatformCaps_ClockStretcher);
1801 PP_ASSERT_WITH_CODE(false,
1802 "Stretch Amount in PPTable not supported\n",
1803 return -EINVAL);
1804 }
1805
1806 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1807 ixPWR_CKS_CNTL);
1808 value &= 0xFFC2FF87;
1809 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1810 fiji_clock_stretcher_lookup_table[stretch_amount2][0];
1811 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1812 fiji_clock_stretcher_lookup_table[stretch_amount2][1];
1813 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
1814 GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
1815 SclkFrequency) / 100);
1816 if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
1817 clock_freq_u16 &&
1818 fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
1819 clock_freq_u16) {
1820 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1821 value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1822 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1823 value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1824 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1825 value |= (fiji_clock_stretch_amount_conversion
1826 [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
1827 [stretch_amount]) << 3;
1828 }
1829 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1830 CKS_LOOKUPTableEntry[0].minFreq);
1831 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1832 CKS_LOOKUPTableEntry[0].maxFreq);
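	/* Pack the lookup-table 'setting' byte: bits 0-6 carry the LDO refsel
	 * value and bit 7 carries the use-for-low-frequency flag.
	 */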
1833 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1834 fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1835 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1836 (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1837
1838 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1839 ixPWR_CKS_CNTL, value);
1840
1841 /* Populate DDT Lookup Table */
1842 for (i = 0; i < 4; i++) {
1843 /* Assign the minimum and maximum VID stored
1844 * in the last row of Clock Stretcher Voltage Table.
1845 */
1846 smu_data->smc_state_table.ClockStretcherDataTable.
1847 ClockStretcherDataTableEntry[i].minVID =
1848 (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
1849 smu_data->smc_state_table.ClockStretcherDataTable.
1850 ClockStretcherDataTableEntry[i].maxVID =
1851 (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
1852 /* Loop through each SCLK and check the frequency
1853 * to see if it lies within the frequency for clock stretcher.
1854 */
1855 for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
1856 cks_setting = 0;
1857 clock_freq = PP_SMC_TO_HOST_UL(
1858 smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
1859 /* Check the allowed frequency against the sclk level[j].
1860			 * The sclk's endianness has already been converted and it
1861			 * is in 10 kHz units, as opposed to the Data table, which
1862			 * is in MHz units.
1863 */
1864 if (clock_freq >=
1865 (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
1866 cks_setting |= 0x2;
1867 if (clock_freq <
1868 (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
1869 cks_setting |= 0x1;
1870 }
1871 smu_data->smc_state_table.ClockStretcherDataTable.
1872 ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
1873 }
1874 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
1875 ClockStretcherDataTable.
1876 ClockStretcherDataTableEntry[i].setting);
1877 }
1878
1879 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1880 value &= 0xFFFFFFFE;
1881 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1882
1883 return 0;
1884}
1885
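/* Pack the SMC VRConfig word with one control field per rail (VDDGFX, VDDC,
 * VDDCI, MVDD), selecting SVI2 plane, SMIO pattern or static voltage. On
 * Fiji VDDGFX is merged with VDDC, so VDDC must be under SVI2 control.
 */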
1886static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
1887 struct SMU73_Discrete_DpmTable *table)
1888{
1889 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1890 uint16_t config;
1891
1892 config = VR_MERGED_WITH_VDDC;
1893 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1894
1895 /* Set Vddc Voltage Controller */
1896 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1897 config = VR_SVI2_PLANE_1;
1898 table->VRConfig |= config;
1899 } else {
1900 PP_ASSERT_WITH_CODE(false,
1901 "VDDC should be on SVI2 control in merged mode!",
1902 );
1903 }
1904 /* Set Vddci Voltage Controller */
1905 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1906 config = VR_SVI2_PLANE_2; /* only in merged mode */
1907 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1908 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1909 config = VR_SMIO_PATTERN_1;
1910 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1911 } else {
1912 config = VR_STATIC_VOLTAGE;
1913 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1914 }
1915 /* Set Mvdd Voltage Controller */
1916 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1917 config = VR_SVI2_PLANE_2;
1918 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1919 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1920 config = VR_SMIO_PATTERN_2;
1921 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1922 } else {
1923 config = VR_STATIC_VOLTAGE;
1924 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1925 }
1926
1927 return 0;
1928}
1929
1930static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
1931{
1932 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1933 uint32_t tmp;
1934 int result;
1935
1936 /* This is a read-modify-write on the first byte of the ARB table.
1937 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
1938 * is the field 'current'.
1939	 * This solution is ugly, but we never write the whole table, only
1940	 * individual fields in it.
1941 * In reality this field should not be in that structure
1942 * but in a soft register.
1943 */
1944 result = smu7_read_smc_sram_dword(hwmgr,
1945 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1946
1947 if (result)
1948 return result;
1949
1950 tmp &= 0x00FFFFFF;
1951 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1952
1953 return smu7_write_smc_sram_dword(hwmgr,
1954 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1955}
1956
1957static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
1958{
1959 struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
1960 struct SMU73_Discrete_GraphicsLevel *levels =
1961 data->smc_state_table.GraphicsLevel;
1962 unsigned min_level = 1;
1963
1964 hwmgr->default_gfx_power_profile.activity_threshold =
1965 be16_to_cpu(levels[0].ActivityLevel);
1966 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1967 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1968 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1969
1970 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1971 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1972
1973 /* Workaround compute SDMA instability: disable lowest SCLK
1974 * DPM level. Optimize compute power profile: Use only highest
1975 * 2 power levels (if more than 2 are available), Hysteresis:
1976 * 0ms up, 5ms down
1977 */
1978 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1979 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1980 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1981 min_level = 1;
1982 else
1983 min_level = 0;
1984 hwmgr->default_compute_power_profile.min_sclk =
1985 be32_to_cpu(levels[min_level].SclkFrequency);
1986 hwmgr->default_compute_power_profile.up_hyst = 0;
1987 hwmgr->default_compute_power_profile.down_hyst = 5;
1988
1989 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1990 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1991
1992 return 0;
1993}
1994
1995static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
1996{
1997 pp_atomctrl_voltage_table param_led_dpm;
1998 int result = 0;
1999 u32 mask = 0;
2000
2001 result = atomctrl_get_voltage_table_v3(hwmgr,
2002 VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
2003 &param_led_dpm);
2004 if (result == 0) {
2005 int i, j;
2006 u32 tmp = param_led_dpm.mask_low;
2007
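		/* Pack up to three LED-DPM GPIO pin indices, one per byte of
		 * 'mask': each set bit n in the GPIO LUT mask selects pin n.
		 */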
2008 for (i = 0, j = 0; i < 32; i++) {
2009 if (tmp & 1) {
2010 mask |= (i << (8 * j));
2011 if (++j >= 3)
2012 break;
2013 }
2014 tmp >>= 1;
2015 }
2016 }
2017 if (mask)
2018 smum_send_msg_to_smc_with_parameter(hwmgr,
2019 PPSMC_MSG_LedConfig,
2020 mask);
2021 return 0;
2022}
2023
2024static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2025{
2026 int result;
2027 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2028 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2029 struct phm_ppt_v1_information *table_info =
2030 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2031 struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2032 uint8_t i;
2033 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
2034
2035 fiji_initialize_power_tune_defaults(hwmgr);
2036
2037 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
2038 fiji_populate_smc_voltage_tables(hwmgr, table);
2039
2040 table->SystemFlags = 0;
2041
2042 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2043 PHM_PlatformCaps_AutomaticDCTransition))
2044 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2045
2046 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2047 PHM_PlatformCaps_StepVddc))
2048 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2049
2050 if (data->is_memory_gddr5)
2051 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2052
2053 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
2054 result = fiji_populate_ulv_state(hwmgr, table);
2055 PP_ASSERT_WITH_CODE(0 == result,
2056 "Failed to initialize ULV state!", return result);
2057 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2058 ixCG_ULV_PARAMETER, 0x40035);
2059 }
2060
2061 result = fiji_populate_smc_link_level(hwmgr, table);
2062 PP_ASSERT_WITH_CODE(0 == result,
2063 "Failed to initialize Link Level!", return result);
2064
2065 result = fiji_populate_all_graphic_levels(hwmgr);
2066 PP_ASSERT_WITH_CODE(0 == result,
2067 "Failed to initialize Graphics Level!", return result);
2068
2069 result = fiji_populate_all_memory_levels(hwmgr);
2070 PP_ASSERT_WITH_CODE(0 == result,
2071 "Failed to initialize Memory Level!", return result);
2072
2073 result = fiji_populate_smc_acpi_level(hwmgr, table);
2074 PP_ASSERT_WITH_CODE(0 == result,
2075 "Failed to initialize ACPI Level!", return result);
2076
2077 result = fiji_populate_smc_vce_level(hwmgr, table);
2078 PP_ASSERT_WITH_CODE(0 == result,
2079 "Failed to initialize VCE Level!", return result);
2080
2081 result = fiji_populate_smc_acp_level(hwmgr, table);
2082 PP_ASSERT_WITH_CODE(0 == result,
2083 "Failed to initialize ACP Level!", return result);
2084
2085 result = fiji_populate_smc_samu_level(hwmgr, table);
2086 PP_ASSERT_WITH_CODE(0 == result,
2087 "Failed to initialize SAMU Level!", return result);
2088
2089 /* Since only the initial state is completely set up at this point
2090 * (the other states are just copies of the boot state) we only
2091 * need to populate the ARB settings for the initial state.
2092 */
2093 result = fiji_program_memory_timing_parameters(hwmgr);
2094 PP_ASSERT_WITH_CODE(0 == result,
2095 "Failed to Write ARB settings for the initial state.", return result);
2096
2097 result = fiji_populate_smc_uvd_level(hwmgr, table);
2098 PP_ASSERT_WITH_CODE(0 == result,
2099 "Failed to initialize UVD Level!", return result);
2100
2101 result = fiji_populate_smc_boot_level(hwmgr, table);
2102 PP_ASSERT_WITH_CODE(0 == result,
2103 "Failed to initialize Boot Level!", return result);
2104
2105 result = fiji_populate_smc_initailial_state(hwmgr);
2106 PP_ASSERT_WITH_CODE(0 == result,
2107 "Failed to initialize Boot State!", return result);
2108
2109 result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
2110 PP_ASSERT_WITH_CODE(0 == result,
2111 "Failed to populate BAPM Parameters!", return result);
2112
2113 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2114 PHM_PlatformCaps_ClockStretcher)) {
2115 result = fiji_populate_clock_stretcher_data_table(hwmgr);
2116 PP_ASSERT_WITH_CODE(0 == result,
2117 "Failed to populate Clock Stretcher Data Table!",
2118 return result);
2119 }
2120
2121 table->GraphicsVoltageChangeEnable = 1;
2122 table->GraphicsThermThrottleEnable = 1;
2123 table->GraphicsInterval = 1;
2124 table->VoltageInterval = 1;
2125 table->ThermalInterval = 1;
2126 table->TemperatureLimitHigh =
2127 table_info->cac_dtp_table->usTargetOperatingTemp *
2128 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2129 table->TemperatureLimitLow =
2130 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2131 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2132 table->MemoryVoltageChangeEnable = 1;
2133 table->MemoryInterval = 1;
2134 table->VoltageResponseTime = 0;
2135 table->PhaseResponseTime = 0;
2136 table->MemoryThermThrottleEnable = 1;
2137 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
2138 table->PCIeGenInterval = 1;
2139 table->VRConfig = 0;
2140
2141 result = fiji_populate_vr_config(hwmgr, table);
2142 PP_ASSERT_WITH_CODE(0 == result,
2143 "Failed to populate VRConfig setting!", return result);
2144
2145 table->ThermGpio = 17;
2146 table->SclkStepSize = 0x4000;
2147
2148 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2149 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2150 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2151 PHM_PlatformCaps_RegulatorHot);
2152 } else {
2153 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2154 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2155 PHM_PlatformCaps_RegulatorHot);
2156 }
2157
2158 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2159 &gpio_pin)) {
2160 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
2161 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2162 PHM_PlatformCaps_AutomaticDCTransition);
2163 } else {
2164 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2165 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2166 PHM_PlatformCaps_AutomaticDCTransition);
2167 }
2168
2169 /* Thermal Output GPIO */
2170 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
2171 &gpio_pin)) {
2172 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2173 PHM_PlatformCaps_ThermalOutGPIO);
2174
2175 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
2176
2177		/* For polarity, read GPIOPAD_A for the assigned GPIO pin:
2178		 * VBIOS programs this register to the 'inactive state', so the
2179		 * driver can determine the 'active state' from it and program
2180		 * the SMU with the correct polarity.
2181		 */
2182 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
2183 (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
2184 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2185
2186 /* if required, combine VRHot/PCC with thermal out GPIO */
2187 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2188 PHM_PlatformCaps_RegulatorHot) &&
2189 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2190 PHM_PlatformCaps_CombinePCCWithThermalSignal))
2191 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2192 } else {
2193 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2194 PHM_PlatformCaps_ThermalOutGPIO);
2195 table->ThermOutGpio = 17;
2196 table->ThermOutPolarity = 1;
2197 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2198 }
2199
2200 for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
2201 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2202
2203 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2204 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2205 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2206 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2207 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2208 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2209 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2210 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2211 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2212
2213	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
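	/* The copy starts at SystemFlags and stops short of the three
	 * trailing SMU73_PIDController blocks, which are excluded from the
	 * upload size.
	 */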
2214 result = smu7_copy_bytes_to_smc(hwmgr,
2215 smu_data->smu7_data.dpm_table_start +
2216 offsetof(SMU73_Discrete_DpmTable, SystemFlags),
2217 (uint8_t *)&(table->SystemFlags),
2218 sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
2219 SMC_RAM_END);
2220 PP_ASSERT_WITH_CODE(0 == result,
2221 "Failed to upload dpm data to SMC memory!", return result);
2222
2223 result = fiji_init_arb_table_index(hwmgr);
2224 PP_ASSERT_WITH_CODE(0 == result,
2225 "Failed to upload arb data to SMC memory!", return result);
2226
2227 result = fiji_populate_pm_fuses(hwmgr);
2228 PP_ASSERT_WITH_CODE(0 == result,
2229 "Failed to populate PM fuses to SMC memory!", return result);
2230
2231 result = fiji_setup_dpm_led_config(hwmgr);
2232 PP_ASSERT_WITH_CODE(0 == result,
2233 "Failed to setup dpm led config", return result);
2234
2235 fiji_save_default_power_profile(hwmgr);
2236
2237 return 0;
2238}
2239
2240static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2241{
2242 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2243
2244 SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2245 uint32_t duty100;
2246 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2247 uint16_t fdo_min, slope1, slope2;
2248 uint32_t reference_clock;
2249 int res;
2250 uint64_t tmp64;
2251
2252 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2253 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2254 PHM_PlatformCaps_MicrocodeFanControl);
2255 return 0;
2256 }
2257
2258 if (smu_data->smu7_data.fan_table_start == 0) {
2259 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2260 PHM_PlatformCaps_MicrocodeFanControl);
2261 return 0;
2262 }
2263
2264 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2265 CG_FDO_CTRL1, FMAX_DUTY100);
2266
2267 if (duty100 == 0) {
2268 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2269 PHM_PlatformCaps_MicrocodeFanControl);
2270 return 0;
2271 }
2272
2273 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
2274 usPWMMin * duty100;
2275 do_div(tmp64, 10000);
2276 fdo_min = (uint16_t)tmp64;
2277
2278 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2279 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2280 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2281 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2282
2283 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2284 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2285 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2286 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2287
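	/* Slopes of the two fan-curve segments (min->med and med->high);
	 * the +50 term makes the integer division by 100 round to nearest.
	 */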
2288 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2289 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2290
2291 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
2292 thermal_controller.advanceFanControlParameters.usTMin) / 100);
2293 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
2294 thermal_controller.advanceFanControlParameters.usTMed) / 100);
2295 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
2296 thermal_controller.advanceFanControlParameters.usTMax) / 100);
2297
2298 fan_table.Slope1 = cpu_to_be16(slope1);
2299 fan_table.Slope2 = cpu_to_be16(slope2);
2300
2301 fan_table.FdoMin = cpu_to_be16(fdo_min);
2302
2303 fan_table.HystDown = cpu_to_be16(hwmgr->
2304 thermal_controller.advanceFanControlParameters.ucTHyst);
2305
2306 fan_table.HystUp = cpu_to_be16(1);
2307
2308 fan_table.HystSlope = cpu_to_be16(1);
2309
2310 fan_table.TempRespLim = cpu_to_be16(5);
2311
2312 reference_clock = smu7_get_xclk(hwmgr);
2313
2314 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2315 thermal_controller.advanceFanControlParameters.ulCycleDelay *
2316 reference_clock) / 1600);
2317
2318 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2319
2320 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2321 hwmgr->device, CGS_IND_REG__SMC,
2322 CG_MULT_THERMAL_CTRL, TEMP_SEL);
2323
2324 res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
2325 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
2326 SMC_RAM_END);
2327
2328 if (!res && hwmgr->thermal_controller.
2329 advanceFanControlParameters.ucMinimumPWMLimit)
2330 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2331 PPSMC_MSG_SetFanMinPwm,
2332 hwmgr->thermal_controller.
2333 advanceFanControlParameters.ucMinimumPWMLimit);
2334
2335 if (!res && hwmgr->thermal_controller.
2336 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
2337 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2338 PPSMC_MSG_SetFanSclkTarget,
2339 hwmgr->thermal_controller.
2340 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2341
2342 if (res)
2343 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2344 PHM_PlatformCaps_MicrocodeFanControl);
2345
2346 return 0;
2347}
2348
2349
2350static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2351{
2352 int ret;
2353 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2354
2355 if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
2356 return 0;
2357
2358 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2359
2360 if (!ret)
2361 /* If this param is not changed, this function could fire unnecessarily */
2362 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
2363
2364 return ret;
2365}
2366
2367static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2368{
2369 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2370
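	/* DPMTABLE_OD_UPDATE_SCLK and DPMTABLE_OD_UPDATE_MCLK are distinct
	 * single-bit flags, so adding them here acts as a bitwise OR.
	 */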
2371 if (data->need_update_smu7_dpm_table &
2372 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2373 return fiji_program_memory_timing_parameters(hwmgr);
2374
2375 return 0;
2376}
2377
2378static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2379{
2380 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2381 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2382
2383 int result = 0;
2384 uint32_t low_sclk_interrupt_threshold = 0;
2385
2386 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2387 PHM_PlatformCaps_SclkThrottleLowNotification)
2388 && (hwmgr->gfx_arbiter.sclk_threshold !=
2389 data->low_sclk_interrupt_threshold)) {
2390 data->low_sclk_interrupt_threshold =
2391 hwmgr->gfx_arbiter.sclk_threshold;
2392 low_sclk_interrupt_threshold =
2393 data->low_sclk_interrupt_threshold;
2394
2395 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2396
2397 result = smu7_copy_bytes_to_smc(
2398 hwmgr,
2399 smu_data->smu7_data.dpm_table_start +
2400 offsetof(SMU73_Discrete_DpmTable,
2401 LowSclkInterruptThreshold),
2402 (uint8_t *)&low_sclk_interrupt_threshold,
2403 sizeof(uint32_t),
2404 SMC_RAM_END);
2405 }
2406 result = fiji_program_mem_timing_parameters(hwmgr);
2407 PP_ASSERT_WITH_CODE((result == 0),
2408 "Failed to program memory timing parameters!",
2409 );
2410 return result;
2411}
2412
2413static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
2414{
2415 switch (type) {
2416 case SMU_SoftRegisters:
2417 switch (member) {
2418 case HandshakeDisables:
2419 return offsetof(SMU73_SoftRegisters, HandshakeDisables);
2420 case VoltageChangeTimeout:
2421 return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
2422 case AverageGraphicsActivity:
2423 return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
2424 case PreVBlankGap:
2425 return offsetof(SMU73_SoftRegisters, PreVBlankGap);
2426 case VBlankTimeout:
2427 return offsetof(SMU73_SoftRegisters, VBlankTimeout);
2428 case UcodeLoadStatus:
2429 return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
2430 case DRAM_LOG_ADDR_H:
2431 return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
2432 case DRAM_LOG_ADDR_L:
2433 return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
2434 case DRAM_LOG_PHY_ADDR_H:
2435 return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2436 case DRAM_LOG_PHY_ADDR_L:
2437 return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2438 case DRAM_LOG_BUFF_SIZE:
2439 return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2440 }
2441 case SMU_Discrete_DpmTable:
2442 switch (member) {
2443 case UvdBootLevel:
2444 return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
2445 case VceBootLevel:
2446 return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2447 case SamuBootLevel:
2448 return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2449 case LowSclkInterruptThreshold:
2450 return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
2451 }
2452 }
2453 pr_warn("can't get the offset of type %x member %x\n", type, member);
2454 return 0;
2455}
2456
2457static uint32_t fiji_get_mac_definition(uint32_t value)
2458{
2459 switch (value) {
2460 case SMU_MAX_LEVELS_GRAPHICS:
2461 return SMU73_MAX_LEVELS_GRAPHICS;
2462 case SMU_MAX_LEVELS_MEMORY:
2463 return SMU73_MAX_LEVELS_MEMORY;
2464 case SMU_MAX_LEVELS_LINK:
2465 return SMU73_MAX_LEVELS_LINK;
2466 case SMU_MAX_ENTRIES_SMIO:
2467 return SMU73_MAX_ENTRIES_SMIO;
2468 case SMU_MAX_LEVELS_VDDC:
2469 return SMU73_MAX_LEVELS_VDDC;
2470 case SMU_MAX_LEVELS_VDDGFX:
2471 return SMU73_MAX_LEVELS_VDDGFX;
2472 case SMU_MAX_LEVELS_VDDCI:
2473 return SMU73_MAX_LEVELS_VDDCI;
2474 case SMU_MAX_LEVELS_MVDD:
2475 return SMU73_MAX_LEVELS_MVDD;
2476 }
2477
2478 pr_warn("can't get the mac of %x\n", value);
2479 return 0;
2480}
2481
2482
2483static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2484{
2485 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2486 uint32_t mm_boot_level_offset, mm_boot_level_value;
2487 struct phm_ppt_v1_information *table_info =
2488 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2489
2490 smu_data->smc_state_table.UvdBootLevel = 0;
2491 if (table_info->mm_dep_table->count > 0)
2492 smu_data->smc_state_table.UvdBootLevel =
2493 (uint8_t) (table_info->mm_dep_table->count - 1);
2494 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
2495 UvdBootLevel);
2496 mm_boot_level_offset /= 4;
2497 mm_boot_level_offset *= 4;
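	/* Round the byte offset down to its containing dword; UvdBootLevel
	 * sits in the top byte of that dword, hence the 0x00FFFFFF mask and
	 * the shift by 24 below.
	 */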
2498 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2499 CGS_IND_REG__SMC, mm_boot_level_offset);
2500 mm_boot_level_value &= 0x00FFFFFF;
2501 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2502 cgs_write_ind_register(hwmgr->device,
2503 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2504
2505 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2506 PHM_PlatformCaps_UVDDPM) ||
2507 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2508 PHM_PlatformCaps_StablePState))
2509 smum_send_msg_to_smc_with_parameter(hwmgr,
2510 PPSMC_MSG_UVDDPM_SetEnabledMask,
2511 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2512 return 0;
2513}
2514
2515static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2516{
2517 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2518 uint32_t mm_boot_level_offset, mm_boot_level_value;
2519 struct phm_ppt_v1_information *table_info =
2520 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2521
2522 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2523 PHM_PlatformCaps_StablePState))
2524 smu_data->smc_state_table.VceBootLevel =
2525 (uint8_t) (table_info->mm_dep_table->count - 1);
2526 else
2527 smu_data->smc_state_table.VceBootLevel = 0;
2528
2529 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2530 offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2531 mm_boot_level_offset /= 4;
2532 mm_boot_level_offset *= 4;
2533 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2534 CGS_IND_REG__SMC, mm_boot_level_offset);
2535 mm_boot_level_value &= 0xFF00FFFF;
2536 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2537 cgs_write_ind_register(hwmgr->device,
2538 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2539
2540 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2541 smum_send_msg_to_smc_with_parameter(hwmgr,
2542 PPSMC_MSG_VCEDPM_SetEnabledMask,
2543 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2544 return 0;
2545}
2546
2547static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2548{
2549 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2550 uint32_t mm_boot_level_offset, mm_boot_level_value;
2551
2552
2553 smu_data->smc_state_table.SamuBootLevel = 0;
2554 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2555 offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
2556
2557 mm_boot_level_offset /= 4;
2558 mm_boot_level_offset *= 4;
2559 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2560 CGS_IND_REG__SMC, mm_boot_level_offset);
2561 mm_boot_level_value &= 0xFFFFFF00;
2562 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2563 cgs_write_ind_register(hwmgr->device,
2564 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2565
2566 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2567 PHM_PlatformCaps_StablePState))
2568 smum_send_msg_to_smc_with_parameter(hwmgr,
2569 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2570 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2571 return 0;
2572}
2573
2574static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2575{
2576 switch (type) {
2577 case SMU_UVD_TABLE:
2578 fiji_update_uvd_smc_table(hwmgr);
2579 break;
2580 case SMU_VCE_TABLE:
2581 fiji_update_vce_smc_table(hwmgr);
2582 break;
2583 case SMU_SAMU_TABLE:
2584 fiji_update_samu_smc_table(hwmgr);
2585 break;
2586 default:
2587 break;
2588 }
2589 return 0;
2590}
2591
2592static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
2593{
2594 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2595 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
2596 uint32_t tmp;
2597 int result;
2598 bool error = false;
2599
2600 result = smu7_read_smc_sram_dword(hwmgr,
2601 SMU7_FIRMWARE_HEADER_LOCATION +
2602 offsetof(SMU73_Firmware_Header, DpmTable),
2603 &tmp, SMC_RAM_END);
2604
2605 if (0 == result)
2606 smu_data->smu7_data.dpm_table_start = tmp;
2607
2608 error |= (0 != result);
2609
2610 result = smu7_read_smc_sram_dword(hwmgr,
2611 SMU7_FIRMWARE_HEADER_LOCATION +
2612 offsetof(SMU73_Firmware_Header, SoftRegisters),
2613 &tmp, SMC_RAM_END);
2614
2615 if (!result) {
2616 data->soft_regs_start = tmp;
2617 smu_data->smu7_data.soft_regs_start = tmp;
2618 }
2619
2620 error |= (0 != result);
2621
2622 result = smu7_read_smc_sram_dword(hwmgr,
2623 SMU7_FIRMWARE_HEADER_LOCATION +
2624 offsetof(SMU73_Firmware_Header, mcRegisterTable),
2625 &tmp, SMC_RAM_END);
2626
2627 if (!result)
2628 smu_data->smu7_data.mc_reg_table_start = tmp;
2629
2630 result = smu7_read_smc_sram_dword(hwmgr,
2631 SMU7_FIRMWARE_HEADER_LOCATION +
2632 offsetof(SMU73_Firmware_Header, FanTable),
2633 &tmp, SMC_RAM_END);
2634
2635 if (!result)
2636 smu_data->smu7_data.fan_table_start = tmp;
2637
2638 error |= (0 != result);
2639
2640 result = smu7_read_smc_sram_dword(hwmgr,
2641 SMU7_FIRMWARE_HEADER_LOCATION +
2642 offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
2643 &tmp, SMC_RAM_END);
2644
2645 if (!result)
2646 smu_data->smu7_data.arb_table_start = tmp;
2647
2648 error |= (0 != result);
2649
2650 result = smu7_read_smc_sram_dword(hwmgr,
2651 SMU7_FIRMWARE_HEADER_LOCATION +
2652 offsetof(SMU73_Firmware_Header, Version),
2653 &tmp, SMC_RAM_END);
2654
2655 if (!result)
2656 hwmgr->microcode_version_info.SMC = tmp;
2657
2658 error |= (0 != result);
2659
2660 return error ? -1 : 0;
2661}
2662
2663static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2664{
2665
2666 /* Program additional LP registers
2667 * that are no longer programmed by VBIOS
2668 */
2669 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
2670 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2671 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
2672 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2673 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
2674 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2675 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
2676 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2677 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
2678 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2679 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
2680 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2681 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
2682 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2683
2684 return 0;
2685}
2686
2687static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
2688{
2689 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2690 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2691 ? true : false;
2692}
2693
2694static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2695 struct amd_pp_profile *request)
2696{
2697 struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
2698 (hwmgr->smu_backend);
2699 struct SMU73_Discrete_GraphicsLevel *levels =
2700 smu_data->smc_state_table.GraphicsLevel;
2701 uint32_t array = smu_data->smu7_data.dpm_table_start +
2702 offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
2703 uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
2704 SMU73_MAX_LEVELS_GRAPHICS;
2705 uint32_t i;
2706
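	/* Apply the requested profile's activity threshold and hysteresis to
	 * every populated graphics level, then re-upload the whole
	 * GraphicsLevel array to SMC RAM.
	 */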
2707 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2708 levels[i].ActivityLevel =
2709 cpu_to_be16(request->activity_threshold);
2710 levels[i].EnabledForActivity = 1;
2711 levels[i].UpHyst = request->up_hyst;
2712 levels[i].DownHyst = request->down_hyst;
2713 }
2714
2715 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2716 array_size, SMC_RAM_END);
2717}
396 2718
397 2719	const struct pp_smumgr_func fiji_smu_funcs = {
398 2720		.smu_init = &fiji_smu_init,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 175bf9f8ef9c..279647772578 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,6 +28,15 @@
28 28	#include "smu7_smumgr.h"
29 29
30 30
31struct fiji_pt_defaults {
32 uint8_t SviLoadLineEn;
33 uint8_t SviLoadLineVddC;
34 uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
35 uint8_t TDC_MAWt;
36 uint8_t TdcWaterfallCtl;
37 uint8_t DTEAmbientTempBase;
38};
39
31 40	struct fiji_smumgr {
32 41		struct smu7_smumgr smu7_data;
33 42		struct SMU73_Discrete_DpmTable smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
deleted file mode 100644
index efb0fc033274..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ /dev/null
@@ -1,2568 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "pp_debug.h"
25#include "iceland_smc.h"
26#include "smu7_dyn_defaults.h"
27
28#include "smu7_hwmgr.h"
29#include "hardwaremanager.h"
30#include "ppatomctrl.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "pppcielanes.h"
34#include "pp_endian.h"
35#include "smu7_ppsmc.h"
36
37#include "smu71_discrete.h"
38
39#include "smu/smu_7_1_1_d.h"
40#include "smu/smu_7_1_1_sh_mask.h"
41
42#include "gmc/gmc_8_1_d.h"
43#include "gmc/gmc_8_1_sh_mask.h"
44
45#include "bif/bif_5_0_d.h"
46#include "bif/bif_5_0_sh_mask.h"
47
48#include "dce/dce_10_0_d.h"
49#include "dce/dce_10_0_sh_mask.h"
50#include "processpptables.h"
51
52#include "iceland_smumgr.h"
53
54#define VOLTAGE_SCALE 4
55#define POWERTUNE_DEFAULT_SET_MAX 1
56#define VOLTAGE_VID_OFFSET_SCALE1 625
57#define VOLTAGE_VID_OFFSET_SCALE2 100
58#define MC_CG_ARB_FREQ_F1 0x0b
59#define VDDC_VDDCI_DELTA 200
60
61#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
62#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
63#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
64#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
65
66static const struct iceland_pt_defaults defaults_iceland = {
67 /*
68 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
69 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
70 */
71 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
72 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
73 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
74};
75
76/* 35W - XT, XTL */
77static const struct iceland_pt_defaults defaults_icelandxt = {
78 /*
79 * sviLoadLIneEn, SviLoadLineVddC,
80 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
81 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
82 * BAPM_TEMP_GRADIENT
83 */
84 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
85 { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
86 { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
87};
88
89/* 25W - PRO, LE */
90static const struct iceland_pt_defaults defaults_icelandpro = {
91 /*
92 * sviLoadLIneEn, SviLoadLineVddC,
93 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
94 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
95 * BAPM_TEMP_GRADIENT
96 */
97 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
98 { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
99 { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
100};
101
102static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
103{
104 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
105 struct cgs_system_info sys_info = {0};
106 uint32_t dev_id;
107
108 sys_info.size = sizeof(struct cgs_system_info);
109 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
110 cgs_query_system_info(hwmgr->device, &sys_info);
111 dev_id = (uint32_t)sys_info.value;
112
113 switch (dev_id) {
114 case DEVICE_ID_VI_ICELAND_M_6900:
115 case DEVICE_ID_VI_ICELAND_M_6903:
116 smu_data->power_tune_defaults = &defaults_icelandxt;
117 break;
118
119 case DEVICE_ID_VI_ICELAND_M_6901:
120 case DEVICE_ID_VI_ICELAND_M_6902:
121 smu_data->power_tune_defaults = &defaults_icelandpro;
122 break;
123 default:
124 smu_data->power_tune_defaults = &defaults_iceland;
125 pr_warn("Unknown V.I. Device ID.\n");
126 break;
127 }
128 return;
129}
130
131static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
132{
133 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
134 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
135
136 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
137 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
138 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
139 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
140
141 return 0;
142}
143
144static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
145{
146 uint16_t tdc_limit;
147 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
148 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
149
150 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
151 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
152 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
153 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
154 defaults->tdc_vddc_throttle_release_limit_perc;
155 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
156
157 return 0;
158}
159
160static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
161{
162 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
163 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
164 uint32_t temp;
165
166 if (smu7_read_smc_sram_dword(hwmgr,
167 fuse_table_offset +
168 offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
169 (uint32_t *)&temp, SMC_RAM_END))
170 PP_ASSERT_WITH_CODE(false,
171 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
172 return -EINVAL);
173 else
174 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
175
176 return 0;
177}
178
179static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
180{
181 return 0;
182}
183
184static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
185{
186 int i;
187 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
188
189 /* Currently not used. Set all to zero. */
190 for (i = 0; i < 8; i++)
191 smu_data->power_tune_table.GnbLPML[i] = 0;
192
193 return 0;
194}
195
196static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
197{
198 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
199 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
200 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
201 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
202
203 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
204 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
205
206 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
207 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
208 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
209 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
210
211 return 0;
212}
213
214static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
215{
216 int i;
217 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
218 uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
219 uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
220
221 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
222 "The CAC Leakage table does not exist!", return -EINVAL);
223 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
224 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
225 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
226 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
227
228 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
229 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
230 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
231 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
232 }
233 } else {
234 PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
235 }
236
237 return 0;
238}
239
240static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
241{
242 int i;
243 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
244 uint8_t *vid = smu_data->power_tune_table.VddCVid;
245 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
246
247 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
248 "There should never be more than 8 entries for VddcVid!!!",
249 return -EINVAL);
250
251 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
252 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
253 }
254
255 return 0;
256}
257
258
259
260static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
261{
262 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
263 uint32_t pm_fuse_table_offset;
264
265 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
266 PHM_PlatformCaps_PowerContainment)) {
267 if (smu7_read_smc_sram_dword(hwmgr,
268 SMU71_FIRMWARE_HEADER_LOCATION +
269 offsetof(SMU71_Firmware_Header, PmFuseTable),
270 &pm_fuse_table_offset, SMC_RAM_END))
271 PP_ASSERT_WITH_CODE(false,
272 "Attempt to get pm_fuse_table_offset Failed!",
273 return -EINVAL);
274
275 /* DW0 - DW3 */
276 if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
277 PP_ASSERT_WITH_CODE(false,
278 "Attempt to populate bapm vddc vid Failed!",
279 return -EINVAL);
280
281 /* DW4 - DW5 */
282 if (iceland_populate_vddc_vid(hwmgr))
283 PP_ASSERT_WITH_CODE(false,
284 "Attempt to populate vddc vid Failed!",
285 return -EINVAL);
286
287 /* DW6 */
288 if (iceland_populate_svi_load_line(hwmgr))
289 PP_ASSERT_WITH_CODE(false,
290 "Attempt to populate SviLoadLine Failed!",
291 return -EINVAL);
292 /* DW7 */
293 if (iceland_populate_tdc_limit(hwmgr))
294 PP_ASSERT_WITH_CODE(false,
295 "Attempt to populate TDCLimit Failed!", return -EINVAL);
296 /* DW8 */
297 if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
298 PP_ASSERT_WITH_CODE(false,
299 "Attempt to populate TdcWaterfallCtl, "
300 "LPMLTemperature Min and Max Failed!",
301 return -EINVAL);
302
303 /* DW9-DW12 */
304 if (0 != iceland_populate_temperature_scaler(hwmgr))
305 PP_ASSERT_WITH_CODE(false,
306 "Attempt to populate LPMLTemperatureScaler Failed!",
307 return -EINVAL);
308
309 /* DW13-DW16 */
310 if (iceland_populate_gnb_lpml(hwmgr))
311 PP_ASSERT_WITH_CODE(false,
312 "Attempt to populate GnbLPML Failed!",
313 return -EINVAL);
314
315 /* DW18 */
316 if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
317 PP_ASSERT_WITH_CODE(false,
318 "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
319 return -EINVAL);
320
321 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
322 (uint8_t *)&smu_data->power_tune_table,
323 sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
324 PP_ASSERT_WITH_CODE(false,
325 "Attempt to download PmFuseTable Failed!",
326 return -EINVAL);
327 }
328 return 0;
329}
330
331static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
332 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
333 uint32_t clock, uint32_t *vol)
334{
335 uint32_t i = 0;
336
337 /* clock - voltage dependency table is empty table */
338 if (allowed_clock_voltage_table->count == 0)
339 return -EINVAL;
340
341 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
342 /* find first sclk bigger than request */
343 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
344 *vol = allowed_clock_voltage_table->entries[i].v;
345 return 0;
346 }
347 }
348
349 /* sclk is bigger than max sclk in the dependence table */
350 *vol = allowed_clock_voltage_table->entries[i - 1].v;
351
352 return 0;
353}
354
355static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
356 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
357 uint16_t *lo)
358{
359 uint16_t v_index;
360 bool vol_found = false;
361 *hi = tab->value * VOLTAGE_SCALE;
362 *lo = tab->value * VOLTAGE_SCALE;
363
364 /* SCLK/VDDC Dependency Table has to exist. */
365 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
366 "The SCLK/VDDC Dependency Table does not exist.\n",
367 return -EINVAL);
368
369 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
370 pr_warn("CAC Leakage Table does not exist, using vddc.\n");
371 return 0;
372 }
373
374 /*
375 * Since voltage in the sclk/vddc dependency table is not
376 * necessarily in ascending order because of ELB voltage
377 * patching, loop through entire list to find exact voltage.
378 */
379 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
380 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
381 vol_found = true;
382 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
383 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
384 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
385 } else {
386 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
387 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
388 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
389 }
390 break;
391 }
392 }
393
394 /*
395 * If voltage is not found in the first pass, loop again to
396 * find the best match, equal or higher value.
397 */
398 if (!vol_found) {
399 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
400 if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
401 vol_found = true;
402 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
403 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
404 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
405 } else {
406 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
407 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
408 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
409 }
410 break;
411 }
412 }
413
414 if (!vol_found)
415 pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
416 }
417
418 return 0;
419}
420
421static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
422 pp_atomctrl_voltage_table_entry *tab,
423 SMU71_Discrete_VoltageLevel *smc_voltage_tab)
424{
425 int result;
426
427 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
428 &smc_voltage_tab->StdVoltageHiSidd,
429 &smc_voltage_tab->StdVoltageLoSidd);
430 if (0 != result) {
431 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
432 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
433 }
434
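/*
 * Note (assumption, not from the original source): VOLTAGE_SCALE is 4 in this
 * driver, so the SMC receives voltages in 0.25 mV units; e.g. a 1100 mV entry
 * becomes 4400 before the host-to-SMC conversions below.
 */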
435 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
436 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
437 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
438
439 return 0;
440}
441
442static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
443 SMU71_Discrete_DpmTable *table)
444{
445 unsigned int count;
446 int result;
447 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
448
449 table->VddcLevelCount = data->vddc_voltage_table.count;
450 for (count = 0; count < table->VddcLevelCount; count++) {
451 result = iceland_populate_smc_voltage_table(hwmgr,
452 &(data->vddc_voltage_table.entries[count]),
453 &(table->VddcLevel[count]));
454 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDC voltage table", return -EINVAL);
455
456 /* GPIO voltage control */
457 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
458 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
459 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
460 table->VddcLevel[count].Smio = 0;
461 }
462
463 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
464
465 return 0;
466}
467
468static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
469 SMU71_Discrete_DpmTable *table)
470{
471 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
472 uint32_t count;
473 int result;
474
475 table->VddciLevelCount = data->vddci_voltage_table.count;
476
477 for (count = 0; count < table->VddciLevelCount; count++) {
478 result = iceland_populate_smc_voltage_table(hwmgr,
479 &(data->vddci_voltage_table.entries[count]),
480 &(table->VddciLevel[count]));
481 PP_ASSERT_WITH_CODE(result == 0, "Failed to populate SMC VDDCI voltage table", return -EINVAL);
482 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
483 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
484 else
485 table->VddciLevel[count].Smio |= 0;
486 }
487
488 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
489
490 return 0;
491}
492
493static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
494 SMU71_Discrete_DpmTable *table)
495{
496 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
497 uint32_t count;
498 int result;
499
500 table->MvddLevelCount = data->mvdd_voltage_table.count;
501
502 for (count = 0; count < table->MvddLevelCount; count++) {
503 result = iceland_populate_smc_voltage_table(hwmgr,
504 &(data->mvdd_voltage_table.entries[count]),
505 &table->MvddLevel[count]);
506 PP_ASSERT_WITH_CODE(result == 0, "Failed to populate SMC MVDD voltage table", return -EINVAL);
507 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
508 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
509 else
510 table->MvddLevel[count].Smio |= 0;
511 }
512
513 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
514
515 return 0;
516}
517
518
519static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
520 SMU71_Discrete_DpmTable *table)
521{
522 int result;
523
524 result = iceland_populate_smc_vddc_table(hwmgr, table);
525 PP_ASSERT_WITH_CODE(0 == result,
526 "can not populate VDDC voltage table to SMC", return -EINVAL);
527
528 result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
529 PP_ASSERT_WITH_CODE(0 == result,
530 "can not populate VDDCI voltage table to SMC", return -EINVAL);
531
532 result = iceland_populate_smc_mvdd_table(hwmgr, table);
533 PP_ASSERT_WITH_CODE(0 == result,
534 "can not populate MVDD voltage table to SMC", return -EINVAL);
535
536 return 0;
537}
538
539static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
540 struct SMU71_Discrete_Ulv *state)
541{
542 uint32_t voltage_response_time, ulv_voltage;
543 int result;
544 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
545
546 state->CcPwrDynRm = 0;
547 state->CcPwrDynRm1 = 0;
548
549 result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
550 PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
551
552 if (ulv_voltage == 0) {
553 data->ulv_supported = false;
554 return 0;
555 }
556
557 if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
558 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
559 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
560 state->VddcOffset = 0;
561 else
562 /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
563 state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
564 } else {
565 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
566 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
567 state->VddcOffsetVid = 0;
568 else /* used in SVI2 Mode */
569 state->VddcOffsetVid = (uint8_t)(
570 (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
571 * VOLTAGE_VID_OFFSET_SCALE2
572 / VOLTAGE_VID_OFFSET_SCALE1);
573 }
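/*
 * Worked example (illustrative, assuming the smu7 VOLTAGE_VID_OFFSET_SCALE2 /
 * VOLTAGE_VID_OFFSET_SCALE1 values of 100 / 625): with a minimum sclk voltage
 * of 900 mV and ulv_voltage of 850 mV, VddcOffsetVid = 50 * 100 / 625 = 8,
 * i.e. eight 6.25 mV VID steps.
 */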
574 state->VddcPhase = 1;
575
576 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
577 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
578 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
579
580 return 0;
581}
582
583static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
584 SMU71_Discrete_Ulv *ulv_level)
585{
586 return iceland_populate_ulv_level(hwmgr, ulv_level);
587}
588
589static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
590{
591 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
592 struct smu7_dpm_table *dpm_table = &data->dpm_table;
593 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
594 uint32_t i;
595
596 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
597 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
598 table->LinkLevel[i].PcieGenSpeed =
599 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
600 table->LinkLevel[i].PcieLaneCount =
601 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
602 table->LinkLevel[i].EnabledForActivity =
603 1;
604 table->LinkLevel[i].SPC =
605 (uint8_t)(data->pcie_spc_cap & 0xff);
606 table->LinkLevel[i].DownThreshold =
607 PP_HOST_TO_SMC_UL(5);
608 table->LinkLevel[i].UpThreshold =
609 PP_HOST_TO_SMC_UL(30);
610 }
611
612 smu_data->smc_state_table.LinkLevelCount =
613 (uint8_t)dpm_table->pcie_speed_table.count;
614 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
615 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
616
617 return 0;
618}
619
620/**
621 * Calculates the SCLK dividers using the provided engine clock
622 *
623 * @param hwmgr the address of the hardware manager
624 * @param engine_clock the engine clock to use to populate the structure
625 * @param sclk the SMC SCLK structure to be populated
626 */
627static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
628 uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
629{
630 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
631 pp_atomctrl_clock_dividers_vi dividers;
632 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
633 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
634 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
635 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
636 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
637 uint32_t reference_clock;
638 uint32_t reference_divider;
639 uint32_t fbdiv;
640 int result;
641
642 /* get the engine clock dividers for this clock value*/
643 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
644
645 PP_ASSERT_WITH_CODE(result == 0,
646 "Error retrieving Engine Clock dividers from VBIOS.", return result);
647
648 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
649 reference_clock = atomctrl_get_reference_clock(hwmgr);
650
651 reference_divider = 1 + dividers.uc_pll_ref_div;
652
653 /* low 14 bits is fraction and high 12 bits is divider*/
654 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
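/*
 * Illustrative example (not from the original source): given the 12.14 layout
 * noted above, a feedback divider of 56.25 would be encoded as
 * 56.25 * 16384 = 0xE1000, well within the 26-bit mask applied here.
 */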
655
656 /* SPLL_FUNC_CNTL setup*/
657 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
658 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
659 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
660 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
661
662 /* SPLL_FUNC_CNTL_3 setup*/
663 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
664 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
665
666 /* set to use fractional accumulation*/
667 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
668 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
669
670 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
671 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
672 pp_atomctrl_internal_ss_info ss_info;
673
674 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
675 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
676 /*
677 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
678 * ss_info.speed_spectrum_rate -- in unit of khz
679 */
680 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
681 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
682
683 /* clkv = 2 * D * fbdiv / NS */
684 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
685
686 cg_spll_spread_spectrum =
687 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
688 cg_spll_spread_spectrum =
689 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
690 cg_spll_spread_spectrum_2 =
691 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
692 }
693 }
694
695 sclk->SclkFrequency = engine_clock;
696 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
697 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
698 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
699 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
700 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
701
702 return 0;
703}
704
705static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
706 const struct phm_phase_shedding_limits_table *pl,
707 uint32_t sclk, uint32_t *p_shed)
708{
709 unsigned int i;
710
711 /* use the minimum phase shedding */
712 *p_shed = 1;
713
714 for (i = 0; i < pl->count; i++) {
715 if (sclk < pl->entries[i].Sclk) {
716 *p_shed = i;
717 break;
718 }
719 }
720 return 0;
721}
722
723/**
724 * Populates a single SMC graphics (SCLK) level structure using the provided engine clock
725 * @param hwmgr the address of the hardware manager
726 * @param engine_clock the engine clock to use to populate the structure
727 * @param sclk_activity_level_threshold the activity threshold for this level
728 * @param graphic_level the SMC graphics level structure to be populated
729 */
730static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
731 uint32_t engine_clock,
732 uint16_t sclk_activity_level_threshold,
733 SMU71_Discrete_GraphicsLevel *graphic_level)
734{
735 int result;
736 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
737
738 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
739
740 /* populate graphics levels*/
741 result = iceland_get_dependency_volt_by_clk(hwmgr,
742 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
743 &graphic_level->MinVddc);
744 PP_ASSERT_WITH_CODE((0 == result),
745 "can not find VDDC voltage value for VDDC engine clock dependency table",
746 return result);
747
748 /* SCLK frequency in units of 10KHz*/
749 graphic_level->SclkFrequency = engine_clock;
750 graphic_level->MinVddcPhases = 1;
751
752 if (data->vddc_phase_shed_control)
753 iceland_populate_phase_value_based_on_sclk(hwmgr,
754 hwmgr->dyn_state.vddc_phase_shed_limits_table,
755 engine_clock,
756 &graphic_level->MinVddcPhases);
757
758 /* Indicates maximum activity level for this performance level. 50% for now*/
759 graphic_level->ActivityLevel = sclk_activity_level_threshold;
760
761 graphic_level->CcPwrDynRm = 0;
762 graphic_level->CcPwrDynRm1 = 0;
763 /* this level can be used if activity is high enough.*/
764 graphic_level->EnabledForActivity = 0;
765 /* this level can be used for throttling.*/
766 graphic_level->EnabledForThrottle = 1;
767 graphic_level->UpHyst = 0;
768 graphic_level->DownHyst = 100;
769 graphic_level->VoltageDownHyst = 0;
770 graphic_level->PowerThrottle = 0;
771
772 data->display_timing.min_clock_in_sr =
773 hwmgr->display_config.min_core_set_clock_in_sr;
774
775 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
776 PHM_PlatformCaps_SclkDeepSleep))
777 graphic_level->DeepSleepDivId =
778 smu7_get_sleep_divider_id_from_clock(engine_clock,
779 data->display_timing.min_clock_in_sr);
780
781 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later.*/
782 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
783
784 if (0 == result) {
785 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
786 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
787 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
788 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
789 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
790 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
791 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
792 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
793 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
794 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
795 }
796
797 return result;
798}
799
800/**
801 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
802 *
803 * @param hwmgr the address of the hardware manager
804 */
805int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
806{
807 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
808 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
809 struct smu7_dpm_table *dpm_table = &data->dpm_table;
810 uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
811 offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
812
813 uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
814 SMU71_MAX_LEVELS_GRAPHICS;
815
816 SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
817
818 uint32_t i;
819 uint8_t highest_pcie_level_enabled = 0;
820 uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
821 uint8_t count = 0;
822 int result = 0;
823
824 memset(levels, 0x00, level_array_size);
825
826 for (i = 0; i < dpm_table->sclk_table.count; i++) {
827 result = iceland_populate_single_graphic_level(hwmgr,
828 dpm_table->sclk_table.dpm_levels[i].value,
829 (uint16_t)smu_data->activity_target[i],
830 &(smu_data->smc_state_table.GraphicsLevel[i]));
831 if (result != 0)
832 return result;
833
834 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
835 if (i > 1)
836 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
837 }
838
839 /* Only enable level 0 for now. */
840 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
841
842 /* set highest level watermark to high */
843 if (dpm_table->sclk_table.count > 1)
844 smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
845 PPSMC_DISPLAY_WATERMARK_HIGH;
846
847 smu_data->smc_state_table.GraphicsDpmLevelCount =
848 (uint8_t)dpm_table->sclk_table.count;
849 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
850 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
851
852 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
853 (1 << (highest_pcie_level_enabled + 1))) != 0) {
854 highest_pcie_level_enabled++;
855 }
856
857 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
858 (1 << lowest_pcie_level_enabled)) == 0) {
859 lowest_pcie_level_enabled++;
860 }
861
862 while ((count < highest_pcie_level_enabled) &&
863 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
864 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
865 count++;
866 }
867
868 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
869 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
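/*
 * Illustrative example (not from the original source): with
 * pcie_dpm_enable_mask = 0x7 (levels 0-2 enabled), the loops above yield
 * lowest_pcie_level_enabled = 0, highest_pcie_level_enabled = 2 and
 * mid_pcie_level_enabled = 1, which are assigned to graphics levels 0, 1
 * and 2+ respectively below.
 */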
870
871
872 /* set pcieDpmLevel to highest_pcie_level_enabled*/
873 for (i = 2; i < dpm_table->sclk_table.count; i++) {
874 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
875 }
876
877 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
878 smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
879
880 /* set pcieDpmLevel to mid_pcie_level_enabled*/
881 smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
882
883 /* The level count is sent to the SMC once, at SMC table init, and never changes */
884 result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
885 (uint8_t *)levels, (uint32_t)level_array_size,
886 SMC_RAM_END);
887
888 return result;
889}
890
891/**
892 * Populates the SMC MCLK level structure using the provided memory clock
893 *
894 * @param hwmgr the address of the hardware manager
895 * @param memory_clock the memory clock to use to populate the structure
896 * @param mclk the SMC memory level structure to be populated
897 */
898static int iceland_calculate_mclk_params(
899 struct pp_hwmgr *hwmgr,
900 uint32_t memory_clock,
901 SMU71_Discrete_MemoryLevel *mclk,
902 bool strobe_mode,
903 bool dllStateOn
904 )
905{
906 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
907
908 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
909 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
910 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
911 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
912 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
913 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
914 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
915 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
916 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
917
918 pp_atomctrl_memory_clock_param mpll_param;
919 int result;
920
921 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
922 memory_clock, &mpll_param, strobe_mode);
923 PP_ASSERT_WITH_CODE(0 == result,
924 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
925
926 /* MPLL_FUNC_CNTL setup*/
927 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
928
929 /* MPLL_FUNC_CNTL_1 setup*/
930 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
931 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
932 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
933 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
934 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
935 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
936
937 /* MPLL_AD_FUNC_CNTL setup*/
938 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
939 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
940
941 if (data->is_memory_gddr5) {
942 /* MPLL_DQ_FUNC_CNTL setup*/
943 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
944 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
945 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
946 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
947 }
948
949 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
950 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
951 /*
952 ************************************
953 Fref = Reference Frequency
954 NF = Feedback divider ratio
955 NR = Reference divider ratio
956 Fnom = Nominal VCO output frequency = Fref * NF / NR
957 Fs = Spreading Rate
958 D = Percentage down-spread / 2
959 Fint = Reference input frequency to PFD = Fref / NR
960 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
961 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
962 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
963 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
964 *************************************
965 */
966 pp_atomctrl_internal_ss_info ss_info;
967 uint32_t freq_nom;
968 uint32_t tmp;
969 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
970
971 /* for GDDR5 for all modes and DDR3 */
972 if (1 == mpll_param.qdr)
973 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
974 else
975 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
976
977 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
978 tmp = (freq_nom / reference_clock);
979 tmp = tmp * tmp;
980
981 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
982 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
983 /* ss_info.speed_spectrum_rate -- in unit of khz */
984 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
985 /* = reference_clock * 5 / speed_spectrum_rate */
986 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
987
988 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
989 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
990 uint32_t clkv =
991 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
992 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
993
994 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
995 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
996 }
997 }
998
999 /* MCLK_PWRMGT_CNTL setup */
1000 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1001 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1002 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1003 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1004 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1005 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1006
1007
1008 /* Save the result data to output memory level structure */
1009 mclk->MclkFrequency = memory_clock;
1010 mclk->MpllFuncCntl = mpll_func_cntl;
1011 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1012 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1013 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1014 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1015 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1016 mclk->DllCntl = dll_cntl;
1017 mclk->MpllSs1 = mpll_ss1;
1018 mclk->MpllSs2 = mpll_ss2;
1019
1020 return 0;
1021}
1022
1023static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
1024 bool strobe_mode)
1025{
1026 uint8_t mc_para_index;
1027
1028 if (strobe_mode) {
1029 if (memory_clock < 12500) {
1030 mc_para_index = 0x00;
1031 } else if (memory_clock > 47500) {
1032 mc_para_index = 0x0f;
1033 } else {
1034 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
1035 }
1036 } else {
1037 if (memory_clock < 65000) {
1038 mc_para_index = 0x00;
1039 } else if (memory_clock > 135000) {
1040 mc_para_index = 0x0f;
1041 } else {
1042 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
1043 }
1044 }
1045
1046 return mc_para_index;
1047}
1048
1049static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
1050{
1051 uint8_t mc_para_index;
1052
1053 if (memory_clock < 10000) {
1054 mc_para_index = 0;
1055 } else if (memory_clock >= 80000) {
1056 mc_para_index = 0x0f;
1057 } else {
1058 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
1059 }
1060
1061 return mc_para_index;
1062}
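/*
 * Illustrative examples (not from the original source), with memory_clock in
 * 10 kHz units: in GDDR5 strobe mode a 300 MHz clock (30000) maps to
 * (30000 - 10000) / 2500 = 8, and for DDR3 a 400 MHz clock (40000) maps to
 * (40000 - 10000) / 5000 + 1 = 7.
 */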
1063
1064static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1065 uint32_t memory_clock, uint32_t *p_shed)
1066{
1067 unsigned int i;
1068
1069 *p_shed = 1;
1070
1071 for (i = 0; i < pl->count; i++) {
1072 if (memory_clock < pl->entries[i].Mclk) {
1073 *p_shed = i;
1074 break;
1075 }
1076 }
1077
1078 return 0;
1079}
1080
1081static int iceland_populate_single_memory_level(
1082 struct pp_hwmgr *hwmgr,
1083 uint32_t memory_clock,
1084 SMU71_Discrete_MemoryLevel *memory_level
1085 )
1086{
1087 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1088 int result = 0;
1089 bool dll_state_on;
1090 struct cgs_display_info info = {0};
1091 uint32_t mclk_edc_wr_enable_threshold = 40000;
1092 uint32_t mclk_edc_enable_threshold = 40000;
1093 uint32_t mclk_strobe_mode_threshold = 40000;
1094
1095 if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1096 result = iceland_get_dependency_volt_by_clk(hwmgr,
1097 hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1098 PP_ASSERT_WITH_CODE((0 == result),
1099 "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1100 }
1101
1102 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
1103 memory_level->MinVddci = memory_level->MinVddc;
1104 } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1105 result = iceland_get_dependency_volt_by_clk(hwmgr,
1106 hwmgr->dyn_state.vddci_dependency_on_mclk,
1107 memory_clock,
1108 &memory_level->MinVddci);
1109 PP_ASSERT_WITH_CODE((0 == result),
1110 "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1111 }
1112
1113 memory_level->MinVddcPhases = 1;
1114
1115 if (data->vddc_phase_shed_control) {
1116 iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1117 memory_clock, &memory_level->MinVddcPhases);
1118 }
1119
1120 memory_level->EnabledForThrottle = 1;
1121 memory_level->EnabledForActivity = 0;
1122 memory_level->UpHyst = 0;
1123 memory_level->DownHyst = 100;
1124 memory_level->VoltageDownHyst = 0;
1125
1126 /* Indicates maximum activity level for this performance level.*/
1127 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1128 memory_level->StutterEnable = 0;
1129 memory_level->StrobeEnable = 0;
1130 memory_level->EdcReadEnable = 0;
1131 memory_level->EdcWriteEnable = 0;
1132 memory_level->RttEnable = 0;
1133
1134 /* default set to low watermark. Highest level will be set to high later.*/
1135 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1136
1137 cgs_get_active_displays_info(hwmgr->device, &info);
1138 data->display_timing.num_existing_displays = info.display_count;
1139
1140 /* stutter mode is not supported on iceland */
1141
1142 /* decide strobe mode*/
1143 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1144 (memory_clock <= mclk_strobe_mode_threshold);
1145
1146 /* decide EDC mode and memory clock ratio*/
1147 if (data->is_memory_gddr5) {
1148 memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
1149 memory_level->StrobeEnable);
1150
1151 if ((mclk_edc_enable_threshold != 0) &&
1152 (memory_clock > mclk_edc_enable_threshold)) {
1153 memory_level->EdcReadEnable = 1;
1154 }
1155
1156 if ((mclk_edc_wr_enable_threshold != 0) &&
1157 (memory_clock > mclk_edc_wr_enable_threshold)) {
1158 memory_level->EdcWriteEnable = 1;
1159 }
1160
1161 if (memory_level->StrobeEnable) {
1162 if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
1163 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1164 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1165 else
1166 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1167 } else
1168 dll_state_on = data->dll_default_on;
1169 } else {
1170 memory_level->StrobeRatio =
1171 iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
1172 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1173 }
1174
1175 result = iceland_calculate_mclk_params(hwmgr,
1176 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1177
1178 if (0 == result) {
1179 memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1180 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1181 memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1182 memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1183 /* MCLK frequency in units of 10KHz*/
1184 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1185 /* Indicates maximum activity level for this performance level.*/
1186 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1187 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1188 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1189 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1190 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1191 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1192 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1193 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1194 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1195 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1196 }
1197
1198 return result;
1199}
1200
1201/**
1202 * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
1203 *
1204 * @param hwmgr the address of the hardware manager
1205 */
1206
1207int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1208{
1209 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1210 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1211 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1212 int result;
1213
1214 /* populate MCLK dpm table to SMU7 */
1215 uint32_t level_array_address = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
1216 uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
1217 SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
1218 uint32_t i;
1219
1220 memset(levels, 0x00, level_array_size);
1221
1222 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1223 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1224 "can not populate memory level as memory clock is zero", return -EINVAL);
1225 result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
1226 &(smu_data->smc_state_table.MemoryLevel[i]));
1227 if (0 != result) {
1228 return result;
1229 }
1230 }
1231
1232 /* Only enable level 0 for now.*/
1233 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1234
1235 /*
1236 * In order to prevent MC activity in stutter mode from pushing DPM up, the
1237 * UVD change complements this by putting the MCLK in a higher state by
1238 * default, so that we are not affected by the up threshold or MCLK DPM latency.
1239 */
1240 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1241 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1242
1243 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1244 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1245 /* set highest level watermark to high*/
1246 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1247
1248 /* The level count is sent to the SMC once, at SMC table init, and never changes */
1249 result = smu7_copy_bytes_to_smc(hwmgr,
1250 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1251 SMC_RAM_END);
1252
1253 return result;
1254}
1255
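/*
 * Illustrative example (not from the original source): if the
 * mvdd_dependency_on_mclk table has clk entries of 150000 and 300000
 * (10 kHz units), a request of 200000 matches the second entry and the
 * corresponding mvdd_voltage_table value is used.
 */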
1256static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1257 SMU71_Discrete_VoltageLevel *voltage)
1258{
1259 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1260
1261 uint32_t i = 0;
1262
1263 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1264 /* find the first mvdd entry whose clock is not lower than the requested mclk */
1265 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1266 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1267 /* Always round to higher voltage. */
1268 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1269 break;
1270 }
1271 }
1272
1273 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1274 "MVDD Voltage is outside the supported range.", return -EINVAL);
1275
1276 } else {
1277 return -EINVAL;
1278 }
1279
1280 return 0;
1281}
1282
1283static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1284 SMU71_Discrete_DpmTable *table)
1285{
1286 int result = 0;
1287 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1288 struct pp_atomctrl_clock_dividers_vi dividers;
1289 uint32_t vddc_phase_shed_control = 0;
1290
1291 SMU71_Discrete_VoltageLevel voltage_level;
1292 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1293 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1294 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1295 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1296
1297
1298 /* The ACPI state should not do DPM on DC (or ever).*/
1299 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1300
1301 if (data->acpi_vddc)
1302 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1303 else
1304 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1305
1306 table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
1307 /* assign zero for now*/
1308 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1309
1310 /* get the engine clock dividers for this clock value*/
1311 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1312 table->ACPILevel.SclkFrequency, &dividers);
1313
1314 PP_ASSERT_WITH_CODE(result == 0,
1315 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1316
1317 /* divider ID for required SCLK*/
1318 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1319 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1320 table->ACPILevel.DeepSleepDivId = 0;
1321
1322 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1323 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1324 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1325 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1326 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1327 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1328
1329 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1330 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1331 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1332 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1333 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1334 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1335 table->ACPILevel.CcPwrDynRm = 0;
1336 table->ACPILevel.CcPwrDynRm1 = 0;
1337
1338
1339 /* For various features to be enabled/disabled while this level is active.*/
1340 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1341 /* SCLK frequency in units of 10KHz*/
1342 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1343 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1344 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1345 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1346 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1347 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1348 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1349 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1350 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1351
1352 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1353 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1354 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1355
1356 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1357 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1358 else {
1359 if (data->acpi_vddci != 0)
1360 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1361 else
1362 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1363 }
1364
1365 if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
1366 table->MemoryACPILevel.MinMvdd =
1367 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1368 else
1369 table->MemoryACPILevel.MinMvdd = 0;
1370
1371 /* Force reset on DLL*/
1372 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1373 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1374 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1375 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1376
1377 /* Disable DLL in ACPIState*/
1378 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1379 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1380 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1381 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1382
1383 /* Enable DLL bypass signal*/
1384 dll_cntl = PHM_SET_FIELD(dll_cntl,
1385 DLL_CNTL, MRDCK0_BYPASS, 0);
1386 dll_cntl = PHM_SET_FIELD(dll_cntl,
1387 DLL_CNTL, MRDCK1_BYPASS, 0);
1388
1389 table->MemoryACPILevel.DllCntl =
1390 PP_HOST_TO_SMC_UL(dll_cntl);
1391 table->MemoryACPILevel.MclkPwrmgtCntl =
1392 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1393 table->MemoryACPILevel.MpllAdFuncCntl =
1394 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1395 table->MemoryACPILevel.MpllDqFuncCntl =
1396 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1397 table->MemoryACPILevel.MpllFuncCntl =
1398 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1399 table->MemoryACPILevel.MpllFuncCntl_1 =
1400 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1401 table->MemoryACPILevel.MpllFuncCntl_2 =
1402 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1403 table->MemoryACPILevel.MpllSs1 =
1404 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1405 table->MemoryACPILevel.MpllSs2 =
1406 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1407
1408 table->MemoryACPILevel.EnabledForThrottle = 0;
1409 table->MemoryACPILevel.EnabledForActivity = 0;
1410 table->MemoryACPILevel.UpHyst = 0;
1411 table->MemoryACPILevel.DownHyst = 100;
1412 table->MemoryACPILevel.VoltageDownHyst = 0;
1413 /* Indicates maximum activity level for this performance level.*/
1414 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1415
1416 table->MemoryACPILevel.StutterEnable = 0;
1417 table->MemoryACPILevel.StrobeEnable = 0;
1418 table->MemoryACPILevel.EdcReadEnable = 0;
1419 table->MemoryACPILevel.EdcWriteEnable = 0;
1420 table->MemoryACPILevel.RttEnable = 0;
1421
1422 return result;
1423}
1424
1425static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1426 SMU71_Discrete_DpmTable *table)
1427{
1428 return 0;
1429}
1430
1431static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1432 SMU71_Discrete_DpmTable *table)
1433{
1434 return 0;
1435}
1436
1437static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1438 SMU71_Discrete_DpmTable *table)
1439{
1440 return 0;
1441}
1442
1443static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1444 SMU71_Discrete_DpmTable *table)
1445{
1446 return 0;
1447}
1448
1449static int iceland_populate_memory_timing_parameters(
1450 struct pp_hwmgr *hwmgr,
1451 uint32_t engine_clock,
1452 uint32_t memory_clock,
1453 struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
1454 )
1455{
1456 uint32_t dramTiming;
1457 uint32_t dramTiming2;
1458 uint32_t burstTime;
1459 int result;
1460
1461 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1462 engine_clock, memory_clock);
1463
1464 PP_ASSERT_WITH_CODE(result == 0,
1465 "Error calling VBIOS to set DRAM_TIMING.", return result);
1466
1467 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1468 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1469 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1470
1471 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1472 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1473 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1474
1475 return 0;
1476}
1477
1478/**
1479 * Setup parameters for the MC ARB.
1480 *
1481 * @param hwmgr the address of the powerplay hardware manager.
1482 * @return 0 on success, otherwise an error code
1483 * This function is to be called from the SetPowerState table.
1484 */
1485static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1486{
1487 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1488 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1489 int result = 0;
1490 SMU71_Discrete_MCArbDramTimingTable arb_regs;
1491 uint32_t i, j;
1492
1493 memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
1494
1495 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1496 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1497 result = iceland_populate_memory_timing_parameters
1498 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1499 data->dpm_table.mclk_table.dpm_levels[j].value,
1500 &arb_regs.entries[i][j]);
1501
1502 if (0 != result) {
1503 break;
1504 }
1505 }
1506 }
1507
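/*
 * Illustrative note (not from the original source): arb_regs is an
 * SCLK x MCLK matrix, so with e.g. 4 sclk levels and 3 mclk levels the loops
 * above fill 12 entries before the table is copied to SMC RAM below.
 */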
1508 if (0 == result) {
1509 result = smu7_copy_bytes_to_smc(
1510 hwmgr,
1511 smu_data->smu7_data.arb_table_start,
1512 (uint8_t *)&arb_regs,
1513 sizeof(SMU71_Discrete_MCArbDramTimingTable),
1514 SMC_RAM_END
1515 );
1516 }
1517
1518 return result;
1519}
1520
1521static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1522 SMU71_Discrete_DpmTable *table)
1523{
1524 int result = 0;
1525 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1526 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1527 table->GraphicsBootLevel = 0;
1528 table->MemoryBootLevel = 0;
1529
1530 /* find boot level from dpm table*/
1531 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1532 data->vbios_boot_state.sclk_bootup_value,
1533 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1534
1535 if (0 != result) {
1536 smu_data->smc_state_table.GraphicsBootLevel = 0;
1537 pr_err("VBIOS did not find boot engine clock value \
1538 in dependency table. Using Graphics DPM level 0!");
1539 result = 0;
1540 }
1541
1542 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1543 data->vbios_boot_state.mclk_bootup_value,
1544 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1545
1546 if (0 != result) {
1547 smu_data->smc_state_table.MemoryBootLevel = 0;
1548 pr_err("VBIOS did not find boot engine clock value \
1549 in dependency table. Using Memory DPM level 0!");
1550 result = 0;
1551 }
1552
1553 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1554 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1555 table->BootVddci = table->BootVddc;
1556 else
1557 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1558
1559 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1560
1561 return result;
1562}
1563
1564static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1565 SMU71_Discrete_MCRegisters *mc_reg_table)
1566{
1567 const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
1568
1569 uint32_t i, j;
1570
1571 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1572 if (smu_data->mc_reg_table.validflag & 1<<j) {
1573 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1574 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1575 mc_reg_table->address[i].s0 =
1576 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1577 mc_reg_table->address[i].s1 =
1578 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1579 i++;
1580 }
1581 }
1582
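/*
 * Illustrative example (not from the original source): with last = 4 and
 * validflag = 0xB (bits 0, 1 and 3 set), the loop above packs register
 * addresses 0, 1 and 3 into slots 0..2, so mc_reg_table->last becomes 3.
 */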
1583 mc_reg_table->last = (uint8_t)i;
1584
1585 return 0;
1586}
1587
1588/*convert register values from driver to SMC format */
1589static void iceland_convert_mc_registers(
1590 const struct iceland_mc_reg_entry *entry,
1591 SMU71_Discrete_MCRegisterSet *data,
1592 uint32_t num_entries, uint32_t valid_flag)
1593{
1594 uint32_t i, j;
1595
1596 for (i = 0, j = 0; j < num_entries; j++) {
1597 if (valid_flag & 1<<j) {
1598 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1599 i++;
1600 }
1601 }
1602}
1603
1604static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
1605 const uint32_t memory_clock,
1606 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
1607 )
1608{
1609 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1610 uint32_t i = 0;
1611
1612 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1613 if (memory_clock <=
1614 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1615 break;
1616 }
1617 }
1618
1619 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1620 --i;
1621
1622 iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1623 mc_reg_table_data, smu_data->mc_reg_table.last,
1624 smu_data->mc_reg_table.validflag);
1625
1626 return 0;
1627}
1628
1629static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1630 SMU71_Discrete_MCRegisters *mc_regs)
1631{
1632 int result = 0;
1633 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1634 int res;
1635 uint32_t i;
1636
1637 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1638 res = iceland_convert_mc_reg_table_entry_to_smc(
1639 hwmgr,
1640 data->dpm_table.mclk_table.dpm_levels[i].value,
1641 &mc_regs->data[i]
1642 );
1643
1644 if (0 != res)
1645 result = res;
1646 }
1647
1648 return result;
1649}
1650
1651static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1652{
1653 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1654 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1655 uint32_t address;
1656 int32_t result;
1657
1658 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1659 return 0;
1660
1661
1662 memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
1663
1664 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1665
1666 if (result != 0)
1667 return result;
1668
1669
1670 address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
1671
1672 return smu7_copy_bytes_to_smc(hwmgr, address,
1673 (uint8_t *)&smu_data->mc_regs.data[0],
1674 sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1675 SMC_RAM_END);
1676}
1677
1678static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1679{
1680 int result;
1681 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1682
1683 memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
1684 result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1685 PP_ASSERT_WITH_CODE(0 == result,
1686 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1687
1688 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1689 PP_ASSERT_WITH_CODE(0 == result,
1690 "Failed to initialize MCRegTable for driver state!", return result;);
1691
1692 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
1693 (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
1694}
1695
1696static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1697{
1698 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1699 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1700 uint8_t count, level;
1701
1702 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1703
1704 for (level = 0; level < count; level++) {
1705 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1706 >= data->vbios_boot_state.sclk_bootup_value) {
1707 smu_data->smc_state_table.GraphicsBootLevel = level;
1708 break;
1709 }
1710 }
1711
1712 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1713
1714 for (level = 0; level < count; level++) {
1715 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1716 >= data->vbios_boot_state.mclk_bootup_value) {
1717 smu_data->smc_state_table.MemoryBootLevel = level;
1718 break;
1719 }
1720 }
1721
1722 return 0;
1723}
1724
1725static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1726{
1727 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1728 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1729 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
1730 SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1731 struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
1732 struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
1733 const uint16_t *def1, *def2;
1734 int i, j, k;
1735
1736
1737 /*
1738 * The number of TDP fraction bits was changed from 8 to 7 for Iceland,
1739 * as requested by the SMC team.
1740 */
1741
1742 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
1743 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1744
1745
1746 dpm_table->DTETjOffset = 0;
1747
1748 dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
1749 dpm_table->GpuTjHyst = 8;
1750
1751 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1752
1753 /* The following are for new Iceland Multi-input fan/thermal control */
1754 if (NULL != ppm) {
1755 dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
1756 dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
1757 } else {
1758 dpm_table->PPM_PkgPwrLimit = 0;
1759 dpm_table->PPM_TemperatureLimit = 0;
1760 }
1761
1762 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
1763 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
1764
1765 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
1766 def1 = defaults->bapmti_r;
1767 def2 = defaults->bapmti_rc;
1768
1769 for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
1770 for (j = 0; j < SMU71_DTE_SOURCES; j++) {
1771 for (k = 0; k < SMU71_DTE_SINKS; k++) {
1772 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
1773 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
1774 def1++;
1775 def2++;
1776 }
1777 }
1778 }
1779
1780 return 0;
1781}
1782
1783static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1784 SMU71_Discrete_DpmTable *tab)
1785{
1786 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1787
1788 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1789 tab->SVI2Enable |= VDDC_ON_SVI2;
1790
1791 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1792 tab->SVI2Enable |= VDDCI_ON_SVI2;
1793 else
1794 tab->MergedVddci = 1;
1795
1796 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1797 tab->SVI2Enable |= MVDD_ON_SVI2;
1798
1799 PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1800 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1801
1802 return 0;
1803}
1804
1805/**
1806 * Initializes the SMC table and uploads it
1807 *
1808 * @param hwmgr the address of the powerplay hardware manager.
1809 * @return 0 on success, otherwise an error code
1810 */
1812int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
1813{
1814 int result;
1815 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1816 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1817 SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1818
1819
1820 iceland_initialize_power_tune_defaults(hwmgr);
1821 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1822
1823 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
1824 iceland_populate_smc_voltage_tables(hwmgr, table);
1825 }
1826
1827 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1828 PHM_PlatformCaps_AutomaticDCTransition))
1829 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1830
1831
1832 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1833 PHM_PlatformCaps_StepVddc))
1834 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1835
1836 if (data->is_memory_gddr5)
1837 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1838
1839
1840 if (data->ulv_supported) {
1841 result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
1842 PP_ASSERT_WITH_CODE(0 == result,
1843 "Failed to initialize ULV state!", return result;);
1844
1845 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1846 ixCG_ULV_PARAMETER, 0x40035);
1847 }
1848
1849 result = iceland_populate_smc_link_level(hwmgr, table);
1850 PP_ASSERT_WITH_CODE(0 == result,
1851 "Failed to initialize Link Level!", return result;);
1852
1853 result = iceland_populate_all_graphic_levels(hwmgr);
1854 PP_ASSERT_WITH_CODE(0 == result,
1855 "Failed to initialize Graphics Level!", return result;);
1856
1857 result = iceland_populate_all_memory_levels(hwmgr);
1858 PP_ASSERT_WITH_CODE(0 == result,
1859 "Failed to initialize Memory Level!", return result;);
1860
1861 result = iceland_populate_smc_acpi_level(hwmgr, table);
1862 PP_ASSERT_WITH_CODE(0 == result,
1863 "Failed to initialize ACPI Level!", return result;);
1864
1865 result = iceland_populate_smc_vce_level(hwmgr, table);
1866 PP_ASSERT_WITH_CODE(0 == result,
1867 "Failed to initialize VCE Level!", return result;);
1868
1869 result = iceland_populate_smc_acp_level(hwmgr, table);
1870 PP_ASSERT_WITH_CODE(0 == result,
1871 "Failed to initialize ACP Level!", return result;);
1872
1873 result = iceland_populate_smc_samu_level(hwmgr, table);
1874 PP_ASSERT_WITH_CODE(0 == result,
1875 "Failed to initialize SAMU Level!", return result;);
1876
1877 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
1878 /* need to populate the ARB settings for the initial state. */
1879 result = iceland_program_memory_timing_parameters(hwmgr);
1880 PP_ASSERT_WITH_CODE(0 == result,
1881 "Failed to Write ARB settings for the initial state.", return result;);
1882
1883 result = iceland_populate_smc_uvd_level(hwmgr, table);
1884 PP_ASSERT_WITH_CODE(0 == result,
1885 "Failed to initialize UVD Level!", return result;);
1886
1887 table->GraphicsBootLevel = 0;
1888 table->MemoryBootLevel = 0;
1889
1890 result = iceland_populate_smc_boot_level(hwmgr, table);
1891 PP_ASSERT_WITH_CODE(0 == result,
1892 "Failed to initialize Boot Level!", return result;);
1893
1894 result = iceland_populate_smc_initial_state(hwmgr);
1895 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
1896
1897 result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
1898 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
1899
1900 table->GraphicsVoltageChangeEnable = 1;
1901 table->GraphicsThermThrottleEnable = 1;
1902 table->GraphicsInterval = 1;
1903 table->VoltageInterval = 1;
1904 table->ThermalInterval = 1;
1905
1906 table->TemperatureLimitHigh =
1907 (data->thermal_temp_setting.temperature_high *
1908 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1909 table->TemperatureLimitLow =
1910 (data->thermal_temp_setting.temperature_low *
1911 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
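/*
 * Worked example (illustrative, assuming SMU7_Q88_FORMAT_CONVERSION_UNIT is
 * 256 and temperatures are stored in millidegrees): a 99000 (99 C) high limit
 * becomes 99000 * 256 / 1000 = 25344, i.e. 99.0 in Q8.8 format.
 */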
1912
1913 table->MemoryVoltageChangeEnable = 1;
1914 table->MemoryInterval = 1;
1915 table->VoltageResponseTime = 0;
1916 table->PhaseResponseTime = 0;
1917 table->MemoryThermThrottleEnable = 1;
1918 table->PCIeBootLinkLevel = 0;
1919 table->PCIeGenInterval = 1;
1920
1921 result = iceland_populate_smc_svi2_config(hwmgr, table);
1922 PP_ASSERT_WITH_CODE(0 == result,
1923 "Failed to populate SVI2 setting!", return result);
1924
1925 table->ThermGpio = 17;
1926 table->SclkStepSize = 0x4000;
1927
1928 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1929 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
1930 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
1931 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
1932 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
1933 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1934 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1935 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1936 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1937 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1938
1939 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
1940 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
1941 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
1942
1943	/* Upload all DPM data (DPM levels, level count, etc.) to SMC memory. */
1944 result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
1945 offsetof(SMU71_Discrete_DpmTable, SystemFlags),
1946 (uint8_t *)&(table->SystemFlags),
1947 sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
1948 SMC_RAM_END);
1949
1950 PP_ASSERT_WITH_CODE(0 == result,
1951 "Failed to upload dpm data to SMC memory!", return result;);
1952
1953	/* Upload the ULV settings to SMC memory. */
1954 result = smu7_copy_bytes_to_smc(hwmgr,
1955 smu_data->smu7_data.ulv_setting_starts,
1956 (uint8_t *)&(smu_data->ulv_setting),
1957 sizeof(SMU71_Discrete_Ulv),
1958 SMC_RAM_END);
1959
1960
1961 result = iceland_populate_initial_mc_reg_table(hwmgr);
1962 PP_ASSERT_WITH_CODE((0 == result),
1963 "Failed to populate initialize MC Reg table!", return result);
1964
1965 result = iceland_populate_pm_fuses(hwmgr);
1966 PP_ASSERT_WITH_CODE(0 == result,
1967 "Failed to populate PM fuses to SMC memory!", return result);
1968
1969 return 0;
1970}
1971
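The TemperatureLimitHigh/Low conversion above turns the driver's thermal setting into the SMC's Q8.8 fixed-point format. Below is a minimal standalone sketch of that arithmetic (editor's illustration, not part of the commit), assuming SMU7_Q88_FORMAT_CONVERSION_UNIT is 256 and PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (neither define is visible in this diff); temp_limit_to_q88 is a hypothetical helper name.

#include <stdint.h>

/* Editor's sketch: convert a limit in 1/1000 degC into Q8.8 degrees,
 * mirroring (temp * SMU7_Q88_FORMAT_CONVERSION_UNIT) /
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES with the assumed values 256 and 1000. */
uint16_t temp_limit_to_q88(uint32_t temp_millidegrees)
{
	return (uint16_t)((temp_millidegrees * 256u) / 1000u);
}

/* e.g. a 99000 (99 degC) limit becomes 99 * 256 = 25344 in Q8.8 */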
1972/**
1973* Set up the fan table to control the fan using the SMC.
1974*
1975* @param hwmgr the address of the powerplay hardware manager.
1976* @return always 0.  If microcode fan control is disabled, no fan is
1977* present, the fan table offset is unknown, or FMAX_DUTY100 reads back
1978* as zero, the MicrocodeFanControl capability is cleared and the fan
1979* table is not uploaded.
1980*/
1981int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1982{
1983 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
1984 SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1985 uint32_t duty100;
1986 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1987 uint16_t fdo_min, slope1, slope2;
1988 uint32_t reference_clock;
1989 int res;
1990 uint64_t tmp64;
1991
1992 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
1993 return 0;
1994
1995 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
1996 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1997 PHM_PlatformCaps_MicrocodeFanControl);
1998 return 0;
1999 }
2000
2001 if (0 == smu7_data->fan_table_start) {
2002 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2003 return 0;
2004 }
2005
2006 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2007
2008 if (0 == duty100) {
2009 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2010 return 0;
2011 }
2012
2013 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2014 do_div(tmp64, 10000);
2015 fdo_min = (uint16_t)tmp64;
2016
2017 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2018 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2019
2020 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2021 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2022
2023 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2024 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2025
2026 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2027 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2028 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2029
2030 fan_table.Slope1 = cpu_to_be16(slope1);
2031 fan_table.Slope2 = cpu_to_be16(slope2);
2032
2033 fan_table.FdoMin = cpu_to_be16(fdo_min);
2034
2035 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2036
2037 fan_table.HystUp = cpu_to_be16(1);
2038
2039 fan_table.HystSlope = cpu_to_be16(1);
2040
2041 fan_table.TempRespLim = cpu_to_be16(5);
2042
2043 reference_clock = smu7_get_xclk(hwmgr);
2044
2045 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2046
2047 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2048
2049 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2050
2051 /* fan_table.FanControl_GL_Flag = 1; */
2052
2053 res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2054
2055 return 0;
2056}
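The slope and minimum-duty math in iceland_thermal_setup_fan_table() can be read in isolation as the sketch below (editor's illustration with hypothetical helper names): duty100 is the FMAX_DUTY100 full-scale value, temperatures are in 0.01 degC and PWM values in 0.01 %, and the +50 before the final division by 100 rounds to the nearest unit.

#include <stdint.h>

/* Editor's sketch of the fan-curve slope computed for Slope1/Slope2. */
uint16_t fan_curve_slope(uint32_t duty100, uint32_t t_diff, uint32_t pwm_diff)
{
	/* 16 * duty100 * pwm_diff / t_diff, rounded to the nearest 1/100 */
	return (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
}

/* Editor's sketch of FdoMin: usPWMMin (0.01 %) scaled onto the 0..duty100 range. */
uint16_t fan_fdo_min(uint32_t duty100, uint16_t pwm_min)
{
	return (uint16_t)(((uint64_t)pwm_min * duty100) / 10000);
}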
2057
2058
2059static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2060{
2061 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2062
2063 if (data->need_update_smu7_dpm_table &
2064 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2065 return iceland_program_memory_timing_parameters(hwmgr);
2066
2067 return 0;
2068}
2069
2070int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2071{
2072 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2073 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
2074
2075 int result = 0;
2076 uint32_t low_sclk_interrupt_threshold = 0;
2077
2078 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2079 PHM_PlatformCaps_SclkThrottleLowNotification)
2080 && (hwmgr->gfx_arbiter.sclk_threshold !=
2081 data->low_sclk_interrupt_threshold)) {
2082 data->low_sclk_interrupt_threshold =
2083 hwmgr->gfx_arbiter.sclk_threshold;
2084 low_sclk_interrupt_threshold =
2085 data->low_sclk_interrupt_threshold;
2086
2087 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2088
2089 result = smu7_copy_bytes_to_smc(
2090 hwmgr,
2091 smu_data->smu7_data.dpm_table_start +
2092 offsetof(SMU71_Discrete_DpmTable,
2093 LowSclkInterruptThreshold),
2094 (uint8_t *)&low_sclk_interrupt_threshold,
2095 sizeof(uint32_t),
2096 SMC_RAM_END);
2097 }
2098
2099 result = iceland_update_and_upload_mc_reg_table(hwmgr);
2100
2101 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2102
2103 result = iceland_program_mem_timing_parameters(hwmgr);
2104 PP_ASSERT_WITH_CODE((result == 0),
2105 "Failed to program memory timing parameters!",
2106 );
2107
2108 return result;
2109}
2110
2111uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
2112{
2113 switch (type) {
2114 case SMU_SoftRegisters:
2115 switch (member) {
2116 case HandshakeDisables:
2117 return offsetof(SMU71_SoftRegisters, HandshakeDisables);
2118 case VoltageChangeTimeout:
2119 return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
2120 case AverageGraphicsActivity:
2121 return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
2122 case PreVBlankGap:
2123 return offsetof(SMU71_SoftRegisters, PreVBlankGap);
2124 case VBlankTimeout:
2125 return offsetof(SMU71_SoftRegisters, VBlankTimeout);
2126 case UcodeLoadStatus:
2127 return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
2128 }
2129 case SMU_Discrete_DpmTable:
2130 switch (member) {
2131 case LowSclkInterruptThreshold:
2132 return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
2133 }
2134 }
2135 pr_warn("can't get the offset of type %x member %x\n", type, member);
2136 return 0;
2137}
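iceland_get_offsetof() maps a symbolic member to its byte offset inside the SMU71 firmware structures so that a single field can be patched in SMC RAM. The sketch below shows the underlying offsetof pattern with a made-up demo_dpm_table struct and patch_member helper (hypothetical, for illustration only; not the real SMU71 headers):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for an SMC-resident table. */
struct demo_dpm_table {
	uint32_t SystemFlags;
	uint32_t LowSclkInterruptThreshold;
};

/* Write one 32-bit member into a byte image of the table at its offset,
 * the same way dpm_table_start + offsetof(...) is combined with
 * smu7_copy_bytes_to_smc() in the code above. */
void patch_member(uint8_t *table_image, size_t member_offset, uint32_t value)
{
	memcpy(table_image + member_offset, &value, sizeof(value));
}

/* usage: patch_member(image,
 *                     offsetof(struct demo_dpm_table, LowSclkInterruptThreshold),
 *                     threshold); */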
2138
2139uint32_t iceland_get_mac_definition(uint32_t value)
2140{
2141 switch (value) {
2142 case SMU_MAX_LEVELS_GRAPHICS:
2143 return SMU71_MAX_LEVELS_GRAPHICS;
2144 case SMU_MAX_LEVELS_MEMORY:
2145 return SMU71_MAX_LEVELS_MEMORY;
2146 case SMU_MAX_LEVELS_LINK:
2147 return SMU71_MAX_LEVELS_LINK;
2148 case SMU_MAX_ENTRIES_SMIO:
2149 return SMU71_MAX_ENTRIES_SMIO;
2150 case SMU_MAX_LEVELS_VDDC:
2151 return SMU71_MAX_LEVELS_VDDC;
2152 case SMU_MAX_LEVELS_VDDCI:
2153 return SMU71_MAX_LEVELS_VDDCI;
2154 case SMU_MAX_LEVELS_MVDD:
2155 return SMU71_MAX_LEVELS_MVDD;
2156 }
2157
2158 pr_warn("can't get the mac of %x\n", value);
2159 return 0;
2160}
2161
2162/**
2163 * Get the location of various tables inside the FW image.
2164 *
2165 * @param hwmgr the address of the powerplay hardware manager.
2166 * @return 0 on success, 1 if a required header entry could not be read
2167 */
2168int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
2169{
2170 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2171 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2172
2173 uint32_t tmp;
2174 int result;
2175 bool error = false;
2176
2177 result = smu7_read_smc_sram_dword(hwmgr,
2178 SMU71_FIRMWARE_HEADER_LOCATION +
2179 offsetof(SMU71_Firmware_Header, DpmTable),
2180 &tmp, SMC_RAM_END);
2181
2182 if (0 == result) {
2183 smu7_data->dpm_table_start = tmp;
2184 }
2185
2186 error |= (0 != result);
2187
2188 result = smu7_read_smc_sram_dword(hwmgr,
2189 SMU71_FIRMWARE_HEADER_LOCATION +
2190 offsetof(SMU71_Firmware_Header, SoftRegisters),
2191 &tmp, SMC_RAM_END);
2192
2193 if (0 == result) {
2194 data->soft_regs_start = tmp;
2195 smu7_data->soft_regs_start = tmp;
2196 }
2197
2198 error |= (0 != result);
2199
2200
2201 result = smu7_read_smc_sram_dword(hwmgr,
2202 SMU71_FIRMWARE_HEADER_LOCATION +
2203 offsetof(SMU71_Firmware_Header, mcRegisterTable),
2204 &tmp, SMC_RAM_END);
2205
2206 if (0 == result) {
2207 smu7_data->mc_reg_table_start = tmp;
2208 }
2209
2210 result = smu7_read_smc_sram_dword(hwmgr,
2211 SMU71_FIRMWARE_HEADER_LOCATION +
2212 offsetof(SMU71_Firmware_Header, FanTable),
2213 &tmp, SMC_RAM_END);
2214
2215 if (0 == result) {
2216 smu7_data->fan_table_start = tmp;
2217 }
2218
2219 error |= (0 != result);
2220
2221 result = smu7_read_smc_sram_dword(hwmgr,
2222 SMU71_FIRMWARE_HEADER_LOCATION +
2223 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
2224 &tmp, SMC_RAM_END);
2225
2226 if (0 == result) {
2227 smu7_data->arb_table_start = tmp;
2228 }
2229
2230 error |= (0 != result);
2231
2232
2233 result = smu7_read_smc_sram_dword(hwmgr,
2234 SMU71_FIRMWARE_HEADER_LOCATION +
2235 offsetof(SMU71_Firmware_Header, Version),
2236 &tmp, SMC_RAM_END);
2237
2238 if (0 == result) {
2239 hwmgr->microcode_version_info.SMC = tmp;
2240 }
2241
2242 error |= (0 != result);
2243
2244 result = smu7_read_smc_sram_dword(hwmgr,
2245 SMU71_FIRMWARE_HEADER_LOCATION +
2246 offsetof(SMU71_Firmware_Header, UlvSettings),
2247 &tmp, SMC_RAM_END);
2248
2249 if (0 == result) {
2250 smu7_data->ulv_setting_starts = tmp;
2251 }
2252
2253 error |= (0 != result);
2254
2255 return error ? 1 : 0;
2256}
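iceland_process_firmware_header() reads each table offset independently and folds the failures into one error flag so that a single missing entry does not abort the remaining reads. A standalone sketch of that pattern, using a hypothetical dword_reader callback in place of smu7_read_smc_sram_dword():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical reader: returns 0 on success and fills *out. */
typedef int (*dword_reader)(uint32_t offset, uint32_t *out);

/* Read several optional offsets; keep every value that was read successfully
 * and report failure only at the end (mirrors error |= (0 != result)). */
int read_header_offsets(dword_reader read, const uint32_t *offsets,
			uint32_t *values, int count)
{
	bool error = false;
	int i;

	for (i = 0; i < count; i++) {
		uint32_t tmp;
		int result = read(offsets[i], &tmp);

		if (result == 0)
			values[i] = tmp;
		error |= (result != 0);
	}
	return error ? 1 : 0;
}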
2257
2258/*---------------------------MC----------------------------*/
2259
2260static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2261{
2262 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2263}
2264
2265static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2266{
2267 bool result = true;
2268
2269 switch (in_reg) {
2270 case mmMC_SEQ_RAS_TIMING:
2271 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2272 break;
2273
2274 case mmMC_SEQ_DLL_STBY:
2275 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2276 break;
2277
2278 case mmMC_SEQ_G5PDX_CMD0:
2279 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2280 break;
2281
2282 case mmMC_SEQ_G5PDX_CMD1:
2283 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2284 break;
2285
2286 case mmMC_SEQ_G5PDX_CTRL:
2287 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2288 break;
2289
2290 case mmMC_SEQ_CAS_TIMING:
2291 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2292 break;
2293
2294 case mmMC_SEQ_MISC_TIMING:
2295 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2296 break;
2297
2298 case mmMC_SEQ_MISC_TIMING2:
2299 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2300 break;
2301
2302 case mmMC_SEQ_PMG_DVS_CMD:
2303 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2304 break;
2305
2306 case mmMC_SEQ_PMG_DVS_CTL:
2307 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2308 break;
2309
2310 case mmMC_SEQ_RD_CTL_D0:
2311 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2312 break;
2313
2314 case mmMC_SEQ_RD_CTL_D1:
2315 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2316 break;
2317
2318 case mmMC_SEQ_WR_CTL_D0:
2319 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2320 break;
2321
2322 case mmMC_SEQ_WR_CTL_D1:
2323 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2324 break;
2325
2326 case mmMC_PMG_CMD_EMRS:
2327 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2328 break;
2329
2330 case mmMC_PMG_CMD_MRS:
2331 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2332 break;
2333
2334 case mmMC_PMG_CMD_MRS1:
2335 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2336 break;
2337
2338 case mmMC_SEQ_PMG_TIMING:
2339 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2340 break;
2341
2342 case mmMC_PMG_CMD_MRS2:
2343 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2344 break;
2345
2346 case mmMC_SEQ_WR_CTL_2:
2347 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2348 break;
2349
2350 default:
2351 result = false;
2352 break;
2353 }
2354
2355 return result;
2356}
2357
2358static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
2359{
2360 uint32_t i;
2361 uint16_t address;
2362
2363 for (i = 0; i < table->last; i++) {
2364 table->mc_reg_address[i].s0 =
2365 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2366 ? address : table->mc_reg_address[i].s1;
2367 }
2368 return 0;
2369}
2370
2371static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2372 struct iceland_mc_reg_table *ni_table)
2373{
2374 uint8_t i, j;
2375
2376 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2377 "Invalid VramInfo table.", return -EINVAL);
2378 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2379 "Invalid VramInfo table.", return -EINVAL);
2380
2381 for (i = 0; i < table->last; i++) {
2382 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2383 }
2384 ni_table->last = table->last;
2385
2386 for (i = 0; i < table->num_entries; i++) {
2387 ni_table->mc_reg_table_entry[i].mclk_max =
2388 table->mc_reg_table_entry[i].mclk_max;
2389 for (j = 0; j < table->last; j++) {
2390 ni_table->mc_reg_table_entry[i].mc_data[j] =
2391 table->mc_reg_table_entry[i].mc_data[j];
2392 }
2393 }
2394
2395 ni_table->num_entries = table->num_entries;
2396
2397 return 0;
2398}
2399
2400/**
2401 * The VBIOS omits some information to reduce image size; recover it here.
2402 * 1. When mmMC_SEQ_MISC1 is seen, bits [31:16] (EMRS1) need to be written to mmMC_PMG_CMD_EMRS/_LP[15:0],
2403 *    and bits [15:0] (MRS) need to be written to mmMC_PMG_CMD_MRS/_LP[15:0].
2404 * 2. When mmMC_SEQ_RESERVE_M is seen, bits [15:0] (EMRS2) need to be written to mmMC_PMG_CMD_MRS1/_LP[15:0].
2405 * 3. These values need to be set for each clock range.
2406 *
2407 * @param hwmgr the address of the powerplay hardware manager.
2408 * @param table the address of MCRegTable
2409 * @return 0 on success, -EINVAL if the VramInfo table overflows
2410 */
2411static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2412 struct iceland_mc_reg_table *table)
2413{
2414 uint8_t i, j, k;
2415 uint32_t temp_reg;
2416 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2417
2418 for (i = 0, j = table->last; i < table->last; i++) {
2419 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2420 "Invalid VramInfo table.", return -EINVAL);
2421
2422 switch (table->mc_reg_address[i].s1) {
2423
2424 case mmMC_SEQ_MISC1:
2425 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2426 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2427 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2428 for (k = 0; k < table->num_entries; k++) {
2429 table->mc_reg_table_entry[k].mc_data[j] =
2430 ((temp_reg & 0xffff0000)) |
2431 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2432 }
2433 j++;
2434 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2435 "Invalid VramInfo table.", return -EINVAL);
2436
2437 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2438 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2439 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2440 for (k = 0; k < table->num_entries; k++) {
2441 table->mc_reg_table_entry[k].mc_data[j] =
2442 (temp_reg & 0xffff0000) |
2443 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2444
2445 if (!data->is_memory_gddr5) {
2446 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2447 }
2448 }
2449 j++;
2450 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2451 "Invalid VramInfo table.", return -EINVAL);
2452
2453 if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
2454 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2455 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2456 for (k = 0; k < table->num_entries; k++) {
2457 table->mc_reg_table_entry[k].mc_data[j] =
2458 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2459 }
2460 j++;
2461 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2462 "Invalid VramInfo table.", return -EINVAL);
2463 }
2464
2465 break;
2466
2467 case mmMC_SEQ_RESERVE_M:
2468 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2469 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2470 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2471 for (k = 0; k < table->num_entries; k++) {
2472 table->mc_reg_table_entry[k].mc_data[j] =
2473 (temp_reg & 0xffff0000) |
2474 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2475 }
2476 j++;
2477 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2478 "Invalid VramInfo table.", return -EINVAL);
2479 break;
2480
2481 default:
2482 break;
2483 }
2484
2485 }
2486
2487 table->last = j;
2488
2489 return 0;
2490}
2491
2492static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
2493{
2494 uint8_t i, j;
2495 for (i = 0; i < table->last; i++) {
2496 for (j = 1; j < table->num_entries; j++) {
2497 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2498 table->mc_reg_table_entry[j].mc_data[i]) {
2499 table->validflag |= (1<<i);
2500 break;
2501 }
2502 }
2503 }
2504
2505 return 0;
2506}
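iceland_set_valid_flag() records which MC register columns actually change across the AC-timing entries, so only those columns need to be re-sent per memory clock level. The same per-column scan over a plain 2-D array (editor's sketch, hypothetical helper name):

#include <stdint.h>

/* Set bit i of the returned mask if column i differs between any two
 * consecutive rows (rows = timing entries, columns = MC registers). */
uint32_t columns_that_change(const uint32_t *data, int rows, int cols)
{
	uint32_t mask = 0;
	int i, j;

	for (i = 0; i < cols; i++) {
		for (j = 1; j < rows; j++) {
			if (data[(j - 1) * cols + i] != data[j * cols + i]) {
				mask |= (1u << i);
				break;
			}
		}
	}
	return mask;
}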
2507
2508int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2509{
2510 int result;
2511 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
2512 pp_atomctrl_mc_reg_table *table;
2513 struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2514 uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
2515
2516 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2517
2518 if (NULL == table)
2519 return -ENOMEM;
2520
2521 /* Program additional LP registers that are no longer programmed by VBIOS */
2522 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2523 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2524 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2525 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2526 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2527 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2528 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2529 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2530 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2531 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2532 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2533 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2534 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2535 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2536 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2537 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2538 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2539 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2540 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2541 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2542
2543 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2544
2545 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2546
2547 if (0 == result)
2548 result = iceland_copy_vbios_smc_reg_table(table, ni_table);
2549
2550 if (0 == result) {
2551 iceland_set_s0_mc_reg_index(ni_table);
2552 result = iceland_set_mc_special_registers(hwmgr, ni_table);
2553 }
2554
2555 if (0 == result)
2556 iceland_set_valid_flag(ni_table);
2557
2558 kfree(table);
2559
2560 return result;
2561}
2562
2563bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
2564{
2565 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2566 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2567 ? true : false;
2568}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index a778e174ba01..34128822b8fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -30,15 +30,84 @@
30 30
31#include "smumgr.h" 31#include "smumgr.h"
32#include "iceland_smumgr.h" 32#include "iceland_smumgr.h"
33#include "smu_ucode_xfer_vi.h" 33
34#include "ppsmc.h" 34#include "ppsmc.h"
35
36#include "cgs_common.h"
37
38#include "smu7_dyn_defaults.h"
39#include "smu7_hwmgr.h"
40#include "hardwaremanager.h"
41#include "ppatomctrl.h"
42#include "atombios.h"
43#include "pppcielanes.h"
44#include "pp_endian.h"
45#include "processpptables.h"
46
47
35#include "smu/smu_7_1_1_d.h" 48#include "smu/smu_7_1_1_d.h"
36#include "smu/smu_7_1_1_sh_mask.h" 49#include "smu/smu_7_1_1_sh_mask.h"
37#include "cgs_common.h" 50#include "smu71_discrete.h"
38#include "iceland_smc.h" 51
52#include "smu_ucode_xfer_vi.h"
53#include "gmc/gmc_8_1_d.h"
54#include "gmc/gmc_8_1_sh_mask.h"
55#include "bif/bif_5_0_d.h"
56#include "bif/bif_5_0_sh_mask.h"
57#include "dce/dce_10_0_d.h"
58#include "dce/dce_10_0_sh_mask.h"
59
39 60
40#define ICELAND_SMC_SIZE 0x20000 61#define ICELAND_SMC_SIZE 0x20000
41 62
63#define VOLTAGE_SCALE 4
64#define POWERTUNE_DEFAULT_SET_MAX 1
65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100
67#define MC_CG_ARB_FREQ_F1 0x0b
68#define VDDC_VDDCI_DELTA 200
69
70#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
71#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
72#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
73#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
74
75static const struct iceland_pt_defaults defaults_iceland = {
76 /*
77 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
78 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
79 */
80 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
81 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
82 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
83};
84
85/* 35W - XT, XTL */
86static const struct iceland_pt_defaults defaults_icelandxt = {
87 /*
 88	 * sviLoadLineEn, SviLoadLineVddC,
89 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
90 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
91 * BAPM_TEMP_GRADIENT
92 */
93 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
94 { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
95 { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
96};
97
98/* 25W - PRO, LE */
99static const struct iceland_pt_defaults defaults_icelandpro = {
100 /*
101	 * sviLoadLineEn, SviLoadLineVddC,
102 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
103 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
104 * BAPM_TEMP_GRADIENT
105 */
106 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
107 { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
108 { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
109};
110
42static int iceland_start_smc(struct pp_hwmgr *hwmgr) 111static int iceland_start_smc(struct pp_hwmgr *hwmgr)
43{ 112{
44 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 113 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -191,13 +260,6 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
191 return result; 260 return result;
192} 261}
193 262
194/**
195 * Write a 32bit value to the SMC SRAM space.
196 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
197 * @param smumgr the address of the powerplay hardware manager.
198 * @param smcAddress the address in the SMC RAM to access.
199 * @param value to write to the SMC SRAM.
200 */
201static int iceland_smu_init(struct pp_hwmgr *hwmgr) 263static int iceland_smu_init(struct pp_hwmgr *hwmgr)
202{ 264{
203 int i; 265 int i;
@@ -219,6 +281,2413 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
219 return 0; 281 return 0;
220} 282}
221 283
284
285static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
286{
287 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
288 struct cgs_system_info sys_info = {0};
289 uint32_t dev_id;
290
291 sys_info.size = sizeof(struct cgs_system_info);
292 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
293 cgs_query_system_info(hwmgr->device, &sys_info);
294 dev_id = (uint32_t)sys_info.value;
295
296 switch (dev_id) {
297 case DEVICE_ID_VI_ICELAND_M_6900:
298 case DEVICE_ID_VI_ICELAND_M_6903:
299 smu_data->power_tune_defaults = &defaults_icelandxt;
300 break;
301
302 case DEVICE_ID_VI_ICELAND_M_6901:
303 case DEVICE_ID_VI_ICELAND_M_6902:
304 smu_data->power_tune_defaults = &defaults_icelandpro;
305 break;
306 default:
307 smu_data->power_tune_defaults = &defaults_iceland;
308 pr_warn("Unknown V.I. Device ID.\n");
309 break;
310 }
311 return;
312}
313
314static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
315{
316 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
317 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
318
319 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
320 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
321 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
322 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
323
324 return 0;
325}
326
327static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
328{
329 uint16_t tdc_limit;
330 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
331 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
332
333 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
334 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
335 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
336 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
337 defaults->tdc_vddc_throttle_release_limit_perc;
338 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
339
340 return 0;
341}
342
343static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
344{
345 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
346 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
347 uint32_t temp;
348
349 if (smu7_read_smc_sram_dword(hwmgr,
350 fuse_table_offset +
351 offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
352 (uint32_t *)&temp, SMC_RAM_END))
353 PP_ASSERT_WITH_CODE(false,
354 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
355 return -EINVAL);
356 else
357 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
358
359 return 0;
360}
361
362static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
363{
364 return 0;
365}
366
367static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
368{
369 int i;
370 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
371
372 /* Currently not used. Set all to zero. */
373 for (i = 0; i < 8; i++)
374 smu_data->power_tune_table.GnbLPML[i] = 0;
375
376 return 0;
377}
378
379static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
380{
381 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
382 uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
383 uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
384 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
385
386 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
387 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
388
389 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
390 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
391 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
392 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
393
394 return 0;
395}
396
397static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
398{
399 int i;
400 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
401 uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
402 uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
403
404 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
405 "The CAC Leakage table does not exist!", return -EINVAL);
406 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
407 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
408 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
409 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
410
411 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
412 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
413 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
414 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
415 }
416 } else {
417 PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
418 }
419
420 return 0;
421}
422
423static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
424{
425 int i;
426 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
427 uint8_t *vid = smu_data->power_tune_table.VddCVid;
428 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
429
430 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
431 "There should never be more than 8 entries for VddcVid!!!",
432 return -EINVAL);
433
434 for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
435 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
436 }
437
438 return 0;
439}
440
441
442
443static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
444{
445 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
446 uint32_t pm_fuse_table_offset;
447
448 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
449 PHM_PlatformCaps_PowerContainment)) {
450 if (smu7_read_smc_sram_dword(hwmgr,
451 SMU71_FIRMWARE_HEADER_LOCATION +
452 offsetof(SMU71_Firmware_Header, PmFuseTable),
453 &pm_fuse_table_offset, SMC_RAM_END))
454 PP_ASSERT_WITH_CODE(false,
455 "Attempt to get pm_fuse_table_offset Failed!",
456 return -EINVAL);
457
458 /* DW0 - DW3 */
459 if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
460 PP_ASSERT_WITH_CODE(false,
461 "Attempt to populate bapm vddc vid Failed!",
462 return -EINVAL);
463
464 /* DW4 - DW5 */
465 if (iceland_populate_vddc_vid(hwmgr))
466 PP_ASSERT_WITH_CODE(false,
467 "Attempt to populate vddc vid Failed!",
468 return -EINVAL);
469
470 /* DW6 */
471 if (iceland_populate_svi_load_line(hwmgr))
472 PP_ASSERT_WITH_CODE(false,
473 "Attempt to populate SviLoadLine Failed!",
474 return -EINVAL);
475 /* DW7 */
476 if (iceland_populate_tdc_limit(hwmgr))
477 PP_ASSERT_WITH_CODE(false,
478 "Attempt to populate TDCLimit Failed!", return -EINVAL);
479 /* DW8 */
480 if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
481 PP_ASSERT_WITH_CODE(false,
482 "Attempt to populate TdcWaterfallCtl, "
483 "LPMLTemperature Min and Max Failed!",
484 return -EINVAL);
485
486 /* DW9-DW12 */
487 if (0 != iceland_populate_temperature_scaler(hwmgr))
488 PP_ASSERT_WITH_CODE(false,
489 "Attempt to populate LPMLTemperatureScaler Failed!",
490 return -EINVAL);
491
492 /* DW13-DW16 */
493 if (iceland_populate_gnb_lpml(hwmgr))
494 PP_ASSERT_WITH_CODE(false,
495 "Attempt to populate GnbLPML Failed!",
496 return -EINVAL);
497
498 /* DW18 */
499 if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
500 PP_ASSERT_WITH_CODE(false,
501 "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
502 return -EINVAL);
503
504 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
505 (uint8_t *)&smu_data->power_tune_table,
506 sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
507 PP_ASSERT_WITH_CODE(false,
508 "Attempt to download PmFuseTable Failed!",
509 return -EINVAL);
510 }
511 return 0;
512}
513
514static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
515 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
516 uint32_t clock, uint32_t *vol)
517{
518 uint32_t i = 0;
519
520 /* clock - voltage dependency table is empty table */
521 if (allowed_clock_voltage_table->count == 0)
522 return -EINVAL;
523
524 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
525 /* find first sclk bigger than request */
526 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
527 *vol = allowed_clock_voltage_table->entries[i].v;
528 return 0;
529 }
530 }
531
532 /* sclk is bigger than max sclk in the dependence table */
533 *vol = allowed_clock_voltage_table->entries[i - 1].v;
534
535 return 0;
536}
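iceland_get_dependency_volt_by_clk() returns the voltage of the first dependency entry whose clock is at least the requested clock and clamps to the last (highest) entry otherwise. The same lookup as a self-contained sketch with hypothetical names:

#include <stdint.h>

struct clk_volt { uint32_t clk; uint32_t v; };

/* Return -1 on an empty table, else 0 with *vol set to the chosen voltage. */
int volt_for_clock(const struct clk_volt *tab, uint32_t count,
		   uint32_t clock, uint32_t *vol)
{
	uint32_t i;

	if (count == 0)
		return -1;

	for (i = 0; i < count; i++) {
		if (tab[i].clk >= clock) {
			*vol = tab[i].v;
			return 0;
		}
	}

	/* requested clock is above the table: clamp to the top entry */
	*vol = tab[count - 1].v;
	return 0;
}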
537
538static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
539 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
540 uint16_t *lo)
541{
542 uint16_t v_index;
543 bool vol_found = false;
544 *hi = tab->value * VOLTAGE_SCALE;
545 *lo = tab->value * VOLTAGE_SCALE;
546
547 /* SCLK/VDDC Dependency Table has to exist. */
548 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
549 "The SCLK/VDDC Dependency Table does not exist.\n",
550 return -EINVAL);
551
552 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
553 pr_warn("CAC Leakage Table does not exist, using vddc.\n");
554 return 0;
555 }
556
557 /*
558 * Since voltage in the sclk/vddc dependency table is not
559 * necessarily in ascending order because of ELB voltage
560 * patching, loop through entire list to find exact voltage.
561 */
562 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
563 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
564 vol_found = true;
565 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
566 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
567 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
568 } else {
569 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
570 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
571 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
572 }
573 break;
574 }
575 }
576
577 /*
578 * If voltage is not found in the first pass, loop again to
579 * find the best match, equal or higher value.
580 */
581 if (!vol_found) {
582 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
583 if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
584 vol_found = true;
585 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
586 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
587 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
588 } else {
589 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
590 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
591 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
592 }
593 break;
594 }
595 }
596
597 if (!vol_found)
598 pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
599 }
600
601 return 0;
602}
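iceland_get_std_voltage_value_sidd() first searches the SCLK/VDDC dependency table for an exact VDDC match (the table is not necessarily sorted after ELB voltage patching) and only then falls back to the first equal-or-higher entry. The two-pass search reduced to a sketch (hypothetical helper, plain arrays instead of the pptable structures):

#include <stdint.h>

/* Return the index of an exact match if one exists, otherwise the index of
 * the first entry >= value; -1 if the value is above every entry. */
int find_voltage_index(const uint32_t *volts, uint32_t count, uint32_t value)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if (volts[i] == value)
			return (int)i;

	for (i = 0; i < count; i++)
		if (volts[i] >= value)
			return (int)i;

	return -1;
}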
603
604static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
605 pp_atomctrl_voltage_table_entry *tab,
606 SMU71_Discrete_VoltageLevel *smc_voltage_tab)
607{
608 int result;
609
610 result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
611 &smc_voltage_tab->StdVoltageHiSidd,
612 &smc_voltage_tab->StdVoltageLoSidd);
613 if (0 != result) {
614 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
615 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
616 }
617
618 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
619 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
 620	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
621
622 return 0;
623}
624
625static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
626 SMU71_Discrete_DpmTable *table)
627{
628 unsigned int count;
629 int result;
630 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
631
632 table->VddcLevelCount = data->vddc_voltage_table.count;
633 for (count = 0; count < table->VddcLevelCount; count++) {
634 result = iceland_populate_smc_voltage_table(hwmgr,
635 &(data->vddc_voltage_table.entries[count]),
636 &(table->VddcLevel[count]));
637 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
638
639 /* GPIO voltage control */
640 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
641 table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
642 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
643 table->VddcLevel[count].Smio = 0;
644 }
645
646 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
647
648 return 0;
649}
650
651static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
652 SMU71_Discrete_DpmTable *table)
653{
654 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
655 uint32_t count;
656 int result;
657
658 table->VddciLevelCount = data->vddci_voltage_table.count;
659
660 for (count = 0; count < table->VddciLevelCount; count++) {
661 result = iceland_populate_smc_voltage_table(hwmgr,
662 &(data->vddci_voltage_table.entries[count]),
663 &(table->VddciLevel[count]));
664 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
665 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
666 table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
667 else
668 table->VddciLevel[count].Smio |= 0;
669 }
670
671 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
672
673 return 0;
674}
675
676static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
677 SMU71_Discrete_DpmTable *table)
678{
679 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
680 uint32_t count;
681 int result;
682
683 table->MvddLevelCount = data->mvdd_voltage_table.count;
684
 685	for (count = 0; count < table->MvddLevelCount; count++) {
686 result = iceland_populate_smc_voltage_table(hwmgr,
687 &(data->mvdd_voltage_table.entries[count]),
688 &table->MvddLevel[count]);
689 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
690 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
691 table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
692 else
693 table->MvddLevel[count].Smio |= 0;
694 }
695
696 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
697
698 return 0;
699}
700
701
702static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
703 SMU71_Discrete_DpmTable *table)
704{
705 int result;
706
707 result = iceland_populate_smc_vddc_table(hwmgr, table);
708 PP_ASSERT_WITH_CODE(0 == result,
709 "can not populate VDDC voltage table to SMC", return -EINVAL);
710
711 result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
712 PP_ASSERT_WITH_CODE(0 == result,
713 "can not populate VDDCI voltage table to SMC", return -EINVAL);
714
715 result = iceland_populate_smc_mvdd_table(hwmgr, table);
716 PP_ASSERT_WITH_CODE(0 == result,
717 "can not populate MVDD voltage table to SMC", return -EINVAL);
718
719 return 0;
720}
721
722static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
723 struct SMU71_Discrete_Ulv *state)
724{
725 uint32_t voltage_response_time, ulv_voltage;
726 int result;
727 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
728
729 state->CcPwrDynRm = 0;
730 state->CcPwrDynRm1 = 0;
731
732 result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
733 PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
734
735 if (ulv_voltage == 0) {
736 data->ulv_supported = false;
737 return 0;
738 }
739
740 if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
741 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
742 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
743 state->VddcOffset = 0;
744 else
745 /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
746 state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
747 } else {
748 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
749 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
750 state->VddcOffsetVid = 0;
751 else /* used in SVI2 Mode */
752 state->VddcOffsetVid = (uint8_t)(
753 (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
754 * VOLTAGE_VID_OFFSET_SCALE2
755 / VOLTAGE_VID_OFFSET_SCALE1);
756 }
757 state->VddcPhase = 1;
758
759 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
760 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
761 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
762
763 return 0;
764}
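In the SVI2 branch above, VOLTAGE_VID_OFFSET_SCALE2 (100) and VOLTAGE_VID_OFFSET_SCALE1 (625) convert a voltage delta into 6.25 mV VID steps. A standalone sketch of that conversion, assuming the pptable voltages are in millivolts (hypothetical helper name, editor's illustration):

#include <stdint.h>

#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

/* Convert a voltage delta (assumed to be in mV) into VID steps of 6.25 mV,
 * i.e. delta * 100 / 625, as done for VddcOffsetVid above. */
uint8_t mv_delta_to_vid_offset(uint32_t delta_mv)
{
	return (uint8_t)((delta_mv * VOLTAGE_VID_OFFSET_SCALE2) /
			 VOLTAGE_VID_OFFSET_SCALE1);
}

/* e.g. a 50 mV ULV offset becomes 50 * 100 / 625 = 8 VID steps */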
765
766static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
767 SMU71_Discrete_Ulv *ulv_level)
768{
769 return iceland_populate_ulv_level(hwmgr, ulv_level);
770}
771
772static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
773{
774 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
775 struct smu7_dpm_table *dpm_table = &data->dpm_table;
776 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
777 uint32_t i;
778
779 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
780 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
781 table->LinkLevel[i].PcieGenSpeed =
782 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
783 table->LinkLevel[i].PcieLaneCount =
784 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
785 table->LinkLevel[i].EnabledForActivity =
786 1;
787 table->LinkLevel[i].SPC =
788 (uint8_t)(data->pcie_spc_cap & 0xff);
789 table->LinkLevel[i].DownThreshold =
790 PP_HOST_TO_SMC_UL(5);
791 table->LinkLevel[i].UpThreshold =
792 PP_HOST_TO_SMC_UL(30);
793 }
794
795 smu_data->smc_state_table.LinkLevelCount =
796 (uint8_t)dpm_table->pcie_speed_table.count;
797 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
798 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
799
800 return 0;
801}
802
803static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
804 uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
805{
806 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
807 pp_atomctrl_clock_dividers_vi dividers;
808 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
809 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
810 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
811 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
812 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
813 uint32_t reference_clock;
814 uint32_t reference_divider;
815 uint32_t fbdiv;
816 int result;
817
818 /* get the engine clock dividers for this clock value*/
819 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
820
821 PP_ASSERT_WITH_CODE(result == 0,
822 "Error retrieving Engine Clock dividers from VBIOS.", return result);
823
824 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
825 reference_clock = atomctrl_get_reference_clock(hwmgr);
826
827 reference_divider = 1 + dividers.uc_pll_ref_div;
828
829 /* low 14 bits is fraction and high 12 bits is divider*/
830 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
831
832 /* SPLL_FUNC_CNTL setup*/
833 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
834 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
835 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
836 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
837
838 /* SPLL_FUNC_CNTL_3 setup*/
839 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
840 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
841
842 /* set to use fractional accumulation*/
843 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
844 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
845
846 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
847 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
848 pp_atomctrl_internal_ss_info ss_info;
849
850 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
851 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
852 /*
853 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
854 * ss_info.speed_spectrum_rate -- in unit of khz
855 */
856 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
857 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
858
859 /* clkv = 2 * D * fbdiv / NS */
860 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
861
862 cg_spll_spread_spectrum =
863 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
864 cg_spll_spread_spectrum =
865 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
866 cg_spll_spread_spectrum_2 =
867 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
868 }
869 }
870
871 sclk->SclkFrequency = engine_clock;
872 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
873 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
874 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
875 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
876 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
877
878 return 0;
879}
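The spread-spectrum setup above follows the formulas in the comments: clkS counts reference-clock periods per spreading step and clkV scales the feedback divider by the down-spread percentage. The same two expressions isolated into a sketch (hypothetical helper names; rates in kHz, percentage in 0.01 % units as in pp_atomctrl_internal_ss_info):

#include <stdint.h>

/* clkS = reference_clock * 5 / (reference_divider * spread_rate_khz) */
uint32_t sclk_ss_clks(uint32_t ref_clock, uint32_t ref_divider, uint32_t rate_khz)
{
	return ref_clock * 5 / (ref_divider * rate_khz);
}

/* clkV = 4 * spread_percentage * fbdiv / (clkS * 10000),
 * with spread_percentage in 0.01 % units */
uint32_t sclk_ss_clkv(uint32_t percentage, uint32_t fbdiv, uint32_t clks)
{
	return 4 * percentage * fbdiv / (clks * 10000);
}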
880
881static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
882 const struct phm_phase_shedding_limits_table *pl,
883 uint32_t sclk, uint32_t *p_shed)
884{
885 unsigned int i;
886
887 /* use the minimum phase shedding */
888 *p_shed = 1;
889
890 for (i = 0; i < pl->count; i++) {
891 if (sclk < pl->entries[i].Sclk) {
892 *p_shed = i;
893 break;
894 }
895 }
896 return 0;
897}
898
899static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
900 uint32_t engine_clock,
901 uint16_t sclk_activity_level_threshold,
902 SMU71_Discrete_GraphicsLevel *graphic_level)
903{
904 int result;
905 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
906
907 result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
908
909 /* populate graphics levels*/
910 result = iceland_get_dependency_volt_by_clk(hwmgr,
911 hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
912 &graphic_level->MinVddc);
913 PP_ASSERT_WITH_CODE((0 == result),
914 "can not find VDDC voltage value for VDDC \
915 engine clock dependency table", return result);
916
917 /* SCLK frequency in units of 10KHz*/
918 graphic_level->SclkFrequency = engine_clock;
919 graphic_level->MinVddcPhases = 1;
920
921 if (data->vddc_phase_shed_control)
922 iceland_populate_phase_value_based_on_sclk(hwmgr,
923 hwmgr->dyn_state.vddc_phase_shed_limits_table,
924 engine_clock,
925 &graphic_level->MinVddcPhases);
926
927 /* Indicates maximum activity level for this performance level. 50% for now*/
928 graphic_level->ActivityLevel = sclk_activity_level_threshold;
929
930 graphic_level->CcPwrDynRm = 0;
931 graphic_level->CcPwrDynRm1 = 0;
932 /* this level can be used if activity is high enough.*/
933 graphic_level->EnabledForActivity = 0;
934 /* this level can be used for throttling.*/
935 graphic_level->EnabledForThrottle = 1;
936 graphic_level->UpHyst = 0;
937 graphic_level->DownHyst = 100;
938 graphic_level->VoltageDownHyst = 0;
939 graphic_level->PowerThrottle = 0;
940
941 data->display_timing.min_clock_in_sr =
942 hwmgr->display_config.min_core_set_clock_in_sr;
943
944 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
945 PHM_PlatformCaps_SclkDeepSleep))
946 graphic_level->DeepSleepDivId =
947 smu7_get_sleep_divider_id_from_clock(engine_clock,
948 data->display_timing.min_clock_in_sr);
949
950 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
951 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
952
953 if (0 == result) {
954 graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
955 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
956 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
957 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
958 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
959 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
960 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
961 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
962 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
963 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
964 }
965
966 return result;
967}
968
969static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
970{
971 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
972 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
973 struct smu7_dpm_table *dpm_table = &data->dpm_table;
974 uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
975 offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
976
977 uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
978 SMU71_MAX_LEVELS_GRAPHICS;
979
980 SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
981
982 uint32_t i;
983 uint8_t highest_pcie_level_enabled = 0;
984 uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
985 uint8_t count = 0;
986 int result = 0;
987
988 memset(levels, 0x00, level_array_size);
989
990 for (i = 0; i < dpm_table->sclk_table.count; i++) {
991 result = iceland_populate_single_graphic_level(hwmgr,
992 dpm_table->sclk_table.dpm_levels[i].value,
993 (uint16_t)smu_data->activity_target[i],
994 &(smu_data->smc_state_table.GraphicsLevel[i]));
995 if (result != 0)
996 return result;
997
998 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
999 if (i > 1)
1000 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
1001 }
1002
1003 /* Only enable level 0 for now. */
1004 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
1005
1006 /* set highest level watermark to high */
1007 if (dpm_table->sclk_table.count > 1)
1008 smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
1009 PPSMC_DISPLAY_WATERMARK_HIGH;
1010
1011 smu_data->smc_state_table.GraphicsDpmLevelCount =
1012 (uint8_t)dpm_table->sclk_table.count;
1013 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
1014 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
1015
1016 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1017 (1 << (highest_pcie_level_enabled + 1))) != 0) {
1018 highest_pcie_level_enabled++;
1019 }
1020
1021 while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1022 (1 << lowest_pcie_level_enabled)) == 0) {
1023 lowest_pcie_level_enabled++;
1024 }
1025
1026 while ((count < highest_pcie_level_enabled) &&
1027 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1028 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
1029 count++;
1030 }
1031
1032 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
1033 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
1034
1035
1036 /* set pcieDpmLevel to highest_pcie_level_enabled*/
1037 for (i = 2; i < dpm_table->sclk_table.count; i++) {
1038 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
1039 }
1040
1041 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
1042 smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
1043
1044 /* set pcieDpmLevel to mid_pcie_level_enabled*/
1045 smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
1046
1047 /* level count will send to smc once at init smc table and never change*/
1048 result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
1049 (uint8_t *)levels, (uint32_t)level_array_size,
1050 SMC_RAM_END);
1051
1052 return result;
1053}
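The three while loops in iceland_populate_all_graphic_levels() scan pcie_dpm_enable_mask for the highest and lowest enabled levels and then choose a mid level between them. The scan isolated into a sketch (hypothetical helper; like the original, it assumes at least one bit is set in the mask):

#include <stdint.h>

/* Mirror of the enable-mask scan used to choose PCIe DPM levels. */
void pick_pcie_levels(uint32_t enable_mask, uint8_t *lowest,
		      uint8_t *mid, uint8_t *highest)
{
	uint8_t high = 0, low = 0, count = 0;

	/* highest enabled level: walk up while the next bit is set */
	while (enable_mask & (1u << (high + 1)))
		high++;

	/* lowest enabled level: walk up to the first set bit */
	while ((enable_mask & (1u << low)) == 0)
		low++;

	/* skip disabled levels above the lowest one, bounded by the highest */
	while ((count < high) &&
	       ((enable_mask & (1u << (low + 1 + count))) == 0))
		count++;

	*highest = high;
	*lowest = low;
	*mid = (uint8_t)((low + 1 + count) < high ? (low + 1 + count) : high);
}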
1054
1055static int iceland_calculate_mclk_params(
1056 struct pp_hwmgr *hwmgr,
1057 uint32_t memory_clock,
1058 SMU71_Discrete_MemoryLevel *mclk,
1059 bool strobe_mode,
1060 bool dllStateOn
1061 )
1062{
1063 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1064
1065 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1066 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1067 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1068 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1069 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1070 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1071 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1072 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1073 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1074
1075 pp_atomctrl_memory_clock_param mpll_param;
1076 int result;
1077
1078 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1079 memory_clock, &mpll_param, strobe_mode);
1080 PP_ASSERT_WITH_CODE(0 == result,
1081 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1082
1083 /* MPLL_FUNC_CNTL setup*/
1084 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1085
1086 /* MPLL_FUNC_CNTL_1 setup*/
1087 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1088 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1089 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1090 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1091 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1092 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1093
1094 /* MPLL_AD_FUNC_CNTL setup*/
1095 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1096 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1097
1098 if (data->is_memory_gddr5) {
1099 /* MPLL_DQ_FUNC_CNTL setup*/
1100 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1101 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1102 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1103 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1104 }
1105
1106 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1107 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1108 /*
1109 ************************************
1110 Fref = Reference Frequency
1111 NF = Feedback divider ratio
1112 NR = Reference divider ratio
1113 Fnom = Nominal VCO output frequency = Fref * NF / NR
1114 Fs = Spreading Rate
1115 D = Percentage down-spread / 2
1116 Fint = Reference input frequency to PFD = Fref / NR
1117 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
1118 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
1119 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
1120 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
1121 *************************************
1122 */
1123 pp_atomctrl_internal_ss_info ss_info;
1124 uint32_t freq_nom;
1125 uint32_t tmp;
1126 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1127
1128 /* for GDDR5 for all modes and DDR3 */
1129 if (1 == mpll_param.qdr)
1130 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1131 else
1132 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1133
1134 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1135 tmp = (freq_nom / reference_clock);
1136 tmp = tmp * tmp;
1137
1138 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1139			/* ss_info.speed_spectrum_percentage -- in units of 0.01% */
1140			/* ss_info.speed_spectrum_rate -- in units of kHz */
1141 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
1142 /* = reference_clock * 5 / speed_spectrum_rate */
1143 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1144
1145 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
1146 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
1147 uint32_t clkv =
1148 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1149 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1150
1151 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1152 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1153 }
1154 }
1155
1156 /* MCLK_PWRMGT_CNTL setup */
1157 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1158 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1159 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1160 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1161 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1162 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1163
1164
1165	/* Save the result data to the output memory level structure */
1166 mclk->MclkFrequency = memory_clock;
1167 mclk->MpllFuncCntl = mpll_func_cntl;
1168 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1169 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1170 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1171 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1172 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1173 mclk->DllCntl = dll_cntl;
1174 mclk->MpllSs1 = mpll_ss1;
1175 mclk->MpllSs2 = mpll_ss2;
1176
1177 return 0;
1178}
1179
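A standalone sketch of the CLKS/CLKV spread-spectrum arithmetic documented in the comment block inside iceland_calculate_mclk_params() above; every input below is an assumed example value, not data read from the VBIOS:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* all inputs are assumed example values */
	uint32_t memory_clock = 100000;     /* 1000 MHz in 10 kHz units */
	uint32_t post_divider = 0;
	uint32_t reference_clock = 2700;    /* 27 MHz in 10 kHz units */
	uint32_t ss_rate = 30;              /* spread spectrum rate */
	uint32_t ss_percentage = 200;       /* 2.00% in units of 0.01% */

	/* QDR: Fnom = Fmclk * 4 * 2^post_divider (non-QDR would use * 2) */
	uint32_t freq_nom = memory_clock * 4 * (1u << post_divider);
	uint32_t tmp = freq_nom / reference_clock;
	uint32_t clks, clkv;

	tmp = tmp * tmp;                    /* (Fnom / Fref)^2, reference divider = 1 */

	clks = reference_clock * 5 / ss_rate;
	clkv = (uint32_t)((((131 * ss_percentage * ss_rate) / 100) * tmp) / freq_nom);

	printf("CLKS=%u CLKV=%u\n", clks, clkv);
	return 0;
}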
1180static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
1181 bool strobe_mode)
1182{
1183 uint8_t mc_para_index;
1184
1185 if (strobe_mode) {
1186 if (memory_clock < 12500) {
1187 mc_para_index = 0x00;
1188 } else if (memory_clock > 47500) {
1189 mc_para_index = 0x0f;
1190 } else {
1191 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
1192 }
1193 } else {
1194 if (memory_clock < 65000) {
1195 mc_para_index = 0x00;
1196 } else if (memory_clock > 135000) {
1197 mc_para_index = 0x0f;
1198 } else {
1199 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
1200 }
1201 }
1202
1203 return mc_para_index;
1204}
1205
1206static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
1207{
1208 uint8_t mc_para_index;
1209
1210 if (memory_clock < 10000) {
1211 mc_para_index = 0;
1212 } else if (memory_clock >= 80000) {
1213 mc_para_index = 0x0f;
1214 } else {
1215 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
1216 }
1217
1218 return mc_para_index;
1219}
1220
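The two ratio helpers above clamp the memory clock into a 4-bit index using a simple piecewise-linear mapping. A small standalone sketch of the strobe-mode variant with a few assumed sample clocks (in the same 10 kHz units the driver uses):

#include <stdio.h>
#include <stdint.h>

/* same piecewise mapping as iceland_get_mclk_frequency_ratio() in strobe mode */
static uint8_t strobe_ratio(uint32_t memory_clock)
{
	if (memory_clock < 12500)
		return 0x00;
	if (memory_clock > 47500)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 2500);
}

int main(void)
{
	/* assumed example clocks in 10 kHz units */
	uint32_t clocks[] = { 10000, 30000, 50000 };
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("mclk=%u -> ratio=0x%02x\n", clocks[i], strobe_ratio(clocks[i]));
	return 0;
}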
1221static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1222 uint32_t memory_clock, uint32_t *p_shed)
1223{
1224 unsigned int i;
1225
1226 *p_shed = 1;
1227
1228 for (i = 0; i < pl->count; i++) {
1229 if (memory_clock < pl->entries[i].Mclk) {
1230 *p_shed = i;
1231 break;
1232 }
1233 }
1234
1235 return 0;
1236}
1237
1238static int iceland_populate_single_memory_level(
1239 struct pp_hwmgr *hwmgr,
1240 uint32_t memory_clock,
1241 SMU71_Discrete_MemoryLevel *memory_level
1242 )
1243{
1244 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1245 int result = 0;
1246 bool dll_state_on;
1247 struct cgs_display_info info = {0};
1248 uint32_t mclk_edc_wr_enable_threshold = 40000;
1249 uint32_t mclk_edc_enable_threshold = 40000;
1250 uint32_t mclk_strobe_mode_threshold = 40000;
1251
1252 if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1253 result = iceland_get_dependency_volt_by_clk(hwmgr,
1254 hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1255 PP_ASSERT_WITH_CODE((0 == result),
1256 "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1257 }
1258
1259 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
1260 memory_level->MinVddci = memory_level->MinVddc;
1261 } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1262 result = iceland_get_dependency_volt_by_clk(hwmgr,
1263 hwmgr->dyn_state.vddci_dependency_on_mclk,
1264 memory_clock,
1265 &memory_level->MinVddci);
1266 PP_ASSERT_WITH_CODE((0 == result),
1267 "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1268 }
1269
1270 memory_level->MinVddcPhases = 1;
1271
1272 if (data->vddc_phase_shed_control) {
1273 iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1274 memory_clock, &memory_level->MinVddcPhases);
1275 }
1276
1277 memory_level->EnabledForThrottle = 1;
1278 memory_level->EnabledForActivity = 0;
1279 memory_level->UpHyst = 0;
1280 memory_level->DownHyst = 100;
1281 memory_level->VoltageDownHyst = 0;
1282
1283 /* Indicates maximum activity level for this performance level.*/
1284 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1285 memory_level->StutterEnable = 0;
1286 memory_level->StrobeEnable = 0;
1287 memory_level->EdcReadEnable = 0;
1288 memory_level->EdcWriteEnable = 0;
1289 memory_level->RttEnable = 0;
1290
1291 /* default set to low watermark. Highest level will be set to high later.*/
1292 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1293
1294 cgs_get_active_displays_info(hwmgr->device, &info);
1295 data->display_timing.num_existing_displays = info.display_count;
1296
1297	/* stutter mode is not supported on Iceland */
1298
1299 /* decide strobe mode*/
1300 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1301 (memory_clock <= mclk_strobe_mode_threshold);
1302
1303 /* decide EDC mode and memory clock ratio*/
1304 if (data->is_memory_gddr5) {
1305 memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
1306 memory_level->StrobeEnable);
1307
1308 if ((mclk_edc_enable_threshold != 0) &&
1309 (memory_clock > mclk_edc_enable_threshold)) {
1310 memory_level->EdcReadEnable = 1;
1311 }
1312
1313 if ((mclk_edc_wr_enable_threshold != 0) &&
1314 (memory_clock > mclk_edc_wr_enable_threshold)) {
1315 memory_level->EdcWriteEnable = 1;
1316 }
1317
1318 if (memory_level->StrobeEnable) {
1319 if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
1320 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1321 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1322 else
1323 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1324 } else
1325 dll_state_on = data->dll_default_on;
1326 } else {
1327 memory_level->StrobeRatio =
1328 iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
1329 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1330 }
1331
1332 result = iceland_calculate_mclk_params(hwmgr,
1333 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1334
1335 if (0 == result) {
1336 memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1337 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1338 memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1339 memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1340 /* MCLK frequency in units of 10KHz*/
1341 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1342 /* Indicates maximum activity level for this performance level.*/
1343 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1344 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1345 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1346 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1347 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1348 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1349 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1350 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1351 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1352 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1353 }
1354
1355 return result;
1356}
1357
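The CONVERT_FROM_HOST_TO_SMC_UL/_US macros used above effectively byte-swap each field, since the SMC consumes its tables in big-endian order. A standalone equivalent of that swap on assumed sample values:

#include <stdio.h>
#include <stdint.h>

/* byte-swap helpers doing what the host-to-SMC conversion macros do on a
 * little-endian host
 */
static uint32_t host_to_smc_ul(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

static uint16_t host_to_smc_us(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint32_t mclk = 100000;     /* assumed example: 1000 MHz in 10 kHz units */
	uint16_t activity = 30;

	printf("0x%08x -> 0x%08x\n", mclk, host_to_smc_ul(mclk));
	printf("0x%04x -> 0x%04x\n", activity, host_to_smc_us(activity));
	return 0;
}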
1358static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1359{
1360 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1361 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1362 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1363 int result;
1364
1365 /* populate MCLK dpm table to SMU7 */
1366 uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
1367 uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
1368 SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
1369 uint32_t i;
1370
1371 memset(levels, 0x00, level_array_size);
1372
1373 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1374 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1375 "can not populate memory level as memory clock is zero", return -EINVAL);
1376 result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
1377 &(smu_data->smc_state_table.MemoryLevel[i]));
1378 if (0 != result) {
1379 return result;
1380 }
1381 }
1382
1383 /* Only enable level 0 for now.*/
1384 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1385
1386	/*
1387	 * Prevent MC activity in stutter mode from pushing DPM up.
1388	 * The UVD change complements this by putting the MCLK in a higher state
1389	 * by default, so that we are not affected by the up threshold or MCLK DPM latency.
1390	 */
1391 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1392 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1393
1394 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1395 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1396 /* set highest level watermark to high*/
1397 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1398
1399	/* the level count is sent to the SMC once, at SMC table init, and never changes */
1400 result = smu7_copy_bytes_to_smc(hwmgr,
1401 level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
1402 SMC_RAM_END);
1403
1404 return result;
1405}
1406
1407static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1408 SMU71_Discrete_VoltageLevel *voltage)
1409{
1410 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1411
1412 uint32_t i = 0;
1413
1414 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1415		/* find the first MVDD entry whose clock is greater than or equal to the requested clock */
1416 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1417 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1418 /* Always round to higher voltage. */
1419 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1420 break;
1421 }
1422 }
1423
1424 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1425 "MVDD Voltage is outside the supported range.", return -EINVAL);
1426
1427 } else {
1428 return -EINVAL;
1429 }
1430
1431 return 0;
1432}
1433
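iceland_populate_mvdd_value() walks the MVDD dependency table and picks the first entry whose clock covers the request, i.e. it always rounds the voltage up. A standalone sketch of that lookup with an assumed example table:

#include <stdio.h>
#include <stdint.h>

struct clk_voltage { uint32_t clk; uint32_t mv; };

int main(void)
{
	/* assumed example dependency table, sorted by ascending clock */
	struct clk_voltage table[] = {
		{ 40000,  900 },
		{ 60000,  950 },
		{ 80000, 1000 },
	};
	uint32_t mclk = 65000, voltage = 0;
	unsigned int i, n = sizeof(table) / sizeof(table[0]);

	/* pick the first entry whose clock covers the request: round up, never down */
	for (i = 0; i < n; i++) {
		if (mclk <= table[i].clk) {
			voltage = table[i].mv;
			break;
		}
	}

	if (i == n)
		printf("requested clock is outside the supported range\n");
	else
		printf("mclk=%u -> mvdd=%u mV\n", mclk, voltage);
	return 0;
}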
1434static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1435 SMU71_Discrete_DpmTable *table)
1436{
1437 int result = 0;
1438 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1439 struct pp_atomctrl_clock_dividers_vi dividers;
1440 uint32_t vddc_phase_shed_control = 0;
1441
1442 SMU71_Discrete_VoltageLevel voltage_level;
1443 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1444 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1445 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1446 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1447
1448
1449 /* The ACPI state should not do DPM on DC (or ever).*/
1450 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1451
1452 if (data->acpi_vddc)
1453 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1454 else
1455 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1456
1457 table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
1458 /* assign zero for now*/
1459 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1460
1461 /* get the engine clock dividers for this clock value*/
1462 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1463 table->ACPILevel.SclkFrequency, &dividers);
1464
1465 PP_ASSERT_WITH_CODE(result == 0,
1466 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1467
1468 /* divider ID for required SCLK*/
1469 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1470 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1471 table->ACPILevel.DeepSleepDivId = 0;
1472
1473 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1474 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1475 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1476 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1477 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1478 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1479
1480 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1481 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1482 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1483 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1484 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1485 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1486 table->ACPILevel.CcPwrDynRm = 0;
1487 table->ACPILevel.CcPwrDynRm1 = 0;
1488
1489
1490 /* For various features to be enabled/disabled while this level is active.*/
1491 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1492 /* SCLK frequency in units of 10KHz*/
1493 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1494 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1495 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1496 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1497 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1498 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1499 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1500 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1501 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1502
1503 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1504 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1505 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1506
1507 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1508 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1509 else {
1510 if (data->acpi_vddci != 0)
1511 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1512 else
1513 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1514 }
1515
1516 if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
1517 table->MemoryACPILevel.MinMvdd =
1518 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1519 else
1520 table->MemoryACPILevel.MinMvdd = 0;
1521
1522 /* Force reset on DLL*/
1523 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1524 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1525 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1526 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1527
1528 /* Disable DLL in ACPIState*/
1529 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1530 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1531 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1532 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1533
1534 /* Enable DLL bypass signal*/
1535 dll_cntl = PHM_SET_FIELD(dll_cntl,
1536 DLL_CNTL, MRDCK0_BYPASS, 0);
1537 dll_cntl = PHM_SET_FIELD(dll_cntl,
1538 DLL_CNTL, MRDCK1_BYPASS, 0);
1539
1540 table->MemoryACPILevel.DllCntl =
1541 PP_HOST_TO_SMC_UL(dll_cntl);
1542 table->MemoryACPILevel.MclkPwrmgtCntl =
1543 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1544 table->MemoryACPILevel.MpllAdFuncCntl =
1545 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1546 table->MemoryACPILevel.MpllDqFuncCntl =
1547 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1548 table->MemoryACPILevel.MpllFuncCntl =
1549 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1550 table->MemoryACPILevel.MpllFuncCntl_1 =
1551 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1552 table->MemoryACPILevel.MpllFuncCntl_2 =
1553 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1554 table->MemoryACPILevel.MpllSs1 =
1555 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1556 table->MemoryACPILevel.MpllSs2 =
1557 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1558
1559 table->MemoryACPILevel.EnabledForThrottle = 0;
1560 table->MemoryACPILevel.EnabledForActivity = 0;
1561 table->MemoryACPILevel.UpHyst = 0;
1562 table->MemoryACPILevel.DownHyst = 100;
1563 table->MemoryACPILevel.VoltageDownHyst = 0;
1564 /* Indicates maximum activity level for this performance level.*/
1565 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1566
1567 table->MemoryACPILevel.StutterEnable = 0;
1568 table->MemoryACPILevel.StrobeEnable = 0;
1569 table->MemoryACPILevel.EdcReadEnable = 0;
1570 table->MemoryACPILevel.EdcWriteEnable = 0;
1571 table->MemoryACPILevel.RttEnable = 0;
1572
1573 return result;
1574}
1575
1576static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1577 SMU71_Discrete_DpmTable *table)
1578{
1579 return 0;
1580}
1581
1582static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1583 SMU71_Discrete_DpmTable *table)
1584{
1585 return 0;
1586}
1587
1588static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1589 SMU71_Discrete_DpmTable *table)
1590{
1591 return 0;
1592}
1593
1594static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1595 SMU71_Discrete_DpmTable *table)
1596{
1597 return 0;
1598}
1599
1600static int iceland_populate_memory_timing_parameters(
1601 struct pp_hwmgr *hwmgr,
1602 uint32_t engine_clock,
1603 uint32_t memory_clock,
1604 struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
1605 )
1606{
1607 uint32_t dramTiming;
1608 uint32_t dramTiming2;
1609 uint32_t burstTime;
1610 int result;
1611
1612 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1613 engine_clock, memory_clock);
1614
1615 PP_ASSERT_WITH_CODE(result == 0,
1616 "Error calling VBIOS to set DRAM_TIMING.", return result);
1617
1618 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1619 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1620 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1621
1622 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1623 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1624 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1625
1626 return 0;
1627}
1628
1629static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1630{
1631 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1632 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1633 int result = 0;
1634 SMU71_Discrete_MCArbDramTimingTable arb_regs;
1635 uint32_t i, j;
1636
1637 memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
1638
1639 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1640 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1641 result = iceland_populate_memory_timing_parameters
1642 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1643 data->dpm_table.mclk_table.dpm_levels[j].value,
1644 &arb_regs.entries[i][j]);
1645
1646 if (0 != result) {
1647 break;
1648 }
1649 }
1650 }
1651
1652 if (0 == result) {
1653 result = smu7_copy_bytes_to_smc(
1654 hwmgr,
1655 smu_data->smu7_data.arb_table_start,
1656 (uint8_t *)&arb_regs,
1657 sizeof(SMU71_Discrete_MCArbDramTimingTable),
1658 SMC_RAM_END
1659 );
1660 }
1661
1662 return result;
1663}
1664
1665static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1666 SMU71_Discrete_DpmTable *table)
1667{
1668 int result = 0;
1669 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1670 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1671 table->GraphicsBootLevel = 0;
1672 table->MemoryBootLevel = 0;
1673
1674 /* find boot level from dpm table*/
1675 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1676 data->vbios_boot_state.sclk_bootup_value,
1677 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1678
1679 if (0 != result) {
1680 smu_data->smc_state_table.GraphicsBootLevel = 0;
1681		pr_err("VBIOS did not find boot engine clock value "
1682		       "in dependency table. Using Graphics DPM level 0!");
1683 result = 0;
1684 }
1685
1686 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1687 data->vbios_boot_state.mclk_bootup_value,
1688 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1689
1690 if (0 != result) {
1691 smu_data->smc_state_table.MemoryBootLevel = 0;
1692		pr_err("VBIOS did not find boot memory clock value "
1693		       "in dependency table. Using Memory DPM level 0!");
1694 result = 0;
1695 }
1696
1697 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1698 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1699 table->BootVddci = table->BootVddc;
1700 else
1701 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1702
1703 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1704
1705 return result;
1706}
1707
1708static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1709 SMU71_Discrete_MCRegisters *mc_reg_table)
1710{
1711 const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
1712
1713 uint32_t i, j;
1714
1715 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1716 if (smu_data->mc_reg_table.validflag & 1<<j) {
1717 PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1718				"Index of mc_reg_table->address[] array out of bounds", return -EINVAL);
1719 mc_reg_table->address[i].s0 =
1720 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1721 mc_reg_table->address[i].s1 =
1722 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1723 i++;
1724 }
1725 }
1726
1727 mc_reg_table->last = (uint8_t)i;
1728
1729 return 0;
1730}
1731
1732/* Convert register values from driver format to SMC format */
1733static void iceland_convert_mc_registers(
1734 const struct iceland_mc_reg_entry *entry,
1735 SMU71_Discrete_MCRegisterSet *data,
1736 uint32_t num_entries, uint32_t valid_flag)
1737{
1738 uint32_t i, j;
1739
1740 for (i = 0, j = 0; j < num_entries; j++) {
1741 if (valid_flag & 1<<j) {
1742 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1743 i++;
1744 }
1745 }
1746}
1747
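iceland_convert_mc_registers() packs only the registers whose bit is set in validflag into a dense array for the SMC. A standalone sketch of that compaction with assumed example data:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed example: five register values, of which only bits 0, 2 and 4
	 * of valid_flag are set, i.e. only those registers actually change
	 * between MCLK levels and need to be sent to the SMC
	 */
	uint32_t mc_data[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t valid_flag = 0x15;
	uint32_t packed[5];
	uint32_t i, j;

	/* same compaction as the driver loop: j walks the full table,
	 * i only advances for entries whose valid bit is set
	 */
	for (i = 0, j = 0; j < 5; j++) {
		if (valid_flag & (1u << j))
			packed[i++] = mc_data[j];
	}

	for (j = 0; j < i; j++)
		printf("packed[%u] = 0x%02x\n", j, packed[j]);
	return 0;
}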
1748static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
1749 const uint32_t memory_clock,
1750 SMU71_Discrete_MCRegisterSet *mc_reg_table_data
1751 )
1752{
1753 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1754 uint32_t i = 0;
1755
1756 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1757 if (memory_clock <=
1758 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1759 break;
1760 }
1761 }
1762
1763 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1764 --i;
1765
1766 iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1767 mc_reg_table_data, smu_data->mc_reg_table.last,
1768 smu_data->mc_reg_table.validflag);
1769
1770 return 0;
1771}
1772
1773static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1774 SMU71_Discrete_MCRegisters *mc_regs)
1775{
1776 int result = 0;
1777 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1778 int res;
1779 uint32_t i;
1780
1781 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1782 res = iceland_convert_mc_reg_table_entry_to_smc(
1783 hwmgr,
1784 data->dpm_table.mclk_table.dpm_levels[i].value,
1785 &mc_regs->data[i]
1786 );
1787
1788 if (0 != res)
1789 result = res;
1790 }
1791
1792 return result;
1793}
1794
1795static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1796{
1797 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1798 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1799 uint32_t address;
1800 int32_t result;
1801
1802 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1803 return 0;
1804
1805
1806 memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
1807
1808 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1809
1810 if (result != 0)
1811 return result;
1812
1813
1814 address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
1815
1816 return smu7_copy_bytes_to_smc(hwmgr, address,
1817 (uint8_t *)&smu_data->mc_regs.data[0],
1818 sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1819 SMC_RAM_END);
1820}
1821
1822static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1823{
1824 int result;
1825 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1826
1827 memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
1828 result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1829 PP_ASSERT_WITH_CODE(0 == result,
1830 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1831
1832 result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1833 PP_ASSERT_WITH_CODE(0 == result,
1834 "Failed to initialize MCRegTable for driver state!", return result;);
1835
1836 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
1837 (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
1838}
1839
1840static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1841{
1842 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1843 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1844 uint8_t count, level;
1845
1846 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1847
1848 for (level = 0; level < count; level++) {
1849 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1850 >= data->vbios_boot_state.sclk_bootup_value) {
1851 smu_data->smc_state_table.GraphicsBootLevel = level;
1852 break;
1853 }
1854 }
1855
1856 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1857
1858 for (level = 0; level < count; level++) {
1859 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1860 >= data->vbios_boot_state.mclk_bootup_value) {
1861 smu_data->smc_state_table.MemoryBootLevel = level;
1862 break;
1863 }
1864 }
1865
1866 return 0;
1867}
1868
1869static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1870{
1871 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1872 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1873 const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
1874 SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1875 struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
1876 struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
1877 const uint16_t *def1, *def2;
1878 int i, j, k;
1879
1880
1881	/*
1882	 * The number of TDP fraction bits is changed from 8 to 7 for Iceland,
1883	 * as requested by the SMC team.
1884	 */
1885
1886 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
1887 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1888
1889
1890 dpm_table->DTETjOffset = 0;
1891
1892 dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
1893 dpm_table->GpuTjHyst = 8;
1894
1895 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1896
1897 /* The following are for new Iceland Multi-input fan/thermal control */
1898 if (NULL != ppm) {
1899 dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
1900 dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
1901 } else {
1902 dpm_table->PPM_PkgPwrLimit = 0;
1903 dpm_table->PPM_TemperatureLimit = 0;
1904 }
1905
1906 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
1907 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
1908
1909 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
1910 def1 = defaults->bapmti_r;
1911 def2 = defaults->bapmti_rc;
1912
1913 for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
1914 for (j = 0; j < SMU71_DTE_SOURCES; j++) {
1915 for (k = 0; k < SMU71_DTE_SINKS; k++) {
1916 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
1917 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
1918 def1++;
1919 def2++;
1920 }
1921 }
1922 }
1923
1924 return 0;
1925}
1926
1927static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1928 SMU71_Discrete_DpmTable *tab)
1929{
1930 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1931
1932 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1933 tab->SVI2Enable |= VDDC_ON_SVI2;
1934
1935 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1936 tab->SVI2Enable |= VDDCI_ON_SVI2;
1937 else
1938 tab->MergedVddci = 1;
1939
1940 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
1941 tab->SVI2Enable |= MVDD_ON_SVI2;
1942
1943 PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
1944 (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
1945
1946 return 0;
1947}
1948
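The SVI2 check above requires VDDC to be controlled over SVI2 while rejecting the case where all three rails are. A standalone sketch of that constraint; the bit values below are assumed placeholders, not the real PPSMC definitions:

#include <stdio.h>
#include <stdint.h>

#define VDDC_ON_SVI2  0x1   /* assumed example bit values */
#define VDDCI_ON_SVI2 0x2
#define MVDD_ON_SVI2  0x4

static int svi2_config_valid(uint8_t enable)
{
	/* VDDC must be on SVI2, but not all three rails at once */
	return (enable & VDDC_ON_SVI2) &&
	       enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2);
}

int main(void)
{
	printf("VDDC only:       %s\n", svi2_config_valid(VDDC_ON_SVI2) ? "ok" : "invalid");
	printf("VDDC+VDDCI:      %s\n", svi2_config_valid(VDDC_ON_SVI2 | VDDCI_ON_SVI2) ? "ok" : "invalid");
	printf("all three rails: %s\n", svi2_config_valid(VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) ? "ok" : "invalid");
	return 0;
}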
1949static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
1950{
1951 int result;
1952 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1953 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
1954 SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1955
1956
1957 iceland_initialize_power_tune_defaults(hwmgr);
1958 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1959
1960 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
1961 iceland_populate_smc_voltage_tables(hwmgr, table);
1962 }
1963
1964 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1965 PHM_PlatformCaps_AutomaticDCTransition))
1966 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1967
1968
1969 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1970 PHM_PlatformCaps_StepVddc))
1971 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1972
1973 if (data->is_memory_gddr5)
1974 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1975
1976
1977 if (data->ulv_supported) {
1978 result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
1979 PP_ASSERT_WITH_CODE(0 == result,
1980 "Failed to initialize ULV state!", return result;);
1981
1982 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1983 ixCG_ULV_PARAMETER, 0x40035);
1984 }
1985
1986 result = iceland_populate_smc_link_level(hwmgr, table);
1987 PP_ASSERT_WITH_CODE(0 == result,
1988 "Failed to initialize Link Level!", return result;);
1989
1990 result = iceland_populate_all_graphic_levels(hwmgr);
1991 PP_ASSERT_WITH_CODE(0 == result,
1992 "Failed to initialize Graphics Level!", return result;);
1993
1994 result = iceland_populate_all_memory_levels(hwmgr);
1995 PP_ASSERT_WITH_CODE(0 == result,
1996 "Failed to initialize Memory Level!", return result;);
1997
1998 result = iceland_populate_smc_acpi_level(hwmgr, table);
1999 PP_ASSERT_WITH_CODE(0 == result,
2000 "Failed to initialize ACPI Level!", return result;);
2001
2002 result = iceland_populate_smc_vce_level(hwmgr, table);
2003 PP_ASSERT_WITH_CODE(0 == result,
2004 "Failed to initialize VCE Level!", return result;);
2005
2006 result = iceland_populate_smc_acp_level(hwmgr, table);
2007 PP_ASSERT_WITH_CODE(0 == result,
2008 "Failed to initialize ACP Level!", return result;);
2009
2010 result = iceland_populate_smc_samu_level(hwmgr, table);
2011 PP_ASSERT_WITH_CODE(0 == result,
2012 "Failed to initialize SAMU Level!", return result;);
2013
2014 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2015 /* need to populate the ARB settings for the initial state. */
2016 result = iceland_program_memory_timing_parameters(hwmgr);
2017 PP_ASSERT_WITH_CODE(0 == result,
2018 "Failed to Write ARB settings for the initial state.", return result;);
2019
2020 result = iceland_populate_smc_uvd_level(hwmgr, table);
2021 PP_ASSERT_WITH_CODE(0 == result,
2022 "Failed to initialize UVD Level!", return result;);
2023
2024 table->GraphicsBootLevel = 0;
2025 table->MemoryBootLevel = 0;
2026
2027 result = iceland_populate_smc_boot_level(hwmgr, table);
2028 PP_ASSERT_WITH_CODE(0 == result,
2029 "Failed to initialize Boot Level!", return result;);
2030
2031 result = iceland_populate_smc_initial_state(hwmgr);
2032 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2033
2034 result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
2035 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2036
2037 table->GraphicsVoltageChangeEnable = 1;
2038 table->GraphicsThermThrottleEnable = 1;
2039 table->GraphicsInterval = 1;
2040 table->VoltageInterval = 1;
2041 table->ThermalInterval = 1;
2042
2043 table->TemperatureLimitHigh =
2044 (data->thermal_temp_setting.temperature_high *
2045 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2046 table->TemperatureLimitLow =
2047 (data->thermal_temp_setting.temperature_low *
2048 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2049
2050 table->MemoryVoltageChangeEnable = 1;
2051 table->MemoryInterval = 1;
2052 table->VoltageResponseTime = 0;
2053 table->PhaseResponseTime = 0;
2054 table->MemoryThermThrottleEnable = 1;
2055 table->PCIeBootLinkLevel = 0;
2056 table->PCIeGenInterval = 1;
2057
2058 result = iceland_populate_smc_svi2_config(hwmgr, table);
2059 PP_ASSERT_WITH_CODE(0 == result,
2060 "Failed to populate SVI2 setting!", return result);
2061
2062 table->ThermGpio = 17;
2063 table->SclkStepSize = 0x4000;
2064
2065 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2066 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2067 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2068 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2069 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2070 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2071 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2072 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2073 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2074 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2075
2076 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2077 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2078 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2079
2080	/* Upload all DPM data (levels, level count, etc.) to SMC memory */
2081 result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
2082 offsetof(SMU71_Discrete_DpmTable, SystemFlags),
2083 (uint8_t *)&(table->SystemFlags),
2084 sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
2085 SMC_RAM_END);
2086
2087 PP_ASSERT_WITH_CODE(0 == result,
2088 "Failed to upload dpm data to SMC memory!", return result;);
2089
2090	/* Upload all ULV settings to SMC memory */
2091 result = smu7_copy_bytes_to_smc(hwmgr,
2092 smu_data->smu7_data.ulv_setting_starts,
2093 (uint8_t *)&(smu_data->ulv_setting),
2094 sizeof(SMU71_Discrete_Ulv),
2095 SMC_RAM_END);
2096
2097
2098 result = iceland_populate_initial_mc_reg_table(hwmgr);
2099 PP_ASSERT_WITH_CODE((0 == result),
2100			"Failed to populate the initial MC register table!", return result);
2101
2102 result = iceland_populate_pm_fuses(hwmgr);
2103 PP_ASSERT_WITH_CODE(0 == result,
2104 "Failed to populate PM fuses to SMC memory!", return result);
2105
2106 return 0;
2107}
2108
2109int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2110{
2111 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2112 SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2113 uint32_t duty100;
2114 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2115 uint16_t fdo_min, slope1, slope2;
2116 uint32_t reference_clock;
2117 int res;
2118 uint64_t tmp64;
2119
2120 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2121 return 0;
2122
2123 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2124 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2125 PHM_PlatformCaps_MicrocodeFanControl);
2126 return 0;
2127 }
2128
2129 if (0 == smu7_data->fan_table_start) {
2130 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2131 return 0;
2132 }
2133
2134 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2135
2136 if (0 == duty100) {
2137 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2138 return 0;
2139 }
2140
2141 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2142 do_div(tmp64, 10000);
2143 fdo_min = (uint16_t)tmp64;
2144
2145 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2146 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2147
2148 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2149 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2150
2151 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2152 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2153
2154 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2155 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2156 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2157
2158 fan_table.Slope1 = cpu_to_be16(slope1);
2159 fan_table.Slope2 = cpu_to_be16(slope2);
2160
2161 fan_table.FdoMin = cpu_to_be16(fdo_min);
2162
2163 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2164
2165 fan_table.HystUp = cpu_to_be16(1);
2166
2167 fan_table.HystSlope = cpu_to_be16(1);
2168
2169 fan_table.TempRespLim = cpu_to_be16(5);
2170
2171 reference_clock = smu7_get_xclk(hwmgr);
2172
2173 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2174
2175 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2176
2177 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2178
2179 /* fan_table.FanControl_GL_Flag = 1; */
2180
2181 res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2182
2183 return 0;
2184}
2185
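The fan-table math above converts PWM points given in 0.01% and temperatures in 0.01 degC into a minimum duty value and two slopes in what appears to be a 1/16 fixed-point format. A standalone sketch of that arithmetic with assumed example inputs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed example fan-table inputs (temperatures in 0.01 C, PWM in 0.01%) */
	uint32_t duty100 = 255;             /* stands in for FMAX_DUTY100 */
	uint32_t pwm_min = 3000, pwm_med = 5000, pwm_high = 10000;
	uint32_t t_min = 4500, t_med = 6500, t_high = 9000;

	uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);

	uint32_t t_diff1 = t_med - t_min, t_diff2 = t_high - t_med;
	uint32_t pwm_diff1 = pwm_med - pwm_min, pwm_diff2 = pwm_high - pwm_med;

	/* duty steps per degree, scaled by 16, with +50 applied for rounding
	 * before the final /100
	 */
	uint16_t slope1 = (uint16_t)((50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100);
	uint16_t slope2 = (uint16_t)((50 + (16 * duty100 * pwm_diff2) / t_diff2) / 100);

	printf("fdo_min=%u slope1=%u slope2=%u temp_min=%u\n",
	       fdo_min, slope1, slope2, (50 + t_min) / 100);
	return 0;
}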
2186
2187static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2188{
2189 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2190
2191 if (data->need_update_smu7_dpm_table &
2192 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2193 return iceland_program_memory_timing_parameters(hwmgr);
2194
2195 return 0;
2196}
2197
2198static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2199{
2200 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2201 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
2202
2203 int result = 0;
2204 uint32_t low_sclk_interrupt_threshold = 0;
2205
2206 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2207 PHM_PlatformCaps_SclkThrottleLowNotification)
2208 && (hwmgr->gfx_arbiter.sclk_threshold !=
2209 data->low_sclk_interrupt_threshold)) {
2210 data->low_sclk_interrupt_threshold =
2211 hwmgr->gfx_arbiter.sclk_threshold;
2212 low_sclk_interrupt_threshold =
2213 data->low_sclk_interrupt_threshold;
2214
2215 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2216
2217 result = smu7_copy_bytes_to_smc(
2218 hwmgr,
2219 smu_data->smu7_data.dpm_table_start +
2220 offsetof(SMU71_Discrete_DpmTable,
2221 LowSclkInterruptThreshold),
2222 (uint8_t *)&low_sclk_interrupt_threshold,
2223 sizeof(uint32_t),
2224 SMC_RAM_END);
2225 }
2226
2227 result = iceland_update_and_upload_mc_reg_table(hwmgr);
2228
2229 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2230
2231 result = iceland_program_mem_timing_parameters(hwmgr);
2232 PP_ASSERT_WITH_CODE((result == 0),
2233 "Failed to program memory timing parameters!",
2234 );
2235
2236 return result;
2237}
2238
2239static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
2240{
2241 switch (type) {
2242 case SMU_SoftRegisters:
2243 switch (member) {
2244 case HandshakeDisables:
2245 return offsetof(SMU71_SoftRegisters, HandshakeDisables);
2246 case VoltageChangeTimeout:
2247 return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
2248 case AverageGraphicsActivity:
2249 return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
2250 case PreVBlankGap:
2251 return offsetof(SMU71_SoftRegisters, PreVBlankGap);
2252 case VBlankTimeout:
2253 return offsetof(SMU71_SoftRegisters, VBlankTimeout);
2254 case UcodeLoadStatus:
2255 return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
2256 case DRAM_LOG_ADDR_H:
2257 return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_H);
2258 case DRAM_LOG_ADDR_L:
2259 return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_L);
2260 case DRAM_LOG_PHY_ADDR_H:
2261 return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2262 case DRAM_LOG_PHY_ADDR_L:
2263 return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2264 case DRAM_LOG_BUFF_SIZE:
2265 return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2266 }
2267 case SMU_Discrete_DpmTable:
2268 switch (member) {
2269 case LowSclkInterruptThreshold:
2270 return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
2271 }
2272 }
2273 pr_warn("can't get the offset of type %x member %x\n", type, member);
2274 return 0;
2275}
2276
2277static uint32_t iceland_get_mac_definition(uint32_t value)
2278{
2279 switch (value) {
2280 case SMU_MAX_LEVELS_GRAPHICS:
2281 return SMU71_MAX_LEVELS_GRAPHICS;
2282 case SMU_MAX_LEVELS_MEMORY:
2283 return SMU71_MAX_LEVELS_MEMORY;
2284 case SMU_MAX_LEVELS_LINK:
2285 return SMU71_MAX_LEVELS_LINK;
2286 case SMU_MAX_ENTRIES_SMIO:
2287 return SMU71_MAX_ENTRIES_SMIO;
2288 case SMU_MAX_LEVELS_VDDC:
2289 return SMU71_MAX_LEVELS_VDDC;
2290 case SMU_MAX_LEVELS_VDDCI:
2291 return SMU71_MAX_LEVELS_VDDCI;
2292 case SMU_MAX_LEVELS_MVDD:
2293 return SMU71_MAX_LEVELS_MVDD;
2294 }
2295
2296 pr_warn("can't get the mac of %x\n", value);
2297 return 0;
2298}
2299
2300static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
2301{
2302 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2303 struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2304
2305 uint32_t tmp;
2306 int result;
2307 bool error = false;
2308
2309 result = smu7_read_smc_sram_dword(hwmgr,
2310 SMU71_FIRMWARE_HEADER_LOCATION +
2311 offsetof(SMU71_Firmware_Header, DpmTable),
2312 &tmp, SMC_RAM_END);
2313
2314 if (0 == result) {
2315 smu7_data->dpm_table_start = tmp;
2316 }
2317
2318 error |= (0 != result);
2319
2320 result = smu7_read_smc_sram_dword(hwmgr,
2321 SMU71_FIRMWARE_HEADER_LOCATION +
2322 offsetof(SMU71_Firmware_Header, SoftRegisters),
2323 &tmp, SMC_RAM_END);
2324
2325 if (0 == result) {
2326 data->soft_regs_start = tmp;
2327 smu7_data->soft_regs_start = tmp;
2328 }
2329
2330 error |= (0 != result);
2331
2332
2333 result = smu7_read_smc_sram_dword(hwmgr,
2334 SMU71_FIRMWARE_HEADER_LOCATION +
2335 offsetof(SMU71_Firmware_Header, mcRegisterTable),
2336 &tmp, SMC_RAM_END);
2337
2338 if (0 == result) {
2339 smu7_data->mc_reg_table_start = tmp;
2340 }
2341
2342 result = smu7_read_smc_sram_dword(hwmgr,
2343 SMU71_FIRMWARE_HEADER_LOCATION +
2344 offsetof(SMU71_Firmware_Header, FanTable),
2345 &tmp, SMC_RAM_END);
2346
2347 if (0 == result) {
2348 smu7_data->fan_table_start = tmp;
2349 }
2350
2351 error |= (0 != result);
2352
2353 result = smu7_read_smc_sram_dword(hwmgr,
2354 SMU71_FIRMWARE_HEADER_LOCATION +
2355 offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
2356 &tmp, SMC_RAM_END);
2357
2358 if (0 == result) {
2359 smu7_data->arb_table_start = tmp;
2360 }
2361
2362 error |= (0 != result);
2363
2364
2365 result = smu7_read_smc_sram_dword(hwmgr,
2366 SMU71_FIRMWARE_HEADER_LOCATION +
2367 offsetof(SMU71_Firmware_Header, Version),
2368 &tmp, SMC_RAM_END);
2369
2370 if (0 == result) {
2371 hwmgr->microcode_version_info.SMC = tmp;
2372 }
2373
2374 error |= (0 != result);
2375
2376 result = smu7_read_smc_sram_dword(hwmgr,
2377 SMU71_FIRMWARE_HEADER_LOCATION +
2378 offsetof(SMU71_Firmware_Header, UlvSettings),
2379 &tmp, SMC_RAM_END);
2380
2381 if (0 == result) {
2382 smu7_data->ulv_setting_starts = tmp;
2383 }
2384
2385 error |= (0 != result);
2386
2387 return error ? 1 : 0;
2388}
2389
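iceland_process_firmware_header() reads each table start address out of SMC SRAM at a fixed header location plus the field's offsetof(). A standalone toy of that pattern; the header layout and addresses below are invented for illustration only:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* toy header standing in for the firmware header: the driver reads each field
 * at header_location + offsetof(header, field) out of SMC SRAM and caches the
 * resulting table start addresses
 */
struct fw_header {
	uint32_t Version;
	uint32_t DpmTable;
	uint32_t SoftRegisters;
	uint32_t FanTable;
};

static int read_sram_dword(const uint8_t *sram, size_t sram_size,
			   size_t addr, uint32_t *out)
{
	if (addr + sizeof(*out) > sram_size)
		return -1;
	memcpy(out, sram + addr, sizeof(*out));
	return 0;
}

int main(void)
{
	uint8_t sram[64] = { 0 };
	struct fw_header hdr = { 0x011600, 0x2000, 0x2400, 0x2800 };
	uint32_t dpm_table_start = 0;

	memcpy(sram, &hdr, sizeof(hdr));    /* header at location 0 for the example */

	if (!read_sram_dword(sram, sizeof(sram),
			     offsetof(struct fw_header, DpmTable), &dpm_table_start))
		printf("dpm_table_start = 0x%x\n", dpm_table_start);
	return 0;
}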
2390/*---------------------------MC----------------------------*/
2391
2392static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2393{
2394 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2395}
2396
2397static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2398{
2399 bool result = true;
2400
2401 switch (in_reg) {
2402 case mmMC_SEQ_RAS_TIMING:
2403 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2404 break;
2405
2406 case mmMC_SEQ_DLL_STBY:
2407 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2408 break;
2409
2410 case mmMC_SEQ_G5PDX_CMD0:
2411 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2412 break;
2413
2414 case mmMC_SEQ_G5PDX_CMD1:
2415 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2416 break;
2417
2418 case mmMC_SEQ_G5PDX_CTRL:
2419 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2420 break;
2421
2422 case mmMC_SEQ_CAS_TIMING:
2423 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2424 break;
2425
2426 case mmMC_SEQ_MISC_TIMING:
2427 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2428 break;
2429
2430 case mmMC_SEQ_MISC_TIMING2:
2431 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2432 break;
2433
2434 case mmMC_SEQ_PMG_DVS_CMD:
2435 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2436 break;
2437
2438 case mmMC_SEQ_PMG_DVS_CTL:
2439 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2440 break;
2441
2442 case mmMC_SEQ_RD_CTL_D0:
2443 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2444 break;
2445
2446 case mmMC_SEQ_RD_CTL_D1:
2447 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2448 break;
2449
2450 case mmMC_SEQ_WR_CTL_D0:
2451 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2452 break;
2453
2454 case mmMC_SEQ_WR_CTL_D1:
2455 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2456 break;
2457
2458 case mmMC_PMG_CMD_EMRS:
2459 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2460 break;
2461
2462 case mmMC_PMG_CMD_MRS:
2463 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2464 break;
2465
2466 case mmMC_PMG_CMD_MRS1:
2467 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2468 break;
2469
2470 case mmMC_SEQ_PMG_TIMING:
2471 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2472 break;
2473
2474 case mmMC_PMG_CMD_MRS2:
2475 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2476 break;
2477
2478 case mmMC_SEQ_WR_CTL_2:
2479 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2480 break;
2481
2482 default:
2483 result = false;
2484 break;
2485 }
2486
2487 return result;
2488}
2489
2490static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
2491{
2492 uint32_t i;
2493 uint16_t address;
2494
2495 for (i = 0; i < table->last; i++) {
2496 table->mc_reg_address[i].s0 =
2497 iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2498 ? address : table->mc_reg_address[i].s1;
2499 }
2500 return 0;
2501}
2502
2503static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2504 struct iceland_mc_reg_table *ni_table)
2505{
2506 uint8_t i, j;
2507
2508 PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2509 "Invalid VramInfo table.", return -EINVAL);
2510 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2511 "Invalid VramInfo table.", return -EINVAL);
2512
2513 for (i = 0; i < table->last; i++) {
2514 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2515 }
2516 ni_table->last = table->last;
2517
2518 for (i = 0; i < table->num_entries; i++) {
2519 ni_table->mc_reg_table_entry[i].mclk_max =
2520 table->mc_reg_table_entry[i].mclk_max;
2521 for (j = 0; j < table->last; j++) {
2522 ni_table->mc_reg_table_entry[i].mc_data[j] =
2523 table->mc_reg_table_entry[i].mc_data[j];
2524 }
2525 }
2526
2527 ni_table->num_entries = table->num_entries;
2528
2529 return 0;
2530}
2531
2532static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2533 struct iceland_mc_reg_table *table)
2534{
2535 uint8_t i, j, k;
2536 uint32_t temp_reg;
2537 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2538
2539 for (i = 0, j = table->last; i < table->last; i++) {
2540 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2541 "Invalid VramInfo table.", return -EINVAL);
2542
2543 switch (table->mc_reg_address[i].s1) {
2544
2545 case mmMC_SEQ_MISC1:
2546 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2547 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2548 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2549 for (k = 0; k < table->num_entries; k++) {
2550 table->mc_reg_table_entry[k].mc_data[j] =
2551 ((temp_reg & 0xffff0000)) |
2552 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2553 }
2554 j++;
2555 PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2556 "Invalid VramInfo table.", return -EINVAL);
2557
2558 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2559 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2560 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2561 for (k = 0; k < table->num_entries; k++) {
2562 table->mc_reg_table_entry[k].mc_data[j] =
2563 (temp_reg & 0xffff0000) |
2564 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2565
2566 if (!data->is_memory_gddr5) {
2567 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2568 }
2569 }
2570 j++;
2571 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2572 "Invalid VramInfo table.", return -EINVAL);
2573
2574 if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
2575 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2576 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2577 for (k = 0; k < table->num_entries; k++) {
2578 table->mc_reg_table_entry[k].mc_data[j] =
2579 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2580 }
2581 j++;
2582 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2583 "Invalid VramInfo table.", return -EINVAL);
2584 }
2585
2586 break;
2587
2588 case mmMC_SEQ_RESERVE_M:
2589 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2590 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2591 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2592 for (k = 0; k < table->num_entries; k++) {
2593 table->mc_reg_table_entry[k].mc_data[j] =
2594 (temp_reg & 0xffff0000) |
2595 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2596 }
2597 j++;
2598 PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2599 "Invalid VramInfo table.", return -EINVAL);
2600 break;
2601
2602 default:
2603 break;
2604 }
2605
2606 }
2607
2608 table->last = j;
2609
2610 return 0;
2611}
2612
2613static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
2614{
2615 uint8_t i, j;
2616 for (i = 0; i < table->last; i++) {
2617 for (j = 1; j < table->num_entries; j++) {
2618 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2619 table->mc_reg_table_entry[j].mc_data[i]) {
2620 table->validflag |= (1<<i);
2621 break;
2622 }
2623 }
2624 }
2625
2626 return 0;
2627}
2628
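iceland_set_valid_flag() marks a register column as valid only if its value differs between at least two MCLK entries, so constant registers are never re-uploaded. A standalone sketch with an assumed example table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed example: 3 MCLK entries x 4 MC registers; a register's valid
	 * bit is set only if its value differs between at least two entries
	 */
	uint32_t mc_data[3][4] = {
		{ 0xA0, 0x10, 0x77, 0x01 },
		{ 0xA0, 0x20, 0x77, 0x02 },
		{ 0xA0, 0x30, 0x77, 0x03 },
	};
	uint32_t validflag = 0;
	uint32_t i, j;

	for (i = 0; i < 4; i++) {
		for (j = 1; j < 3; j++) {
			if (mc_data[j - 1][i] != mc_data[j][i]) {
				validflag |= (1u << i);
				break;
			}
		}
	}

	printf("validflag = 0x%x\n", validflag);   /* registers 1 and 3 vary -> 0xa */
	return 0;
}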
2629static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2630{
2631 int result;
2632 struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
2633 pp_atomctrl_mc_reg_table *table;
2634 struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2635 uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
2636
2637 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2638
2639 if (NULL == table)
2640 return -ENOMEM;
2641
2642 /* Program additional LP registers that are no longer programmed by VBIOS */
2643 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2644 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2645 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2646 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2647 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2648 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2649 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2650 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2651 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2652 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2653 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2654 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2655 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2656 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2657 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2658 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2659 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2660 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2661 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2662 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2663
2664 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2665
2666 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2667
2668 if (0 == result)
2669 result = iceland_copy_vbios_smc_reg_table(table, ni_table);
2670
2671 if (0 == result) {
2672 iceland_set_s0_mc_reg_index(ni_table);
2673 result = iceland_set_mc_special_registers(hwmgr, ni_table);
2674 }
2675
2676 if (0 == result)
2677 iceland_set_valid_flag(ni_table);
2678
2679 kfree(table);
2680
2681 return result;
2682}
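/*
 * Note on the initialization flow above: the MC register table is read from
 * the VBIOS via atomctrl, copied into the driver-side iceland_mc_reg_table,
 * the s0 register indices are filled in (iceland_set_s0_mc_reg_index), the
 * special MRS registers are expanded, and finally validflag marks every
 * column whose values differ across entries.
 */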
2683
2684static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
2685{
2686 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2687 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2688 ? true : false;
2689}
2690
222 2691 const struct pp_smumgr_func iceland_smu_funcs = {
223 2692 	.smu_init = &iceland_smu_init,
224 2693 	.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
deleted file mode 100644
index c92ea38d2e15..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ /dev/null
@@ -1,2344 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "pp_debug.h"
25#include "polaris10_smc.h"
26#include "smu7_dyn_defaults.h"
27
28#include "smu7_hwmgr.h"
29#include "hardwaremanager.h"
30#include "ppatomctrl.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "polaris10_smumgr.h"
34#include "pppcielanes.h"
35
36#include "smu_ucode_xfer_vi.h"
37#include "smu74_discrete.h"
38#include "smu/smu_7_1_3_d.h"
39#include "smu/smu_7_1_3_sh_mask.h"
40#include "gmc/gmc_8_1_d.h"
41#include "gmc/gmc_8_1_sh_mask.h"
42#include "oss/oss_3_0_d.h"
43#include "gca/gfx_8_0_d.h"
44#include "bif/bif_5_0_d.h"
45#include "bif/bif_5_0_sh_mask.h"
46#include "dce/dce_10_0_d.h"
47#include "dce/dce_10_0_sh_mask.h"
48#include "polaris10_pwrvirus.h"
49#include "smu7_ppsmc.h"
50#include "smu7_smumgr.h"
51
52#define POLARIS10_SMC_SIZE 0x20000
53#define VOLTAGE_VID_OFFSET_SCALE1 625
54#define VOLTAGE_VID_OFFSET_SCALE2 100
55#define POWERTUNE_DEFAULT_SET_MAX 1
56#define VDDC_VDDCI_DELTA 200
57#define MC_CG_ARB_FREQ_F1 0x0b
58
59static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
60 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
61 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
62 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
65};
66
67static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
68 {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
69 {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
70 {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
71 {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
72 {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
73 {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
74 {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
75 {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
76
77static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
78 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
79 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
80{
81 uint32_t i;
82 uint16_t vddci;
83 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
84
85 *voltage = *mvdd = 0;
86
87	/* the clock-voltage dependency table is empty */
88 if (dep_table->count == 0)
89 return -EINVAL;
90
91 for (i = 0; i < dep_table->count; i++) {
92		/* find the first sclk that is >= the requested clock */
93 if (dep_table->entries[i].clk >= clock) {
94 *voltage |= (dep_table->entries[i].vddc *
95 VOLTAGE_SCALE) << VDDC_SHIFT;
96 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
97 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
98 VOLTAGE_SCALE) << VDDCI_SHIFT;
99 else if (dep_table->entries[i].vddci)
100 *voltage |= (dep_table->entries[i].vddci *
101 VOLTAGE_SCALE) << VDDCI_SHIFT;
102 else {
103 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
104 (dep_table->entries[i].vddc -
105 (uint16_t)VDDC_VDDCI_DELTA));
106 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
107 }
108
109 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
110 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
111 VOLTAGE_SCALE;
112 else if (dep_table->entries[i].mvdd)
113 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
114 VOLTAGE_SCALE;
115
116 *voltage |= 1 << PHASES_SHIFT;
117 return 0;
118 }
119 }
120
121	/* sclk is bigger than the max sclk in the dependency table */
122 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
123
124 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
125 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
126 VOLTAGE_SCALE) << VDDCI_SHIFT;
127 else if (dep_table->entries[i-1].vddci) {
128 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
129				(dep_table->entries[i - 1].vddc -
130 (uint16_t)VDDC_VDDCI_DELTA));
131 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
132 }
133
134 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
135 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
136	else if (dep_table->entries[i - 1].mvdd)
137 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
138
139 return 0;
140}
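/*
 * Note: the *voltage word built above packs the VDDC and VDDCI values
 * (each multiplied by VOLTAGE_SCALE) into their VDDC_SHIFT / VDDCI_SHIFT
 * bit fields, plus a single phase bit at PHASES_SHIFT; *mvdd is returned
 * separately. Callers such as the graphics- and memory-level population
 * below feed it straight into MinVoltage before the endianness conversion.
 */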
141
142static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
143{
144 uint32_t tmp;
145 tmp = raw_setting * 4096 / 100;
146 return (uint16_t)tmp;
147}
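/*
 * Note: scale_fan_gain_settings() turns a fan-gain percentage into a
 * 4096-based fixed-point value, so 100 maps to 4096 and, for example,
 * 50 maps to 50 * 4096 / 100 = 2048.
 */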
148
149static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
150{
151 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
152
153 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
154 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
155 struct phm_ppt_v1_information *table_info =
156 (struct phm_ppt_v1_information *)(hwmgr->pptable);
157 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
158 struct pp_advance_fan_control_parameters *fan_table =
159 &hwmgr->thermal_controller.advanceFanControlParameters;
160 int i, j, k;
161 const uint16_t *pdef1;
162 const uint16_t *pdef2;
163
164 table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
165 table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
166
167 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
168 "Target Operating Temp is out of Range!",
169 );
170
171 table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
172 cac_dtp_table->usTargetOperatingTemp * 256);
173 table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
174 cac_dtp_table->usTemperatureLimitHotspot * 256);
175 table->FanGainEdge = PP_HOST_TO_SMC_US(
176 scale_fan_gain_settings(fan_table->usFanGainEdge));
177 table->FanGainHotspot = PP_HOST_TO_SMC_US(
178 scale_fan_gain_settings(fan_table->usFanGainHotspot));
179
180 pdef1 = defaults->BAPMTI_R;
181 pdef2 = defaults->BAPMTI_RC;
182
183 for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
184 for (j = 0; j < SMU74_DTE_SOURCES; j++) {
185 for (k = 0; k < SMU74_DTE_SINKS; k++) {
186 table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
187 table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
188 pdef1++;
189 pdef2++;
190 }
191 }
192 }
193
194 return 0;
195}
196
197static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
198{
199 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
200 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
201
202 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
203 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
204 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
205 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
206
207 return 0;
208}
209
210static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
211{
212 uint16_t tdc_limit;
213 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
214 struct phm_ppt_v1_information *table_info =
215 (struct phm_ppt_v1_information *)(hwmgr->pptable);
216 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
217
218 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
219 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
220 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
221 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
222 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
223 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
224
225 return 0;
226}
227
228static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
229{
230 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
231 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
232 uint32_t temp;
233
234 if (smu7_read_smc_sram_dword(hwmgr,
235 fuse_table_offset +
236 offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
237 (uint32_t *)&temp, SMC_RAM_END))
238 PP_ASSERT_WITH_CODE(false,
239 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
240 return -EINVAL);
241 else {
242 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
243 smu_data->power_tune_table.LPMLTemperatureMin =
244 (uint8_t)((temp >> 16) & 0xff);
245 smu_data->power_tune_table.LPMLTemperatureMax =
246 (uint8_t)((temp >> 8) & 0xff);
247 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
248 }
249 return 0;
250}
251
252static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
253{
254 int i;
255 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
256
257 /* Currently not used. Set all to zero. */
258 for (i = 0; i < 16; i++)
259 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
260
261 return 0;
262}
263
264static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
265{
266 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
267
268/* TODO: move to hwmgr */
269 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
270 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
271 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
272 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
273
274 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
275 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
276 return 0;
277}
278
279static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
280{
281 int i;
282 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
283
284 /* Currently not used. Set all to zero. */
285 for (i = 0; i < 16; i++)
286 smu_data->power_tune_table.GnbLPML[i] = 0;
287
288 return 0;
289}
290
291static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
292{
293 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
294 struct phm_ppt_v1_information *table_info =
295 (struct phm_ppt_v1_information *)(hwmgr->pptable);
296 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
297 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
298 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
299
300 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
301 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
302
303 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
304 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
305 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
306 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
307
308 return 0;
309}
310
311static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
312{
313 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
314 uint32_t pm_fuse_table_offset;
315
316 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
317 PHM_PlatformCaps_PowerContainment)) {
318 if (smu7_read_smc_sram_dword(hwmgr,
319 SMU7_FIRMWARE_HEADER_LOCATION +
320 offsetof(SMU74_Firmware_Header, PmFuseTable),
321 &pm_fuse_table_offset, SMC_RAM_END))
322 PP_ASSERT_WITH_CODE(false,
323 "Attempt to get pm_fuse_table_offset Failed!",
324 return -EINVAL);
325
326 if (polaris10_populate_svi_load_line(hwmgr))
327 PP_ASSERT_WITH_CODE(false,
328 "Attempt to populate SviLoadLine Failed!",
329 return -EINVAL);
330
331 if (polaris10_populate_tdc_limit(hwmgr))
332 PP_ASSERT_WITH_CODE(false,
333 "Attempt to populate TDCLimit Failed!", return -EINVAL);
334
335 if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
336 PP_ASSERT_WITH_CODE(false,
337 "Attempt to populate TdcWaterfallCtl, "
338 "LPMLTemperature Min and Max Failed!",
339 return -EINVAL);
340
341 if (0 != polaris10_populate_temperature_scaler(hwmgr))
342 PP_ASSERT_WITH_CODE(false,
343 "Attempt to populate LPMLTemperatureScaler Failed!",
344 return -EINVAL);
345
346 if (polaris10_populate_fuzzy_fan(hwmgr))
347 PP_ASSERT_WITH_CODE(false,
348 "Attempt to populate Fuzzy Fan Control parameters Failed!",
349 return -EINVAL);
350
351 if (polaris10_populate_gnb_lpml(hwmgr))
352 PP_ASSERT_WITH_CODE(false,
353 "Attempt to populate GnbLPML Failed!",
354 return -EINVAL);
355
356 if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
357 PP_ASSERT_WITH_CODE(false,
358 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
359 "Sidd Failed!", return -EINVAL);
360
361 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
362 (uint8_t *)&smu_data->power_tune_table,
363 (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
364 PP_ASSERT_WITH_CODE(false,
365 "Attempt to download PmFuseTable Failed!",
366 return -EINVAL);
367 }
368 return 0;
369}
370
371/**
372 * Mvdd table preparation for SMC.
373 *
374 * @param *hwmgr The address of the hardware manager.
375 * @param *table The SMC DPM table structure to be populated.
376 * @return 0
377 */
378static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
379 SMU74_Discrete_DpmTable *table)
380{
381 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
382 uint32_t count, level;
383
384 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
385 count = data->mvdd_voltage_table.count;
386 if (count > SMU_MAX_SMIO_LEVELS)
387 count = SMU_MAX_SMIO_LEVELS;
388 for (level = 0; level < count; level++) {
389 table->SmioTable2.Pattern[level].Voltage =
390				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
391 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
392 table->SmioTable2.Pattern[level].Smio =
393 (uint8_t) level;
394 table->Smio[level] |=
395 data->mvdd_voltage_table.entries[level].smio_low;
396 }
397 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
398
399 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
400 }
401
402 return 0;
403}
404
405static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
406 struct SMU74_Discrete_DpmTable *table)
407{
408 uint32_t count, level;
409 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
410
411 count = data->vddci_voltage_table.count;
412
413 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
414 if (count > SMU_MAX_SMIO_LEVELS)
415 count = SMU_MAX_SMIO_LEVELS;
416 for (level = 0; level < count; ++level) {
417 table->SmioTable1.Pattern[level].Voltage =
418 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
419 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
420
421 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
422 }
423 }
424
425 table->SmioMask1 = data->vddci_voltage_table.mask_low;
426
427 return 0;
428}
429
430/**
431* Preparation of vddc and vddgfx CAC tables for SMC.
432*
433* @param hwmgr the address of the hardware manager
434* @param table the SMC DPM table structure to be populated
435* @return always 0
436*/
437static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
438 struct SMU74_Discrete_DpmTable *table)
439{
440 uint32_t count;
441 uint8_t index;
442 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
443 struct phm_ppt_v1_information *table_info =
444 (struct phm_ppt_v1_information *)(hwmgr->pptable);
445 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
446 table_info->vddc_lookup_table;
447	/* The table is already byte-swapped, so to use a value from it
448	 * we need to swap it back.
449	 * We populate the vddc CAC data into the BapmVddc table
450	 * in both split and merged mode.
451 */
452 for (count = 0; count < lookup_table->count; count++) {
453 index = phm_get_voltage_index(lookup_table,
454 data->vddc_voltage_table.entries[count].value);
455 table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
456 table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
457 table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
458 }
459
460 return 0;
461}
462
463/**
464* Preparation of voltage tables for SMC.
465*
466* @param hwmgr the address of the hardware manager
467* @param table the SMC DPM table structure to be populated
468* @return always 0
469*/
470
471static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
472 struct SMU74_Discrete_DpmTable *table)
473{
474 polaris10_populate_smc_vddci_table(hwmgr, table);
475 polaris10_populate_smc_mvdd_table(hwmgr, table);
476 polaris10_populate_cac_table(hwmgr, table);
477
478 return 0;
479}
480
481static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
482 struct SMU74_Discrete_Ulv *state)
483{
484 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
485 struct phm_ppt_v1_information *table_info =
486 (struct phm_ppt_v1_information *)(hwmgr->pptable);
487
488 state->CcPwrDynRm = 0;
489 state->CcPwrDynRm1 = 0;
490
491 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
492 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
493 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
494
495 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
496 state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
497 else
498 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
499
500 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
501 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
502 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
503
504 return 0;
505}
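/*
 * Note: VddcOffsetVid above rescales the ULV offset from millivolts into
 * VID steps. With VOLTAGE_VID_OFFSET_SCALE2 = 100 and
 * VOLTAGE_VID_OFFSET_SCALE1 = 625 the divisor is effectively 6.25 mV per
 * step, so a 150 mV offset, for example, becomes 150 * 100 / 625 = 24.
 */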
506
507static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
508 struct SMU74_Discrete_DpmTable *table)
509{
510 return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
511}
512
513static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
514 struct SMU74_Discrete_DpmTable *table)
515{
516 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
517 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
518 struct smu7_dpm_table *dpm_table = &data->dpm_table;
519 int i;
520
521 /* Index (dpm_table->pcie_speed_table.count)
522 * is reserved for PCIE boot level. */
523 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
524 table->LinkLevel[i].PcieGenSpeed =
525 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
526 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
527 dpm_table->pcie_speed_table.dpm_levels[i].param1);
528 table->LinkLevel[i].EnabledForActivity = 1;
529 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
530 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
531 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
532 }
533
534 smu_data->smc_state_table.LinkLevelCount =
535 (uint8_t)dpm_table->pcie_speed_table.count;
536
537/* TODO: move to hwmgr */
538 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
539 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
540
541 return 0;
542}
543
544
545static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
546 SMU74_Discrete_DpmTable *table)
547{
548 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
549 uint32_t i, ref_clk;
550
551 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
552
553 ref_clk = smu7_get_xclk(hwmgr);
554
555 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
556 for (i = 0; i < NUM_SCLK_RANGE; i++) {
557 table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
558 table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
559 table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
560
561 table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
562 table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
563
564 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
565 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
566 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
567 }
568 return;
569 }
570
571 for (i = 0; i < NUM_SCLK_RANGE; i++) {
572 smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
573 smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
574
575 table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
576 table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
577 table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
578
579 table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
580 table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
581
582 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
583 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
584 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
585 }
586}
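/*
 * Note on the fallback path above: when no VBIOS SCLK range table is
 * available, the hard-coded Range_Table supplies the FCW values and the
 * per-range transition frequencies are reconstructed as
 * (ref_clk * fcw_trans_lower/upper) >> postdiv, i.e. relative to the
 * reference clock and that range's post-divider.
 */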
587
588/**
589* Calculates the SCLK dividers using the provided engine clock
590*
591* @param hwmgr the address of the hardware manager
592* @param clock the engine clock to use to populate the structure
593* @param sclk the SMC SCLK structure to be populated
594*/
595static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
596 uint32_t clock, SMU_SclkSetting *sclk_setting)
597{
598 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
599 const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
600 struct pp_atomctrl_clock_dividers_ai dividers;
601 uint32_t ref_clock;
602 uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
603 uint8_t i;
604 int result;
605 uint64_t temp;
606
607 sclk_setting->SclkFrequency = clock;
608 /* get the engine clock dividers for this clock value */
609 result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
610 if (result == 0) {
611 sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
612 sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
613 sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
614 sclk_setting->PllRange = dividers.ucSclkPllRange;
615 sclk_setting->Sclk_slew_rate = 0x400;
616 sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
617 sclk_setting->Pcc_down_slew_rate = 0xffff;
618 sclk_setting->SSc_En = dividers.ucSscEnable;
619 sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
620 sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
621 sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
622 return result;
623 }
624
625 ref_clock = smu7_get_xclk(hwmgr);
626
627 for (i = 0; i < NUM_SCLK_RANGE; i++) {
628 if (clock > smu_data->range_table[i].trans_lower_frequency
629 && clock <= smu_data->range_table[i].trans_upper_frequency) {
630 sclk_setting->PllRange = i;
631 break;
632 }
633 }
634
635 sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
636 temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
637 temp <<= 0x10;
638 do_div(temp, ref_clock);
639 sclk_setting->Fcw_frac = temp & 0xffff;
640
641 pcc_target_percent = 10; /* Hardcode 10% for now. */
642 pcc_target_freq = clock - (clock * pcc_target_percent / 100);
643 sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
644
645 ss_target_percent = 2; /* Hardcode 2% for now. */
646 sclk_setting->SSc_En = 0;
647 if (ss_target_percent) {
648 sclk_setting->SSc_En = 1;
649 ss_target_freq = clock - (clock * ss_target_percent / 100);
650 sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
651 temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
652 temp <<= 0x10;
653 do_div(temp, ref_clock);
654 sclk_setting->Fcw1_frac = temp & 0xffff;
655 }
656
657 return 0;
658}
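/*
 * Note on the manual FCW math above (used when the VBIOS divider query
 * fails): with fclk = clock << postdiv, Fcw_int = fclk / ref_clock and
 * Fcw_frac = ((fclk << 16) / ref_clock) & 0xffff, i.e. the integer and
 * fractional halves of a 16.16 fixed-point clock/ref ratio. The same
 * construction is repeated for the 2% spread-spectrum target
 * (Fcw1_int / Fcw1_frac).
 */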
659
660/**
661* Populates single SMC SCLK structure using the provided engine clock
662*
663* @param hwmgr the address of the hardware manager
664* @param clock the engine clock to use to populate the structure
665* @param sclk the SMC SCLK structure to be populated
666*/
667
668static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
669 uint32_t clock, uint16_t sclk_al_threshold,
670 struct SMU74_Discrete_GraphicsLevel *level)
671{
672 int result;
673 /* PP_Clocks minClocks; */
674 uint32_t mvdd;
675 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
676 struct phm_ppt_v1_information *table_info =
677 (struct phm_ppt_v1_information *)(hwmgr->pptable);
678 SMU_SclkSetting curr_sclk_setting = { 0 };
679
680 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
681
682 /* populate graphics levels */
683 result = polaris10_get_dependency_volt_by_clk(hwmgr,
684 table_info->vdd_dep_on_sclk, clock,
685 &level->MinVoltage, &mvdd);
686
687 PP_ASSERT_WITH_CODE((0 == result),
688 "can not find VDDC voltage value for "
689 "VDDC engine clock dependency table",
690 return result);
691 level->ActivityLevel = sclk_al_threshold;
692
693 level->CcPwrDynRm = 0;
694 level->CcPwrDynRm1 = 0;
695 level->EnabledForActivity = 0;
696 level->EnabledForThrottle = 1;
697 level->UpHyst = 10;
698 level->DownHyst = 0;
699 level->VoltageDownHyst = 0;
700 level->PowerThrottle = 0;
701 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
702
703 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
704 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
705 hwmgr->display_config.min_core_set_clock_in_sr);
706
707 /* Default to slow, highest DPM level will be
708 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
709 */
710 if (data->update_up_hyst)
711 level->UpHyst = (uint8_t)data->up_hyst;
712 if (data->update_down_hyst)
713 level->DownHyst = (uint8_t)data->down_hyst;
714
715 level->SclkSetting = curr_sclk_setting;
716
717 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
718 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
719 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
720 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
721 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
722 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
723 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
724 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
725 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
726 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
727 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
728 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
729 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
730 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
731 return 0;
732}
733
734/**
735* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
736*
737* @param hwmgr the address of the hardware manager
738*/
739int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
740{
741 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
742 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
743 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
744 struct phm_ppt_v1_information *table_info =
745 (struct phm_ppt_v1_information *)(hwmgr->pptable);
746 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
747 uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
748 int result = 0;
749 uint32_t array = smu_data->smu7_data.dpm_table_start +
750 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
751 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
752 SMU74_MAX_LEVELS_GRAPHICS;
753 struct SMU74_Discrete_GraphicsLevel *levels =
754 smu_data->smc_state_table.GraphicsLevel;
755 uint32_t i, max_entry;
756 uint8_t hightest_pcie_level_enabled = 0,
757 lowest_pcie_level_enabled = 0,
758 mid_pcie_level_enabled = 0,
759 count = 0;
760
761 polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
762
763 for (i = 0; i < dpm_table->sclk_table.count; i++) {
764
765 result = polaris10_populate_single_graphic_level(hwmgr,
766 dpm_table->sclk_table.dpm_levels[i].value,
767 (uint16_t)smu_data->activity_target[i],
768 &(smu_data->smc_state_table.GraphicsLevel[i]));
769 if (result)
770 return result;
771
772 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
773 if (i > 1)
774 levels[i].DeepSleepDivId = 0;
775 }
776 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
777 PHM_PlatformCaps_SPLLShutdownSupport))
778 smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
779
780 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
781 smu_data->smc_state_table.GraphicsDpmLevelCount =
782 (uint8_t)dpm_table->sclk_table.count;
783 hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
784 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
785
786
787 if (pcie_table != NULL) {
788 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
789 "There must be 1 or more PCIE levels defined in PPTable.",
790 return -EINVAL);
791 max_entry = pcie_entry_cnt - 1;
792 for (i = 0; i < dpm_table->sclk_table.count; i++)
793 levels[i].pcieDpmLevel =
794 (uint8_t) ((i < max_entry) ? i : max_entry);
795 } else {
796 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
797 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
798 (1 << (hightest_pcie_level_enabled + 1))) != 0))
799 hightest_pcie_level_enabled++;
800
801 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
802 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
803 (1 << lowest_pcie_level_enabled)) == 0))
804 lowest_pcie_level_enabled++;
805
806 while ((count < hightest_pcie_level_enabled) &&
807 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
808 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
809 count++;
810
811 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
812 hightest_pcie_level_enabled ?
813 (lowest_pcie_level_enabled + 1 + count) :
814 hightest_pcie_level_enabled;
815
816 /* set pcieDpmLevel to hightest_pcie_level_enabled */
817 for (i = 2; i < dpm_table->sclk_table.count; i++)
818 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
819
820 /* set pcieDpmLevel to lowest_pcie_level_enabled */
821 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
822
823 /* set pcieDpmLevel to mid_pcie_level_enabled */
824 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
825 }
826	/* the level count is sent to the SMC once at SMC table init and never changes */
827 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
828 (uint32_t)array_size, SMC_RAM_END);
829
830 return result;
831}
832
833
834static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
835 uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
836{
837 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
838 struct phm_ppt_v1_information *table_info =
839 (struct phm_ppt_v1_information *)(hwmgr->pptable);
840 int result = 0;
841 struct cgs_display_info info = {0, 0, NULL};
842 uint32_t mclk_stutter_mode_threshold = 40000;
843
844 cgs_get_active_displays_info(hwmgr->device, &info);
845
846 if (table_info->vdd_dep_on_mclk) {
847 result = polaris10_get_dependency_volt_by_clk(hwmgr,
848 table_info->vdd_dep_on_mclk, clock,
849 &mem_level->MinVoltage, &mem_level->MinMvdd);
850 PP_ASSERT_WITH_CODE((0 == result),
851 "can not find MinVddc voltage value from memory "
852 "VDDC voltage dependency table", return result);
853 }
854
855 mem_level->MclkFrequency = clock;
856 mem_level->EnabledForThrottle = 1;
857 mem_level->EnabledForActivity = 0;
858 mem_level->UpHyst = 0;
859 mem_level->DownHyst = 100;
860 mem_level->VoltageDownHyst = 0;
861 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
862 mem_level->StutterEnable = false;
863 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
864
865 data->display_timing.num_existing_displays = info.display_count;
866
867 if (mclk_stutter_mode_threshold &&
868 (clock <= mclk_stutter_mode_threshold) &&
869 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
870 STUTTER_ENABLE) & 0x1))
871 mem_level->StutterEnable = true;
872
873 if (!result) {
874 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
875 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
876 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
877 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
878 }
879 return result;
880}
881
882/**
883* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
884*
885* @param hwmgr the address of the hardware manager
886*/
887int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
888{
889 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
890 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
891 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
892 int result;
893 /* populate MCLK dpm table to SMU7 */
894 uint32_t array = smu_data->smu7_data.dpm_table_start +
895 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
896 uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
897 SMU74_MAX_LEVELS_MEMORY;
898 struct SMU74_Discrete_MemoryLevel *levels =
899 smu_data->smc_state_table.MemoryLevel;
900 uint32_t i;
901
902 for (i = 0; i < dpm_table->mclk_table.count; i++) {
903 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
904 "can not populate memory level as memory clock is zero",
905 return -EINVAL);
906 result = polaris10_populate_single_memory_level(hwmgr,
907 dpm_table->mclk_table.dpm_levels[i].value,
908 &levels[i]);
909 if (i == dpm_table->mclk_table.count - 1) {
910 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
911 levels[i].EnabledForActivity = 1;
912 }
913 if (result)
914 return result;
915 }
916
917	/* To prevent MC activity in stutter mode from pushing DPM up,
918	 * the UVD change complements this by putting the MCLK in
919	 * a higher state by default, so that we are not affected by
920	 * the up threshold or MCLK DPM latency.
921 */
922 levels[0].ActivityLevel = 0x1f;
923 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
924
925 smu_data->smc_state_table.MemoryDpmLevelCount =
926 (uint8_t)dpm_table->mclk_table.count;
927 hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
928 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
929
930	/* the level count is sent to the SMC once at SMC table init and never changes */
931 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
932 (uint32_t)array_size, SMC_RAM_END);
933
934 return result;
935}
936
937/**
938* Populates the SMC MVDD structure using the provided memory clock.
939*
940* @param hwmgr the address of the hardware manager
941* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
942* @param voltage the SMC VOLTAGE structure to be populated
943*/
944static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
945 uint32_t mclk, SMIO_Pattern *smio_pat)
946{
947 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
948 struct phm_ppt_v1_information *table_info =
949 (struct phm_ppt_v1_information *)(hwmgr->pptable);
950 uint32_t i = 0;
951
952 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
953		/* find the first mvdd entry whose clock is >= the requested mclk */
954 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
955 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
956 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
957 break;
958 }
959 }
960 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
961 "MVDD Voltage is outside the supported range.",
962 return -EINVAL);
963 } else
964 return -EINVAL;
965
966 return 0;
967}
968
969static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
970 SMU74_Discrete_DpmTable *table)
971{
972 int result = 0;
973 uint32_t sclk_frequency;
974 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
975 struct phm_ppt_v1_information *table_info =
976 (struct phm_ppt_v1_information *)(hwmgr->pptable);
977 SMIO_Pattern vol_level;
978 uint32_t mvdd;
979 uint16_t us_mvdd;
980
981 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
982
983 /* Get MinVoltage and Frequency from DPM0,
984 * already converted to SMC_UL */
985 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
986 result = polaris10_get_dependency_volt_by_clk(hwmgr,
987 table_info->vdd_dep_on_sclk,
988 sclk_frequency,
989 &table->ACPILevel.MinVoltage, &mvdd);
990 PP_ASSERT_WITH_CODE((0 == result),
991 "Cannot find ACPI VDDC voltage value "
992 "in Clock Dependency Table",
993 );
994
995 result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
996 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
997
998 table->ACPILevel.DeepSleepDivId = 0;
999 table->ACPILevel.CcPwrDynRm = 0;
1000 table->ACPILevel.CcPwrDynRm1 = 0;
1001
1002 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1003 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1004 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1005 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1006
1007 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1008 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1009 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1010 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1011 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
1012 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
1013 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
1014 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1015 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1016 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1017
1018
1019 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1020 table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
1021 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1022 table_info->vdd_dep_on_mclk,
1023 table->MemoryACPILevel.MclkFrequency,
1024 &table->MemoryACPILevel.MinVoltage, &mvdd);
1025 PP_ASSERT_WITH_CODE((0 == result),
1026 "Cannot find ACPI VDDCI voltage value "
1027 "in Clock Dependency Table",
1028 );
1029
1030 us_mvdd = 0;
1031 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1032 (data->mclk_dpm_key_disabled))
1033 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1034 else {
1035 if (!polaris10_populate_mvdd_value(hwmgr,
1036 data->dpm_table.mclk_table.dpm_levels[0].value,
1037 &vol_level))
1038 us_mvdd = vol_level.Voltage;
1039 }
1040
1041 if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
1042 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1043 else
1044 table->MemoryACPILevel.MinMvdd = 0;
1045
1046 table->MemoryACPILevel.StutterEnable = false;
1047
1048 table->MemoryACPILevel.EnabledForThrottle = 0;
1049 table->MemoryACPILevel.EnabledForActivity = 0;
1050 table->MemoryACPILevel.UpHyst = 0;
1051 table->MemoryACPILevel.DownHyst = 100;
1052 table->MemoryACPILevel.VoltageDownHyst = 0;
1053 table->MemoryACPILevel.ActivityLevel =
1054 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1055
1056 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1057 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1058
1059 return result;
1060}
1061
1062static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1063 SMU74_Discrete_DpmTable *table)
1064{
1065 int result = -EINVAL;
1066 uint8_t count;
1067 struct pp_atomctrl_clock_dividers_vi dividers;
1068 struct phm_ppt_v1_information *table_info =
1069 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1070 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1071 table_info->mm_dep_table;
1072 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1073 uint32_t vddci;
1074
1075 table->VceLevelCount = (uint8_t)(mm_table->count);
1076 table->VceBootLevel = 0;
1077
1078 for (count = 0; count < table->VceLevelCount; count++) {
1079 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1080 table->VceLevel[count].MinVoltage = 0;
1081 table->VceLevel[count].MinVoltage |=
1082 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1083
1084 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1085 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1086 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1087 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1088 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1089 else
1090 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1091
1092
1093 table->VceLevel[count].MinVoltage |=
1094 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1095 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1096
1097		/* retrieve divider value from the VBIOS */
1098 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1099 table->VceLevel[count].Frequency, &dividers);
1100 PP_ASSERT_WITH_CODE((0 == result),
1101 "can not find divide id for VCE engine clock",
1102 return result);
1103
1104 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1105
1106 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1107 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1108 }
1109 return result;
1110}
1111
1112
1113static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1114 SMU74_Discrete_DpmTable *table)
1115{
1116 int result = -EINVAL;
1117 uint8_t count;
1118 struct pp_atomctrl_clock_dividers_vi dividers;
1119 struct phm_ppt_v1_information *table_info =
1120 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1121 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1122 table_info->mm_dep_table;
1123 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1124 uint32_t vddci;
1125
1126 table->SamuBootLevel = 0;
1127 table->SamuLevelCount = (uint8_t)(mm_table->count);
1128
1129 for (count = 0; count < table->SamuLevelCount; count++) {
1130 /* not sure whether we need evclk or not */
1131 table->SamuLevel[count].MinVoltage = 0;
1132 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1133 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1134 VOLTAGE_SCALE) << VDDC_SHIFT;
1135
1136 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1137 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1138 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1139 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1140 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1141 else
1142 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1143
1144 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1145 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1146
1147		/* retrieve divider value from the VBIOS */
1148 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1149 table->SamuLevel[count].Frequency, &dividers);
1150 PP_ASSERT_WITH_CODE((0 == result),
1151 "can not find divide id for samu clock", return result);
1152
1153 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1154
1155 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1156 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1157 }
1158 return result;
1159}
1160
1161static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1162 int32_t eng_clock, int32_t mem_clock,
1163 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1164{
1165 uint32_t dram_timing;
1166 uint32_t dram_timing2;
1167 uint32_t burst_time;
1168 int result;
1169
1170 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1171 eng_clock, mem_clock);
1172 PP_ASSERT_WITH_CODE(result == 0,
1173 "Error calling VBIOS to set DRAM_TIMING.", return result);
1174
1175 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1176 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1177 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1178
1179
1180 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1181 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1182 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1183
1184 return 0;
1185}
1186
1187static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1188{
1189 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1190 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1191 struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1192 uint32_t i, j;
1193 int result = 0;
1194
1195 for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1196 for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1197 result = polaris10_populate_memory_timing_parameters(hwmgr,
1198 hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1199 hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1200 &arb_regs.entries[i][j]);
1201 if (result == 0)
1202 result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
1203 if (result != 0)
1204 return result;
1205 }
1206 }
1207
1208 result = smu7_copy_bytes_to_smc(
1209 hwmgr,
1210 smu_data->smu7_data.arb_table_start,
1211 (uint8_t *)&arb_regs,
1212 sizeof(SMU74_Discrete_MCArbDramTimingTable),
1213 SMC_RAM_END);
1214 return result;
1215}
1216
1217static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1218 struct SMU74_Discrete_DpmTable *table)
1219{
1220 int result = -EINVAL;
1221 uint8_t count;
1222 struct pp_atomctrl_clock_dividers_vi dividers;
1223 struct phm_ppt_v1_information *table_info =
1224 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1225 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1226 table_info->mm_dep_table;
1227 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1228 uint32_t vddci;
1229
1230 table->UvdLevelCount = (uint8_t)(mm_table->count);
1231 table->UvdBootLevel = 0;
1232
1233 for (count = 0; count < table->UvdLevelCount; count++) {
1234 table->UvdLevel[count].MinVoltage = 0;
1235 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1236 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1237 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1238 VOLTAGE_SCALE) << VDDC_SHIFT;
1239
1240 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1241 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1242 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1243 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1244 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1245 else
1246 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1247
1248 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1249 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1250
1251		/* retrieve divider value from the VBIOS */
1252 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1253 table->UvdLevel[count].VclkFrequency, &dividers);
1254 PP_ASSERT_WITH_CODE((0 == result),
1255 "can not find divide id for Vclk clock", return result);
1256
1257 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1258
1259 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1260 table->UvdLevel[count].DclkFrequency, &dividers);
1261 PP_ASSERT_WITH_CODE((0 == result),
1262 "can not find divide id for Dclk clock", return result);
1263
1264 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1265
1266 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1267 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1268 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1269 }
1270
1271 return result;
1272}
1273
1274static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1275 struct SMU74_Discrete_DpmTable *table)
1276{
1277 int result = 0;
1278 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1279
1280 table->GraphicsBootLevel = 0;
1281 table->MemoryBootLevel = 0;
1282
1283 /* find boot level from dpm table */
1284 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1285 data->vbios_boot_state.sclk_bootup_value,
1286 (uint32_t *)&(table->GraphicsBootLevel));
1287
1288 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1289 data->vbios_boot_state.mclk_bootup_value,
1290 (uint32_t *)&(table->MemoryBootLevel));
1291
1292 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1293 VOLTAGE_SCALE;
1294 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1295 VOLTAGE_SCALE;
1296 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1297 VOLTAGE_SCALE;
1298
1299 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1300 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1301 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1302
1303 return 0;
1304}
1305
1306static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1307{
1308 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1309 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1310 struct phm_ppt_v1_information *table_info =
1311 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1312 uint8_t count, level;
1313
1314 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1315
1316 for (level = 0; level < count; level++) {
1317 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1318 hw_data->vbios_boot_state.sclk_bootup_value) {
1319 smu_data->smc_state_table.GraphicsBootLevel = level;
1320 break;
1321 }
1322 }
1323
1324 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1325 for (level = 0; level < count; level++) {
1326 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1327 hw_data->vbios_boot_state.mclk_bootup_value) {
1328 smu_data->smc_state_table.MemoryBootLevel = level;
1329 break;
1330 }
1331 }
1332
1333 return 0;
1334}
1335
1336
1337static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1338{
1339 uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1340 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1341
1342 uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
1343 struct phm_ppt_v1_information *table_info =
1344 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1345 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1346 table_info->vdd_dep_on_sclk;
1347
1348 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1349
1350	/* Read the SMU efuse to calculate RO and determine
1351	 * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
1352 */
1353 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1354 ixSMU_EFUSE_0 + (67 * 4));
1355 efuse &= 0xFF000000;
1356 efuse = efuse >> 24;
1357
1358 if (hwmgr->chip_id == CHIP_POLARIS10) {
1359 min = 1000;
1360 max = 2300;
1361 } else {
1362 min = 1100;
1363 max = 2100;
1364 }
1365
1366 ro = efuse * (max - min) / 255 + min;
1367
1368 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1369 for (i = 0; i < sclk_table->count; i++) {
1370 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1371 sclk_table->entries[i].cks_enable << i;
1372 if (hwmgr->chip_id == CHIP_POLARIS10) {
1373 volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
1374 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
1375 volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
1376 (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
1377 } else {
1378 volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
1379 (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
1380 volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
1381 (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
1382 }
1383
1384 if (volt_without_cks >= volt_with_cks)
1385 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1386 sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
1387
1388 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1389 }
1390
1391 smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
1392 /* Populate CKS Lookup Table */
1393 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1394 stretch_amount2 = 0;
1395 else if (stretch_amount == 3 || stretch_amount == 4)
1396 stretch_amount2 = 1;
1397 else {
1398 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1399 PHM_PlatformCaps_ClockStretcher);
1400 PP_ASSERT_WITH_CODE(false,
1401 "Stretch Amount in PPTable not supported\n",
1402 return -EINVAL);
1403 }
1404
1405 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1406 value &= 0xFFFFFFFE;
1407 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1408
1409 return 0;
1410}
1411
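The fuse read above is a plain linear interpolation of an 8-bit efuse field onto the per-ASIC RO window. A minimal sketch of that mapping, with a hypothetical helper name and an example value chosen purely for illustration:

/* Hypothetical helper, not part of this patch: maps the 8-bit efuse value
 * (bits 31:24 of ixSMU_EFUSE_0 + 67 * 4) onto the [min, max] RO window.
 */
static uint32_t example_efuse_to_ro(uint32_t efuse_byte, uint32_t min, uint32_t max)
{
	/* efuse_byte has already been shifted down to the 0..255 range. */
	return efuse_byte * (max - min) / 255 + min;
}

/* e.g. efuse_byte = 128 with the Polaris10 window (1000..2300) gives
 * 128 * 1300 / 255 + 1000 = 1652, i.e. just below the 1660 FF threshold
 * mentioned in the comment above.
 */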
1412/**
1413* Populates the SMC VRConfig field in the DPM table.
1414*
1415* @param hwmgr the address of the hardware manager
1416* @param table the SMC DPM table structure to be populated
1417* @return always 0
1418*/
1419static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1420 struct SMU74_Discrete_DpmTable *table)
1421{
1422 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1423 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1424 uint16_t config;
1425
1426 config = VR_MERGED_WITH_VDDC;
1427 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1428
1429 /* Set Vddc Voltage Controller */
1430 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1431 config = VR_SVI2_PLANE_1;
1432 table->VRConfig |= config;
1433 } else {
1434 PP_ASSERT_WITH_CODE(false,
1435 "VDDC should be on SVI2 control in merged mode!",
1436 );
1437 }
1438 /* Set Vddci Voltage Controller */
1439 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1440 config = VR_SVI2_PLANE_2; /* only in merged mode */
1441 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1442 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1443 config = VR_SMIO_PATTERN_1;
1444 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1445 } else {
1446 config = VR_STATIC_VOLTAGE;
1447 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1448 }
1449 /* Set Mvdd Voltage Controller */
1450 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1451 config = VR_SVI2_PLANE_2;
1452 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1453 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
1454 offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1455 } else {
1456 config = VR_STATIC_VOLTAGE;
1457 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1458 }
1459
1460 return 0;
1461}
1462
1463
1464static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1465{
1466 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1467 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1468
1469 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1470 int result = 0;
1471 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1472 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1473 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1474 uint32_t tmp, i;
1475
1476 struct phm_ppt_v1_information *table_info =
1477 (struct phm_ppt_v1_information *)hwmgr->pptable;
1478 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1479 table_info->vdd_dep_on_sclk;
1480
1481
1482 if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1483 return result;
1484
1485 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1486
1487 if (0 == result) {
1488 table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1489 table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1490 table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1491 table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1492 table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1493 table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1494 table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1495 table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1496 table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1497 table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1498 table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
1499 table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1500 table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1501 table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1502 table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1503 table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
1504 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1505 AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1506 AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1507 AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1508 AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1509 AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1510 AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1511 AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1512
1513 for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1514 AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1515 AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1516 }
1517
1518 result = smu7_read_smc_sram_dword(hwmgr,
1519 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1520 &tmp, SMC_RAM_END);
1521
1522 smu7_copy_bytes_to_smc(hwmgr,
1523 tmp,
1524 (uint8_t *)&AVFS_meanNsigma,
1525 sizeof(AVFS_meanNsigma_t),
1526 SMC_RAM_END);
1527
1528 result = smu7_read_smc_sram_dword(hwmgr,
1529 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1530 &tmp, SMC_RAM_END);
1531 smu7_copy_bytes_to_smc(hwmgr,
1532 tmp,
1533 (uint8_t *)&AVFS_SclkOffset,
1534 sizeof(AVFS_Sclk_Offset_t),
1535 SMC_RAM_END);
1536
1537 data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1538 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1539 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1540 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1541 data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
1542 }
1543 return result;
1544}
1545
1546
1547/**
1548* Initialize the ARB DRAM timing table's index field.
1549*
1550* @param hwmgr the address of the powerplay hardware manager.
1551* @return 0 on success, otherwise the error code from the SMC SRAM access.
1552*/
1553static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
1554{
1555 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1556 uint32_t tmp;
1557 int result;
1558
1559 /* This is a read-modify-write on the first byte of the ARB table.
1560 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
1561 * is the field 'current'.
1562 * This solution is ugly, but we never write the whole table, only
1563 * individual fields in it.
1564 * In reality this field should not be in that structure but in a
1565 * soft register (see the sketch after this function).
1566 */
1567 result = smu7_read_smc_sram_dword(hwmgr,
1568 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1569
1570 if (result)
1571 return result;
1572
1573 tmp &= 0x00FFFFFF;
1574 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1575
1576 return smu7_write_smc_sram_dword(hwmgr,
1577 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1578}
1579
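A condensed sketch of the read-modify-write step used above, with a hypothetical helper name; it assumes only the smu7_read/write_smc_sram_dword accessors already used by this file and a caller that knows which byte of the dword to replace:

/* Hypothetical helper, not part of this patch: update one byte inside an
 * SMC SRAM dword without disturbing its three neighbours.
 */
static int example_update_smc_byte(struct pp_hwmgr *hwmgr, uint32_t dword_addr,
		uint32_t byte_shift, uint8_t new_value)
{
	uint32_t tmp;
	int result;

	result = smu7_read_smc_sram_dword(hwmgr, dword_addr, &tmp, SMC_RAM_END);
	if (result)
		return result;

	tmp &= ~(0xFFu << byte_shift);                 /* clear the target byte */
	tmp |= ((uint32_t)new_value) << byte_shift;    /* insert the new value  */

	return smu7_write_smc_sram_dword(hwmgr, dword_addr, tmp, SMC_RAM_END);
}

/* polaris10_init_arb_table_index() above is the byte_shift == 24 case,
 * writing MC_CG_ARB_FREQ_F1 into the 'current' field.
 */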
1580static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1581{
1582 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1583 struct phm_ppt_v1_information *table_info =
1584 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1585
1586 if (table_info &&
1587 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
1588 table_info->cac_dtp_table->usPowerTuneDataSetID)
1589 smu_data->power_tune_defaults =
1590 &polaris10_power_tune_data_set_array
1591 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
1592 else
1593 smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
1594
1595}
1596
1597static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
1598{
1599 struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1600 struct SMU74_Discrete_GraphicsLevel *levels =
1601 data->smc_state_table.GraphicsLevel;
1602 unsigned min_level = 1;
1603
1604 hwmgr->default_gfx_power_profile.activity_threshold =
1605 be16_to_cpu(levels[0].ActivityLevel);
1606 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1607 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1608 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1609
1610 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1611 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1612
1613 /* Work around compute SDMA instability: disable the lowest SCLK
1614 * DPM level. Optimize the compute power profile by using only the
1615 * highest two power levels (if more than two are available), with
1616 * 0ms up / 5ms down hysteresis (see the sketch after this function).
1617 */
1618 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1619 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1620 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1621 min_level = 1;
1622 else
1623 min_level = 0;
1624 hwmgr->default_compute_power_profile.min_sclk =
1625 be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
1626 hwmgr->default_compute_power_profile.up_hyst = 0;
1627 hwmgr->default_compute_power_profile.down_hyst = 5;
1628
1629 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1630 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1631}
1632
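A worked illustration of the level selection above, with hypothetical DPM level counts (not taken from any real board):

/* GraphicsDpmLevelCount == 8  ->  min_level = 8 - 2 = 6 (compute uses levels 6..7) */
/* GraphicsDpmLevelCount == 2  ->  min_level = 1         (compute uses level 1 only) */
/* GraphicsDpmLevelCount == 1  ->  min_level = 0         (no restriction possible)   */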
1633/**
1634* Initializes the SMC table and uploads it to SMC memory.
1635*
1636* @param hwmgr the address of the powerplay hardware manager.
1637* @return 0 on success, otherwise an error code.
1638*/
1639int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1640{
1641 int result;
1642 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1643 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1644
1645 struct phm_ppt_v1_information *table_info =
1646 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1647 struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1648 uint8_t i;
1649 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1650 pp_atomctrl_clock_dividers_vi dividers;
1651
1652 polaris10_initialize_power_tune_defaults(hwmgr);
1653
1654 if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
1655 polaris10_populate_smc_voltage_tables(hwmgr, table);
1656
1657 table->SystemFlags = 0;
1658 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1659 PHM_PlatformCaps_AutomaticDCTransition))
1660 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1661
1662 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1663 PHM_PlatformCaps_StepVddc))
1664 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1665
1666 if (hw_data->is_memory_gddr5)
1667 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1668
1669 if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
1670 result = polaris10_populate_ulv_state(hwmgr, table);
1671 PP_ASSERT_WITH_CODE(0 == result,
1672 "Failed to initialize ULV state!", return result);
1673 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1674 ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
1675 }
1676
1677 result = polaris10_populate_smc_link_level(hwmgr, table);
1678 PP_ASSERT_WITH_CODE(0 == result,
1679 "Failed to initialize Link Level!", return result);
1680
1681 result = polaris10_populate_all_graphic_levels(hwmgr);
1682 PP_ASSERT_WITH_CODE(0 == result,
1683 "Failed to initialize Graphics Level!", return result);
1684
1685 result = polaris10_populate_all_memory_levels(hwmgr);
1686 PP_ASSERT_WITH_CODE(0 == result,
1687 "Failed to initialize Memory Level!", return result);
1688
1689 result = polaris10_populate_smc_acpi_level(hwmgr, table);
1690 PP_ASSERT_WITH_CODE(0 == result,
1691 "Failed to initialize ACPI Level!", return result);
1692
1693 result = polaris10_populate_smc_vce_level(hwmgr, table);
1694 PP_ASSERT_WITH_CODE(0 == result,
1695 "Failed to initialize VCE Level!", return result);
1696
1697 result = polaris10_populate_smc_samu_level(hwmgr, table);
1698 PP_ASSERT_WITH_CODE(0 == result,
1699 "Failed to initialize SAMU Level!", return result);
1700
1701 /* Since only the initial state is completely set up at this point
1702 * (the other states are just copies of the boot state) we only
1703 * need to populate the ARB settings for the initial state.
1704 */
1705 result = polaris10_program_memory_timing_parameters(hwmgr);
1706 PP_ASSERT_WITH_CODE(0 == result,
1707 "Failed to Write ARB settings for the initial state.", return result);
1708
1709 result = polaris10_populate_smc_uvd_level(hwmgr, table);
1710 PP_ASSERT_WITH_CODE(0 == result,
1711 "Failed to initialize UVD Level!", return result);
1712
1713 result = polaris10_populate_smc_boot_level(hwmgr, table);
1714 PP_ASSERT_WITH_CODE(0 == result,
1715 "Failed to initialize Boot Level!", return result);
1716
1717 result = polaris10_populate_smc_initailial_state(hwmgr);
1718 PP_ASSERT_WITH_CODE(0 == result,
1719 "Failed to initialize Boot State!", return result);
1720
1721 result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
1722 PP_ASSERT_WITH_CODE(0 == result,
1723 "Failed to populate BAPM Parameters!", return result);
1724
1725 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1726 PHM_PlatformCaps_ClockStretcher)) {
1727 result = polaris10_populate_clock_stretcher_data_table(hwmgr);
1728 PP_ASSERT_WITH_CODE(0 == result,
1729 "Failed to populate Clock Stretcher Data Table!",
1730 return result);
1731 }
1732
1733 result = polaris10_populate_avfs_parameters(hwmgr);
1734 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
1735
1736 table->CurrSclkPllRange = 0xff;
1737 table->GraphicsVoltageChangeEnable = 1;
1738 table->GraphicsThermThrottleEnable = 1;
1739 table->GraphicsInterval = 1;
1740 table->VoltageInterval = 1;
1741 table->ThermalInterval = 1;
1742 table->TemperatureLimitHigh =
1743 table_info->cac_dtp_table->usTargetOperatingTemp *
1744 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1745 table->TemperatureLimitLow =
1746 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1747 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1748 table->MemoryVoltageChangeEnable = 1;
1749 table->MemoryInterval = 1;
1750 table->VoltageResponseTime = 0;
1751 table->PhaseResponseTime = 0;
1752 table->MemoryThermThrottleEnable = 1;
1753 table->PCIeBootLinkLevel = 0;
1754 table->PCIeGenInterval = 1;
1755 table->VRConfig = 0;
1756
1757 result = polaris10_populate_vr_config(hwmgr, table);
1758 PP_ASSERT_WITH_CODE(0 == result,
1759 "Failed to populate VRConfig setting!", return result);
1760
1761 table->ThermGpio = 17;
1762 table->SclkStepSize = 0x4000;
1763
1764 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1765 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
1766 } else {
1767 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
1768 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1769 PHM_PlatformCaps_RegulatorHot);
1770 }
1771
1772 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
1773 &gpio_pin)) {
1774 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
1775 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1776 PHM_PlatformCaps_AutomaticDCTransition);
1777 } else {
1778 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
1779 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1780 PHM_PlatformCaps_AutomaticDCTransition);
1781 }
1782
1783 /* Thermal Output GPIO */
1784 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
1785 &gpio_pin)) {
1786 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1787 PHM_PlatformCaps_ThermalOutGPIO);
1788
1789 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
1790
1791 /* For polarity, read GPIOPAD_A at the assigned GPIO pin. The VBIOS
1792 * programs this register to the 'inactive state', so the driver can
1793 * derive the 'active state' from it and program the SMU with the
1794 * correct polarity.
1795 */
1796 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
1797 & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
1798 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
1799
1800 /* if required, combine VRHot/PCC with thermal out GPIO */
1801 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
1802 && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
1803 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
1804 } else {
1805 table->ThermOutGpio = 17;
1806 table->ThermOutPolarity = 1;
1807 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
1808 }
1809
1810 /* Populate BIF_SCLK levels into SMC DPM table */
1811 for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
1812 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
1813 PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
1814
1815 if (i == 0)
1816 table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1817 else
1818 table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1819 }
1820
1821 for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
1822 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
1823
1824 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
1825 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
1826 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
1827 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
1828 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
1829 CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
1830 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
1831 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
1832 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
1833 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
1834
1835 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
1836 result = smu7_copy_bytes_to_smc(hwmgr,
1837 smu_data->smu7_data.dpm_table_start +
1838 offsetof(SMU74_Discrete_DpmTable, SystemFlags),
1839 (uint8_t *)&(table->SystemFlags),
1840 sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
1841 SMC_RAM_END);
1842 PP_ASSERT_WITH_CODE(0 == result,
1843 "Failed to upload dpm data to SMC memory!", return result);
1844
1845 result = polaris10_init_arb_table_index(hwmgr);
1846 PP_ASSERT_WITH_CODE(0 == result,
1847 "Failed to upload arb data to SMC memory!", return result);
1848
1849 result = polaris10_populate_pm_fuses(hwmgr);
1850 PP_ASSERT_WITH_CODE(0 == result,
1851 "Failed to populate PM fuses to SMC memory!", return result);
1852
1853 polaris10_save_default_power_profile(hwmgr);
1854
1855 return 0;
1856}
1857
1858static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
1859{
1860 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1861
1862 if (data->need_update_smu7_dpm_table &
1863 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
1864 return polaris10_program_memory_timing_parameters(hwmgr);
1865
1866 return 0;
1867}
1868
1869int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
1870{
1871 int ret;
1872 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
1873 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1874
1875 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1876 return 0;
1877
1878 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1879 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
1880
1881 ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
1882 0 : -1;
1883
1884 if (!ret)
1885 /* If this param is not changed, this function could fire unnecessarily */
1886 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
1887
1888 return ret;
1889}
1890
1891/**
1892* Set up the fan table to control the fan using the SMC.
1893* The slopes and minimum duty cycle written here are derived from the
1894* thermal controller's fan control parameters (see the worked sketch
1895* after this function).
1896* @param hwmgr the address of the powerplay hardware manager.
1897* @return always 0; on failure the MicrocodeFanControl platform cap
1898* is cleared instead of propagating an error.
1899*/
1900int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
1901{
1902 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1903 SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1904 uint32_t duty100;
1905 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1906 uint16_t fdo_min, slope1, slope2;
1907 uint32_t reference_clock;
1908 int res;
1909 uint64_t tmp64;
1910
1911 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
1912 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1913 PHM_PlatformCaps_MicrocodeFanControl);
1914 return 0;
1915 }
1916
1917 if (smu_data->smu7_data.fan_table_start == 0) {
1918 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1919 PHM_PlatformCaps_MicrocodeFanControl);
1920 return 0;
1921 }
1922
1923 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1924 CG_FDO_CTRL1, FMAX_DUTY100);
1925
1926 if (duty100 == 0) {
1927 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1928 PHM_PlatformCaps_MicrocodeFanControl);
1929 return 0;
1930 }
1931
1932 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
1933 usPWMMin * duty100;
1934 do_div(tmp64, 10000);
1935 fdo_min = (uint16_t)tmp64;
1936
1937 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
1938 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
1939 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
1940 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
1941
1942 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
1943 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
1944 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
1945 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
1946
1947 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1948 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1949
1950 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
1951 thermal_controller.advanceFanControlParameters.usTMin) / 100);
1952 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
1953 thermal_controller.advanceFanControlParameters.usTMed) / 100);
1954 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
1955 thermal_controller.advanceFanControlParameters.usTMax) / 100);
1956
1957 fan_table.Slope1 = cpu_to_be16(slope1);
1958 fan_table.Slope2 = cpu_to_be16(slope2);
1959
1960 fan_table.FdoMin = cpu_to_be16(fdo_min);
1961
1962 fan_table.HystDown = cpu_to_be16(hwmgr->
1963 thermal_controller.advanceFanControlParameters.ucTHyst);
1964
1965 fan_table.HystUp = cpu_to_be16(1);
1966
1967 fan_table.HystSlope = cpu_to_be16(1);
1968
1969 fan_table.TempRespLim = cpu_to_be16(5);
1970
1971 reference_clock = smu7_get_xclk(hwmgr);
1972
1973 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
1974 thermal_controller.advanceFanControlParameters.ulCycleDelay *
1975 reference_clock) / 1600);
1976
1977 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
1978
1979 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
1980 hwmgr->device, CGS_IND_REG__SMC,
1981 CG_MULT_THERMAL_CTRL, TEMP_SEL);
1982
1983 res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
1984 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
1985 SMC_RAM_END);
1986
1987 if (!res && hwmgr->thermal_controller.
1988 advanceFanControlParameters.ucMinimumPWMLimit)
1989 res = smum_send_msg_to_smc_with_parameter(hwmgr,
1990 PPSMC_MSG_SetFanMinPwm,
1991 hwmgr->thermal_controller.
1992 advanceFanControlParameters.ucMinimumPWMLimit);
1993
1994 if (!res && hwmgr->thermal_controller.
1995 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
1996 res = smum_send_msg_to_smc_with_parameter(hwmgr,
1997 PPSMC_MSG_SetFanSclkTarget,
1998 hwmgr->thermal_controller.
1999 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2000
2001 if (res)
2002 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2003 PHM_PlatformCaps_MicrocodeFanControl);
2004
2005 return 0;
2006}
2007
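A worked sketch of the slope and minimum-duty arithmetic above, using hypothetical fan parameters (not from any real PPTable); the +50 terms provide rounding when the centi-unit PPTable values are scaled down:

/* Hypothetical inputs:                                                      */
/*   duty100  = 255                      (CG_FDO_CTRL1.FMAX_DUTY100)        */
/*   usPWMMin = 1000, usPWMMed = 4000    (centi-percent)                    */
/*   usTMin   = 4500, usTMed  = 6500     (centi-degrees C)                  */
/*                                                                          */
/*   fdo_min  = 1000 * 255 / 10000                    = 25                  */
/*   slope1   = (50 + (16 * 255 * 3000) / 2000) / 100 = 61                  */
/*   TempMin  = (50 + 4500) / 100                     = 45                  */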
2008static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2009{
2010 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2011 uint32_t mm_boot_level_offset, mm_boot_level_value;
2012 struct phm_ppt_v1_information *table_info =
2013 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2014
2015 smu_data->smc_state_table.UvdBootLevel = 0;
2016 if (table_info->mm_dep_table->count > 0)
2017 smu_data->smc_state_table.UvdBootLevel =
2018 (uint8_t) (table_info->mm_dep_table->count - 1);
2019 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
2020 UvdBootLevel);
2021 mm_boot_level_offset /= 4;
2022 mm_boot_level_offset *= 4;
2023 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2024 CGS_IND_REG__SMC, mm_boot_level_offset);
2025 mm_boot_level_value &= 0x00FFFFFF;
2026 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2027 cgs_write_ind_register(hwmgr->device,
2028 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2029
2030 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2031 PHM_PlatformCaps_UVDDPM) ||
2032 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2033 PHM_PlatformCaps_StablePState))
2034 smum_send_msg_to_smc_with_parameter(hwmgr,
2035 PPSMC_MSG_UVDDPM_SetEnabledMask,
2036 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2037 return 0;
2038}
2039
2040static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2041{
2042 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2043 uint32_t mm_boot_level_offset, mm_boot_level_value;
2044 struct phm_ppt_v1_information *table_info =
2045 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2046
2047 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2048 PHM_PlatformCaps_StablePState))
2049 smu_data->smc_state_table.VceBootLevel =
2050 (uint8_t) (table_info->mm_dep_table->count - 1);
2051 else
2052 smu_data->smc_state_table.VceBootLevel = 0;
2053
2054 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2055 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2056 mm_boot_level_offset /= 4;
2057 mm_boot_level_offset *= 4;
2058 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2059 CGS_IND_REG__SMC, mm_boot_level_offset);
2060 mm_boot_level_value &= 0xFF00FFFF;
2061 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2062 cgs_write_ind_register(hwmgr->device,
2063 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2064
2065 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2066 smum_send_msg_to_smc_with_parameter(hwmgr,
2067 PPSMC_MSG_VCEDPM_SetEnabledMask,
2068 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2069 return 0;
2070}
2071
2072static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2073{
2074 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2075 uint32_t mm_boot_level_offset, mm_boot_level_value;
2076
2077
2078 smu_data->smc_state_table.SamuBootLevel = 0;
2079 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2080 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2081
2082 mm_boot_level_offset /= 4;
2083 mm_boot_level_offset *= 4;
2084 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2085 CGS_IND_REG__SMC, mm_boot_level_offset);
2086 mm_boot_level_value &= 0xFFFFFF00;
2087 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2088 cgs_write_ind_register(hwmgr->device,
2089 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2090
2091 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2092 PHM_PlatformCaps_StablePState))
2093 smum_send_msg_to_smc_with_parameter(hwmgr,
2094 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2095 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2096 return 0;
2097}
2098
2099
2100static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
2101{
2102 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2103 struct phm_ppt_v1_information *table_info =
2104 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2105 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2106 int max_entry, i;
2107
2108 max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
2109 SMU74_MAX_LEVELS_LINK :
2110 pcie_table->count;
2111 /* Setup BIF_SCLK levels */
2112 for (i = 0; i < max_entry; i++)
2113 smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
2114 return 0;
2115}
2116
2117int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2118{
2119 switch (type) {
2120 case SMU_UVD_TABLE:
2121 polaris10_update_uvd_smc_table(hwmgr);
2122 break;
2123 case SMU_VCE_TABLE:
2124 polaris10_update_vce_smc_table(hwmgr);
2125 break;
2126 case SMU_SAMU_TABLE:
2127 polaris10_update_samu_smc_table(hwmgr);
2128 break;
2129 case SMU_BIF_TABLE:
2130 polaris10_update_bif_smc_table(hwmgr);
2131 default:
2132 break;
2133 }
2134 return 0;
2135}
2136
2137int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2138{
2139 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2140 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2141
2142 int result = 0;
2143 uint32_t low_sclk_interrupt_threshold = 0;
2144
2145 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2146 PHM_PlatformCaps_SclkThrottleLowNotification)
2147 && (hwmgr->gfx_arbiter.sclk_threshold !=
2148 data->low_sclk_interrupt_threshold)) {
2149 data->low_sclk_interrupt_threshold =
2150 hwmgr->gfx_arbiter.sclk_threshold;
2151 low_sclk_interrupt_threshold =
2152 data->low_sclk_interrupt_threshold;
2153
2154 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2155
2156 result = smu7_copy_bytes_to_smc(
2157 hwmgr,
2158 smu_data->smu7_data.dpm_table_start +
2159 offsetof(SMU74_Discrete_DpmTable,
2160 LowSclkInterruptThreshold),
2161 (uint8_t *)&low_sclk_interrupt_threshold,
2162 sizeof(uint32_t),
2163 SMC_RAM_END);
2164 }
2165 PP_ASSERT_WITH_CODE((result == 0),
2166 "Failed to update SCLK threshold!", return result);
2167
2168 result = polaris10_program_mem_timing_parameters(hwmgr);
2169 PP_ASSERT_WITH_CODE((result == 0),
2170 "Failed to program memory timing parameters!",
2171 );
2172
2173 return result;
2174}
2175
2176uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2177{
2178 switch (type) {
2179 case SMU_SoftRegisters:
2180 switch (member) {
2181 case HandshakeDisables:
2182 return offsetof(SMU74_SoftRegisters, HandshakeDisables);
2183 case VoltageChangeTimeout:
2184 return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
2185 case AverageGraphicsActivity:
2186 return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
2187 case PreVBlankGap:
2188 return offsetof(SMU74_SoftRegisters, PreVBlankGap);
2189 case VBlankTimeout:
2190 return offsetof(SMU74_SoftRegisters, VBlankTimeout);
2191 case UcodeLoadStatus:
2192 return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
2193 }
2194 case SMU_Discrete_DpmTable:
2195 switch (member) {
2196 case UvdBootLevel:
2197 return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
2198 case VceBootLevel:
2199 return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2200 case SamuBootLevel:
2201 return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2202 case LowSclkInterruptThreshold:
2203 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2204 }
2205 }
2206 pr_warn("can't get the offset of type %x member %x\n", type, member);
2207 return 0;
2208}
2209
2210uint32_t polaris10_get_mac_definition(uint32_t value)
2211{
2212 switch (value) {
2213 case SMU_MAX_LEVELS_GRAPHICS:
2214 return SMU74_MAX_LEVELS_GRAPHICS;
2215 case SMU_MAX_LEVELS_MEMORY:
2216 return SMU74_MAX_LEVELS_MEMORY;
2217 case SMU_MAX_LEVELS_LINK:
2218 return SMU74_MAX_LEVELS_LINK;
2219 case SMU_MAX_ENTRIES_SMIO:
2220 return SMU74_MAX_ENTRIES_SMIO;
2221 case SMU_MAX_LEVELS_VDDC:
2222 return SMU74_MAX_LEVELS_VDDC;
2223 case SMU_MAX_LEVELS_VDDGFX:
2224 return SMU74_MAX_LEVELS_VDDGFX;
2225 case SMU_MAX_LEVELS_VDDCI:
2226 return SMU74_MAX_LEVELS_VDDCI;
2227 case SMU_MAX_LEVELS_MVDD:
2228 return SMU74_MAX_LEVELS_MVDD;
2229 case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
2230 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2231 }
2232
2233 pr_warn("can't get the mac of %x\n", value);
2234 return 0;
2235}
2236
2237/**
2238* Get the location of various tables inside the FW image
2239* (a condensed sketch of the lookup pattern follows this function).
2240* @param hwmgr the address of the powerplay hardware manager.
2241* @return 0 on success, -1 if any required header field could not be read.
2242*/
2243int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2244{
2245 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2246 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2247 uint32_t tmp;
2248 int result;
2249 bool error = false;
2250
2251 result = smu7_read_smc_sram_dword(hwmgr,
2252 SMU7_FIRMWARE_HEADER_LOCATION +
2253 offsetof(SMU74_Firmware_Header, DpmTable),
2254 &tmp, SMC_RAM_END);
2255
2256 if (0 == result)
2257 smu_data->smu7_data.dpm_table_start = tmp;
2258
2259 error |= (0 != result);
2260
2261 result = smu7_read_smc_sram_dword(hwmgr,
2262 SMU7_FIRMWARE_HEADER_LOCATION +
2263 offsetof(SMU74_Firmware_Header, SoftRegisters),
2264 &tmp, SMC_RAM_END);
2265
2266 if (!result) {
2267 data->soft_regs_start = tmp;
2268 smu_data->smu7_data.soft_regs_start = tmp;
2269 }
2270
2271 error |= (0 != result);
2272
2273 result = smu7_read_smc_sram_dword(hwmgr,
2274 SMU7_FIRMWARE_HEADER_LOCATION +
2275 offsetof(SMU74_Firmware_Header, mcRegisterTable),
2276 &tmp, SMC_RAM_END);
2277
2278 if (!result)
2279 smu_data->smu7_data.mc_reg_table_start = tmp;
2280
2281 result = smu7_read_smc_sram_dword(hwmgr,
2282 SMU7_FIRMWARE_HEADER_LOCATION +
2283 offsetof(SMU74_Firmware_Header, FanTable),
2284 &tmp, SMC_RAM_END);
2285
2286 if (!result)
2287 smu_data->smu7_data.fan_table_start = tmp;
2288
2289 error |= (0 != result);
2290
2291 result = smu7_read_smc_sram_dword(hwmgr,
2292 SMU7_FIRMWARE_HEADER_LOCATION +
2293 offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
2294 &tmp, SMC_RAM_END);
2295
2296 if (!result)
2297 smu_data->smu7_data.arb_table_start = tmp;
2298
2299 error |= (0 != result);
2300
2301 result = smu7_read_smc_sram_dword(hwmgr,
2302 SMU7_FIRMWARE_HEADER_LOCATION +
2303 offsetof(SMU74_Firmware_Header, Version),
2304 &tmp, SMC_RAM_END);
2305
2306 if (!result)
2307 hwmgr->microcode_version_info.SMC = tmp;
2308
2309 error |= (0 != result);
2310
2311 return error ? -1 : 0;
2312}
2313
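The header walk above repeats one read-and-check step per table; a condensed sketch of that step with a hypothetical helper name, assuming only the accessors and header layout already referenced in this file:

/* Hypothetical helper, not part of this patch: read one offset field out of
 * the firmware header and report whether the read succeeded.
 */
static bool example_read_header_field(struct pp_hwmgr *hwmgr,
		uint32_t field_offset, uint32_t *out)
{
	uint32_t tmp;
	int result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION + field_offset,
			&tmp, SMC_RAM_END);

	if (!result)
		*out = tmp;
	return result == 0;
}

/* e.g. example_read_header_field(hwmgr,
 *		offsetof(SMU74_Firmware_Header, FanTable),
 *		&smu_data->smu7_data.fan_table_start);
 */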
2314bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2315{
2316 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2317 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2318 ? true : false;
2319}
2320
2321int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2322 struct amd_pp_profile *request)
2323{
2324 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
2325 (hwmgr->smu_backend);
2326 struct SMU74_Discrete_GraphicsLevel *levels =
2327 smu_data->smc_state_table.GraphicsLevel;
2328 uint32_t array = smu_data->smu7_data.dpm_table_start +
2329 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
2330 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
2331 SMU74_MAX_LEVELS_GRAPHICS;
2332 uint32_t i;
2333
2334 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2335 levels[i].ActivityLevel =
2336 cpu_to_be16(request->activity_threshold);
2337 levels[i].EnabledForActivity = 1;
2338 levels[i].UpHyst = request->up_hyst;
2339 levels[i].DownHyst = request->down_hyst;
2340 }
2341
2342 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2343 array_size, SMC_RAM_END);
2344}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
deleted file mode 100644
index 1df8154d0626..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef POLARIS10_SMC_H
24#define POLARIS10_SMC_H
25
26#include "smumgr.h"
27
28
29int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
30int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
31int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
32int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
33int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
34int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
35int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
36uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
37uint32_t polaris10_get_mac_definition(uint32_t value);
38int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
39bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
40int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
41 struct amd_pp_profile *request);
42
43#endif
44
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 61ee6281fbda..bd6be7793ca7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -35,13 +35,47 @@
35#include "gca/gfx_8_0_d.h" 35#include "gca/gfx_8_0_d.h"
36#include "bif/bif_5_0_d.h" 36#include "bif/bif_5_0_d.h"
37#include "bif/bif_5_0_sh_mask.h" 37#include "bif/bif_5_0_sh_mask.h"
38#include "polaris10_pwrvirus.h"
39#include "ppatomctrl.h" 38#include "ppatomctrl.h"
40#include "cgs_common.h" 39#include "cgs_common.h"
41#include "polaris10_smc.h"
42#include "smu7_ppsmc.h" 40#include "smu7_ppsmc.h"
43#include "smu7_smumgr.h" 41#include "smu7_smumgr.h"
44 42
43#include "smu7_dyn_defaults.h"
44
45#include "smu7_hwmgr.h"
46#include "hardwaremanager.h"
47#include "ppatomctrl.h"
48#include "atombios.h"
49#include "pppcielanes.h"
50
51#include "dce/dce_10_0_d.h"
52#include "dce/dce_10_0_sh_mask.h"
53
54#define POLARIS10_SMC_SIZE 0x20000
55#define VOLTAGE_VID_OFFSET_SCALE1 625
56#define VOLTAGE_VID_OFFSET_SCALE2 100
57#define POWERTUNE_DEFAULT_SET_MAX 1
58#define VDDC_VDDCI_DELTA 200
59#define MC_CG_ARB_FREQ_F1 0x0b
60
61static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
62 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
63 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
64 { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
65 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
66 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
67};
68
69static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
70 {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
71 {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
72 {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
73 {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
74 {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
75 {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
76 {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
77 {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
78
45#define PPPOLARIS10_TARGETACTIVITY_DFLT 50 79#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
46 80
47static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { 81static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
@@ -60,46 +94,6 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
60static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 94static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
61 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; 95 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
62 96
63static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
64{
65 int i;
66 uint32_t reg, data;
67
68 for (i = 0; i < size; i++) {
69 reg = pvirus->reg;
70 data = pvirus->data;
71 if (reg != 0xffffffff)
72 cgs_write_register(hwmgr->device, reg, data);
73 else
74 break;
75 pvirus++;
76 }
77}
78
79static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
80{
81 int i;
82 cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
83 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
84 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
85 for (i = 0; i < section->dfy_size; i++)
86 cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
87}
88
89static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr)
90{
91 execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
92 execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
93 execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
94 execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
95 execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
96 execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
97 execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
98 execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
99
100 return 0;
101}
102
103static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) 97static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
104{ 98{
105 int result = 0; 99 int result = 0;
@@ -197,7 +191,7 @@ polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
197 if (smu_data->avfs.avfs_btc_param > 1) { 191 if (smu_data->avfs.avfs_btc_param > 1) {
197 pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting."); 191 pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
199 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; 193 smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
200 PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr), 194 PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
201 "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", 195 "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
202 return -EINVAL); 196 return -EINVAL);
203 } 197 }
@@ -389,6 +383,2195 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
389 return 0; 383 return 0;
390} 384}
391 385
386static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
387 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
388 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
389{
390 uint32_t i;
391 uint16_t vddci;
392 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
393
394 *voltage = *mvdd = 0;
395
396 /* clock - voltage dependency table is empty table */
397 if (dep_table->count == 0)
398 return -EINVAL;
399
400 for (i = 0; i < dep_table->count; i++) {
401 /* find first sclk bigger than request */
402 if (dep_table->entries[i].clk >= clock) {
403 *voltage |= (dep_table->entries[i].vddc *
404 VOLTAGE_SCALE) << VDDC_SHIFT;
405 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
406 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
407 VOLTAGE_SCALE) << VDDCI_SHIFT;
408 else if (dep_table->entries[i].vddci)
409 *voltage |= (dep_table->entries[i].vddci *
410 VOLTAGE_SCALE) << VDDCI_SHIFT;
411 else {
412 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
413 (dep_table->entries[i].vddc -
414 (uint16_t)VDDC_VDDCI_DELTA));
415 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
416 }
417
418 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
419 *mvdd = data->vbios_boot_state.mvdd_bootup_value *
420 VOLTAGE_SCALE;
421 else if (dep_table->entries[i].mvdd)
422 *mvdd = (uint32_t) dep_table->entries[i].mvdd *
423 VOLTAGE_SCALE;
424
425 *voltage |= 1 << PHASES_SHIFT;
426 return 0;
427 }
428 }
429
430 /* sclk is bigger than max sclk in the dependence table */
431 *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
432
433 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
434 *voltage |= (data->vbios_boot_state.vddci_bootup_value *
435 VOLTAGE_SCALE) << VDDCI_SHIFT;
436 else if (dep_table->entries[i-1].vddci) {
437 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
438 (dep_table->entries[i-1].vddc -
439 (uint16_t)VDDC_VDDCI_DELTA));
440 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
441 }
442
443 if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
444 *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
445 else if (dep_table->entries[i-1].mvdd)
446 *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
447
448 return 0;
449}
450
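A minimal usage sketch for the lookup above; the wrapper name is hypothetical and the SCLK dependency table is assumed to come from the same pptable pointer used throughout this file:

/* Hypothetical call site, not part of this patch. */
static int example_lookup_sclk_voltage(struct pp_hwmgr *hwmgr, uint32_t clock,
		SMU_VoltageLevel *voltage, uint32_t *mvdd)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* Returns -EINVAL for an empty dependency table; on success *voltage
	 * carries the packed VDDC/VDDCI/phase fields and *mvdd the scaled
	 * MVDD value.
	 */
	return polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk, clock, voltage, mvdd);
}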
451static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
452{
453 uint32_t tmp;
454 tmp = raw_setting * 4096 / 100;
455 return (uint16_t)tmp;
456}
457
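The helper above appears to scale a percentage into a 12-bit fixed-point fraction (100% maps to 4096); two worked values with hypothetical inputs:

/* scale_fan_gain_settings(100) = 100 * 4096 / 100 = 4096   (i.e. 1.0)  */
/* scale_fan_gain_settings(25)  =  25 * 4096 / 100 = 1024   (i.e. 0.25) */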
458static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
459{
460 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
461
462 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
463 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
464 struct phm_ppt_v1_information *table_info =
465 (struct phm_ppt_v1_information *)(hwmgr->pptable);
466 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
467 struct pp_advance_fan_control_parameters *fan_table =
468 &hwmgr->thermal_controller.advanceFanControlParameters;
469 int i, j, k;
470 const uint16_t *pdef1;
471 const uint16_t *pdef2;
472
473 table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
474 table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
475
476 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
477 "Target Operating Temp is out of Range!",
478 );
479
480 table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
481 cac_dtp_table->usTargetOperatingTemp * 256);
482 table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
483 cac_dtp_table->usTemperatureLimitHotspot * 256);
484 table->FanGainEdge = PP_HOST_TO_SMC_US(
485 scale_fan_gain_settings(fan_table->usFanGainEdge));
486 table->FanGainHotspot = PP_HOST_TO_SMC_US(
487 scale_fan_gain_settings(fan_table->usFanGainHotspot));
488
489 pdef1 = defaults->BAPMTI_R;
490 pdef2 = defaults->BAPMTI_RC;
491
492 for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
493 for (j = 0; j < SMU74_DTE_SOURCES; j++) {
494 for (k = 0; k < SMU74_DTE_SINKS; k++) {
495 table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
496 table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
497 pdef1++;
498 pdef2++;
499 }
500 }
501 }
502
503 return 0;
504}
505
506static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
507{
508 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
509 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
510
511 smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
512 smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
513 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
514 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
515
516 return 0;
517}
518
519static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
520{
521 uint16_t tdc_limit;
522 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
523 struct phm_ppt_v1_information *table_info =
524 (struct phm_ppt_v1_information *)(hwmgr->pptable);
525 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
526
527 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
528 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
529 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
530 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
531 defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
532 smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
533
534 return 0;
535}
536
537static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
538{
539 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
540 const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
541 uint32_t temp;
542
543 if (smu7_read_smc_sram_dword(hwmgr,
544 fuse_table_offset +
545 offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
546 (uint32_t *)&temp, SMC_RAM_END))
547 PP_ASSERT_WITH_CODE(false,
548 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
549 return -EINVAL);
550 else {
551 smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
552 smu_data->power_tune_table.LPMLTemperatureMin =
553 (uint8_t)((temp >> 16) & 0xff);
554 smu_data->power_tune_table.LPMLTemperatureMax =
555 (uint8_t)((temp >> 8) & 0xff);
556 smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
557 }
558 return 0;
559}
560
561static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
562{
563 int i;
564 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
565
566 /* Currently not used. Set all to zero. */
567 for (i = 0; i < 16; i++)
568 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
569
570 return 0;
571}
572
573static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
574{
575 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
576
577/* TODO: move to hwmgr */
578 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
579 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
580 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
581 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
582
583 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
584 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
585 return 0;
586}
587
588static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
589{
590 int i;
591 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
592
593 /* Currently not used. Set all to zero. */
594 for (i = 0; i < 16; i++)
595 smu_data->power_tune_table.GnbLPML[i] = 0;
596
597 return 0;
598}
599
600static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
601{
602 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
603 struct phm_ppt_v1_information *table_info =
604 (struct phm_ppt_v1_information *)(hwmgr->pptable);
605 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
606 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
607 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
608
609 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
610 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
611
612 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
613 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
614 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
615 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
616
617 return 0;
618}
619
620static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
621{
622 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
623 uint32_t pm_fuse_table_offset;
624
625 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
626 PHM_PlatformCaps_PowerContainment)) {
627 if (smu7_read_smc_sram_dword(hwmgr,
628 SMU7_FIRMWARE_HEADER_LOCATION +
629 offsetof(SMU74_Firmware_Header, PmFuseTable),
630 &pm_fuse_table_offset, SMC_RAM_END))
631 PP_ASSERT_WITH_CODE(false,
632 "Attempt to get pm_fuse_table_offset Failed!",
633 return -EINVAL);
634
635 if (polaris10_populate_svi_load_line(hwmgr))
636 PP_ASSERT_WITH_CODE(false,
637 "Attempt to populate SviLoadLine Failed!",
638 return -EINVAL);
639
640 if (polaris10_populate_tdc_limit(hwmgr))
641 PP_ASSERT_WITH_CODE(false,
642 "Attempt to populate TDCLimit Failed!", return -EINVAL);
643
644 if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
645 PP_ASSERT_WITH_CODE(false,
646 "Attempt to populate TdcWaterfallCtl, "
647 "LPMLTemperature Min and Max Failed!",
648 return -EINVAL);
649
650 if (0 != polaris10_populate_temperature_scaler(hwmgr))
651 PP_ASSERT_WITH_CODE(false,
652 "Attempt to populate LPMLTemperatureScaler Failed!",
653 return -EINVAL);
654
655 if (polaris10_populate_fuzzy_fan(hwmgr))
656 PP_ASSERT_WITH_CODE(false,
657 "Attempt to populate Fuzzy Fan Control parameters Failed!",
658 return -EINVAL);
659
660 if (polaris10_populate_gnb_lpml(hwmgr))
661 PP_ASSERT_WITH_CODE(false,
662 "Attempt to populate GnbLPML Failed!",
663 return -EINVAL);
664
665 if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
666 PP_ASSERT_WITH_CODE(false,
667 "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
668 "Sidd Failed!", return -EINVAL);
669
670 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
671 (uint8_t *)&smu_data->power_tune_table,
672 (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
673 PP_ASSERT_WITH_CODE(false,
674 "Attempt to download PmFuseTable Failed!",
675 return -EINVAL);
676 }
677 return 0;
678}
679
680static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
681 SMU74_Discrete_DpmTable *table)
682{
683 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
684 uint32_t count, level;
685
686 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
687 count = data->mvdd_voltage_table.count;
688 if (count > SMU_MAX_SMIO_LEVELS)
689 count = SMU_MAX_SMIO_LEVELS;
690 for (level = 0; level < count; level++) {
691 table->SmioTable2.Pattern[level].Voltage =
692 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
693 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
694 table->SmioTable2.Pattern[level].Smio =
695 (uint8_t) level;
696 table->Smio[level] |=
697 data->mvdd_voltage_table.entries[level].smio_low;
698 }
699 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
700
701 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
702 }
703
704 return 0;
705}
706
707static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
708 struct SMU74_Discrete_DpmTable *table)
709{
710 uint32_t count, level;
711 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
712
713 count = data->vddci_voltage_table.count;
714
715 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
716 if (count > SMU_MAX_SMIO_LEVELS)
717 count = SMU_MAX_SMIO_LEVELS;
718 for (level = 0; level < count; ++level) {
719 table->SmioTable1.Pattern[level].Voltage =
720 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
721 table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
722
723 table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
724 }
725 }
726
727 table->SmioMask1 = data->vddci_voltage_table.mask_low;
728
729 return 0;
730}
731
732static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
733 struct SMU74_Discrete_DpmTable *table)
734{
735 uint32_t count;
736 uint8_t index;
737 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
738 struct phm_ppt_v1_information *table_info =
739 (struct phm_ppt_v1_information *)(hwmgr->pptable);
740 struct phm_ppt_v1_voltage_lookup_table *lookup_table =
741 table_info->vddc_lookup_table;
742	/* The table is already byte-swapped, so in order to use a value from it
743	 * we need to swap it back.
744	 * We are populating vddc CAC data into the BapmVddc table
745	 * in both split and merged mode.
746	 */
747 for (count = 0; count < lookup_table->count; count++) {
748 index = phm_get_voltage_index(lookup_table,
749 data->vddc_voltage_table.entries[count].value);
750 table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
751 table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
752 table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
753 }
754
755 return 0;
756}
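/* The three BapmVddcVid* arrays store the low/mid/high CAC leakage voltages as
 * 8-bit SVI2 VID codes. Assuming convert_to_vid() follows the usual powerplay
 * SVI2 encoding, VID = (6200 - mV * VOLTAGE_SCALE) / 25 with VOLTAGE_SCALE == 4
 * (an assumption here; the helper is defined outside this file), a us_cac_low
 * entry of 1000 mV would be stored as:
 *
 *	(6200 - 1000 * 4) / 25 = 2200 / 25 = 88
 *
 * i.e. higher voltages map to smaller VID codes.
 */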
757
758static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
759 struct SMU74_Discrete_DpmTable *table)
760{
761 polaris10_populate_smc_vddci_table(hwmgr, table);
762 polaris10_populate_smc_mvdd_table(hwmgr, table);
763 polaris10_populate_cac_table(hwmgr, table);
764
765 return 0;
766}
767
768static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
769 struct SMU74_Discrete_Ulv *state)
770{
771 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
772 struct phm_ppt_v1_information *table_info =
773 (struct phm_ppt_v1_information *)(hwmgr->pptable);
774
775 state->CcPwrDynRm = 0;
776 state->CcPwrDynRm1 = 0;
777
778 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
779 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
780 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
781
782 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
783 state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
784 else
785 state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
786
787 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
788 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
789 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
790
791 return 0;
792}
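/* VddcOffsetVid converts the ULV voltage offset from millivolts into VID
 * steps. Assuming the usual powerplay scale factors
 * (VOLTAGE_VID_OFFSET_SCALE1 == 625, VOLTAGE_VID_OFFSET_SCALE2 == 100 -- an
 * assumption, since the macros are defined outside this file), one VID step
 * is 6.25 mV, so a 50 mV ULV offset becomes:
 *
 *	50 * 100 / 625 = 8 VID steps
 */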
793
794static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
795 struct SMU74_Discrete_DpmTable *table)
796{
797 return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
798}
799
800static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
801 struct SMU74_Discrete_DpmTable *table)
802{
803 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
804 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
805 struct smu7_dpm_table *dpm_table = &data->dpm_table;
806 int i;
807
808 /* Index (dpm_table->pcie_speed_table.count)
809 * is reserved for PCIE boot level. */
810 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
811 table->LinkLevel[i].PcieGenSpeed =
812 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
813 table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
814 dpm_table->pcie_speed_table.dpm_levels[i].param1);
815 table->LinkLevel[i].EnabledForActivity = 1;
816 table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
817 table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
818 table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
819 }
820
821 smu_data->smc_state_table.LinkLevelCount =
822 (uint8_t)dpm_table->pcie_speed_table.count;
823
824/* TODO: move to hwmgr */
825 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
826 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
827
828 return 0;
829}
830
831
832static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
833 SMU74_Discrete_DpmTable *table)
834{
835 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
836 uint32_t i, ref_clk;
837
838 struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
839
840 ref_clk = smu7_get_xclk(hwmgr);
841
842 if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
843 for (i = 0; i < NUM_SCLK_RANGE; i++) {
844 table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
845 table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
846 table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
847
848 table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
849 table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
850
851 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
852 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
853 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
854 }
855 return;
856 }
857
858 for (i = 0; i < NUM_SCLK_RANGE; i++) {
859 smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
860 smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
861
862 table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
863 table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
864 table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
865
866 table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
867 table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
868
869 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
870 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
871 CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
872 }
873}
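/* When the VBIOS does not provide an SCLK range table, the boundaries used
 * later by polaris10_calculate_sclk_params() are derived from the static
 * Range_Table:
 *
 *	f_trans = (ref_clk * fcw_trans) >> postdiv = ref_clk * fcw_trans / 2^postdiv
 *
 * As a purely illustrative example (hypothetical entry values, not taken from
 * Range_Table), fcw_trans_lower = 64 with postdiv = 2 gives a lower boundary
 * of ref_clk * 64 / 4 = 16 * ref_clk, where ref_clk is whatever reference
 * clock smu7_get_xclk() reports.
 */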
874
875static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
876 uint32_t clock, SMU_SclkSetting *sclk_setting)
877{
878 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
879 const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
880 struct pp_atomctrl_clock_dividers_ai dividers;
881 uint32_t ref_clock;
882 uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
883 uint8_t i;
884 int result;
885 uint64_t temp;
886
887 sclk_setting->SclkFrequency = clock;
888 /* get the engine clock dividers for this clock value */
889 result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
890 if (result == 0) {
891 sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
892 sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
893 sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
894 sclk_setting->PllRange = dividers.ucSclkPllRange;
895 sclk_setting->Sclk_slew_rate = 0x400;
896 sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
897 sclk_setting->Pcc_down_slew_rate = 0xffff;
898 sclk_setting->SSc_En = dividers.ucSscEnable;
899 sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
900 sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
901 sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
902 return result;
903 }
904
905 ref_clock = smu7_get_xclk(hwmgr);
906
907 for (i = 0; i < NUM_SCLK_RANGE; i++) {
908 if (clock > smu_data->range_table[i].trans_lower_frequency
909 && clock <= smu_data->range_table[i].trans_upper_frequency) {
910 sclk_setting->PllRange = i;
911 break;
912 }
913 }
914
915 sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
916 temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
917 temp <<= 0x10;
918 do_div(temp, ref_clock);
919 sclk_setting->Fcw_frac = temp & 0xffff;
920
921 pcc_target_percent = 10; /* Hardcode 10% for now. */
922 pcc_target_freq = clock - (clock * pcc_target_percent / 100);
923 sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
924
925 ss_target_percent = 2; /* Hardcode 2% for now. */
926 sclk_setting->SSc_En = 0;
927 if (ss_target_percent) {
928 sclk_setting->SSc_En = 1;
929 ss_target_freq = clock - (clock * ss_target_percent / 100);
930 sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
931 temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
932 temp <<= 0x10;
933 do_div(temp, ref_clock);
934 sclk_setting->Fcw1_frac = temp & 0xffff;
935 }
936
937 return 0;
938}
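/* The fallback path builds the PLL frequency control word (FCW) as a 16.16
 * fixed-point ratio of the post-divided target clock to the reference clock:
 * the integer part is (clock << postdiv) / ref_clock and the fractional part
 * is the low 16 bits of ((clock << postdiv) << 16) / ref_clock. With
 * hypothetical numbers (postdiv = 0, ref_clock = 1000, clock = 1500; units
 * are irrelevant to the arithmetic):
 *
 *	Fcw_int  = 1500 / 1000                    = 1
 *	Fcw_frac = ((1500 << 16) / 1000) & 0xffff = 0x18000 & 0xffff = 0x8000
 *
 * so the FCW encodes a ratio of 1.5. do_div() is used because the shifted
 * intermediate no longer fits in 32 bits for realistic clock values.
 */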
939
940static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
941 uint32_t clock, uint16_t sclk_al_threshold,
942 struct SMU74_Discrete_GraphicsLevel *level)
943{
944 int result;
945 /* PP_Clocks minClocks; */
946 uint32_t mvdd;
947 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
948 struct phm_ppt_v1_information *table_info =
949 (struct phm_ppt_v1_information *)(hwmgr->pptable);
950 SMU_SclkSetting curr_sclk_setting = { 0 };
951
952 result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
953
954 /* populate graphics levels */
955 result = polaris10_get_dependency_volt_by_clk(hwmgr,
956 table_info->vdd_dep_on_sclk, clock,
957 &level->MinVoltage, &mvdd);
958
959 PP_ASSERT_WITH_CODE((0 == result),
960 "can not find VDDC voltage value for "
961 "VDDC engine clock dependency table",
962 return result);
963 level->ActivityLevel = sclk_al_threshold;
964
965 level->CcPwrDynRm = 0;
966 level->CcPwrDynRm1 = 0;
967 level->EnabledForActivity = 0;
968 level->EnabledForThrottle = 1;
969 level->UpHyst = 10;
970 level->DownHyst = 0;
971 level->VoltageDownHyst = 0;
972 level->PowerThrottle = 0;
973 data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
974
975 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
976 level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
977 hwmgr->display_config.min_core_set_clock_in_sr);
978
979 /* Default to slow, highest DPM level will be
980 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
981 */
982 if (data->update_up_hyst)
983 level->UpHyst = (uint8_t)data->up_hyst;
984 if (data->update_down_hyst)
985 level->DownHyst = (uint8_t)data->down_hyst;
986
987 level->SclkSetting = curr_sclk_setting;
988
989 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
990 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
991 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
992 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
993 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
994 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
995 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
996 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
997 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
998 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
999 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
1000 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
1001 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
1002 CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
1003 return 0;
1004}
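/* Every multi-byte field written into the SMC state table is byte-swapped in
 * place before upload because the SMC consumes the table in big-endian order.
 * Assuming CONVERT_FROM_HOST_TO_SMC_UL(x) behaves like x = cpu_to_be32(x)
 * (a simplification; the macro is defined outside this file), a SclkFrequency
 * of 100000 (0x000186A0) would read back as 0xA0860100 on a little-endian
 * host after conversion, while the bytes in memory are 00 01 86 A0 -- exactly
 * the layout the SMC expects.
 */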
1005
1006static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1007{
1008 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1009 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1010 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
1011 struct phm_ppt_v1_information *table_info =
1012 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1013 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
1014 uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
1015 int result = 0;
1016 uint32_t array = smu_data->smu7_data.dpm_table_start +
1017 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
1018 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
1019 SMU74_MAX_LEVELS_GRAPHICS;
1020 struct SMU74_Discrete_GraphicsLevel *levels =
1021 smu_data->smc_state_table.GraphicsLevel;
1022 uint32_t i, max_entry;
1023 uint8_t hightest_pcie_level_enabled = 0,
1024 lowest_pcie_level_enabled = 0,
1025 mid_pcie_level_enabled = 0,
1026 count = 0;
1027
1028 polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
1029
1030 for (i = 0; i < dpm_table->sclk_table.count; i++) {
1031
1032 result = polaris10_populate_single_graphic_level(hwmgr,
1033 dpm_table->sclk_table.dpm_levels[i].value,
1034 (uint16_t)smu_data->activity_target[i],
1035 &(smu_data->smc_state_table.GraphicsLevel[i]));
1036 if (result)
1037 return result;
1038
1039		/* Make sure only DPM levels 0-1 have the Deep Sleep Div ID populated. */
1040 if (i > 1)
1041 levels[i].DeepSleepDivId = 0;
1042 }
1043 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1044 PHM_PlatformCaps_SPLLShutdownSupport))
1045 smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
1046
1047 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
1048 smu_data->smc_state_table.GraphicsDpmLevelCount =
1049 (uint8_t)dpm_table->sclk_table.count;
1050 hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
1051 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
1052
1053
1054 if (pcie_table != NULL) {
1055 PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
1056 "There must be 1 or more PCIE levels defined in PPTable.",
1057 return -EINVAL);
1058 max_entry = pcie_entry_cnt - 1;
1059 for (i = 0; i < dpm_table->sclk_table.count; i++)
1060 levels[i].pcieDpmLevel =
1061 (uint8_t) ((i < max_entry) ? i : max_entry);
1062 } else {
1063 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1064 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1065 (1 << (hightest_pcie_level_enabled + 1))) != 0))
1066 hightest_pcie_level_enabled++;
1067
1068 while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
1069 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1070 (1 << lowest_pcie_level_enabled)) == 0))
1071 lowest_pcie_level_enabled++;
1072
1073 while ((count < hightest_pcie_level_enabled) &&
1074 ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
1075 (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
1076 count++;
1077
1078 mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
1079 hightest_pcie_level_enabled ?
1080 (lowest_pcie_level_enabled + 1 + count) :
1081 hightest_pcie_level_enabled;
1082
1083 /* set pcieDpmLevel to hightest_pcie_level_enabled */
1084 for (i = 2; i < dpm_table->sclk_table.count; i++)
1085 levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
1086
1087 /* set pcieDpmLevel to lowest_pcie_level_enabled */
1088 levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
1089
1090 /* set pcieDpmLevel to mid_pcie_level_enabled */
1091 levels[1].pcieDpmLevel = mid_pcie_level_enabled;
1092 }
1093 /* level count will send to smc once at init smc table and never change */
1094 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1095 (uint32_t)array_size, SMC_RAM_END);
1096
1097 return result;
1098}
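/* When no PCIE table is present in the PPTable, the pcieDpmLevel of each SCLK
 * level is derived from pcie_dpm_enable_mask. A worked example (hypothetical
 * mask, just to trace the loops above): with pcie_dpm_enable_mask = 0b0110,
 *
 *	lowest_pcie_level_enabled   = 1  (first set bit)
 *	hightest_pcie_level_enabled = 2  (bits 1 and 2 are set contiguously)
 *	mid_pcie_level_enabled      = 2  (lowest + 1 + count, clamped to highest)
 *
 * so levels[0] runs at PCIe DPM level 1, levels[1] at level 2, and every
 * higher SCLK level at level 2 as well.
 */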
1099
1100
1101static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1102 uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
1103{
1104 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1105 struct phm_ppt_v1_information *table_info =
1106 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1107 int result = 0;
1108 struct cgs_display_info info = {0, 0, NULL};
1109 uint32_t mclk_stutter_mode_threshold = 40000;
1110
1111 cgs_get_active_displays_info(hwmgr->device, &info);
1112
1113 if (table_info->vdd_dep_on_mclk) {
1114 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1115 table_info->vdd_dep_on_mclk, clock,
1116 &mem_level->MinVoltage, &mem_level->MinMvdd);
1117 PP_ASSERT_WITH_CODE((0 == result),
1118 "can not find MinVddc voltage value from memory "
1119 "VDDC voltage dependency table", return result);
1120 }
1121
1122 mem_level->MclkFrequency = clock;
1123 mem_level->EnabledForThrottle = 1;
1124 mem_level->EnabledForActivity = 0;
1125 mem_level->UpHyst = 0;
1126 mem_level->DownHyst = 100;
1127 mem_level->VoltageDownHyst = 0;
1128 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1129 mem_level->StutterEnable = false;
1130 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1131
1132 data->display_timing.num_existing_displays = info.display_count;
1133
1134 if (mclk_stutter_mode_threshold &&
1135 (clock <= mclk_stutter_mode_threshold) &&
1136 (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
1137 STUTTER_ENABLE) & 0x1))
1138 mem_level->StutterEnable = true;
1139
1140 if (!result) {
1141 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
1142 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
1143 CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
1144 CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
1145 }
1146 return result;
1147}
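/* mclk_stutter_mode_threshold = 40000 corresponds to 400 MHz under the usual
 * powerplay convention of expressing clocks in 10 kHz units (an assumption
 * here, not restated in this file): memory self-refresh stutter is only
 * enabled for MCLK levels at or below that frequency, and only if the display
 * hardware already reports STUTTER_ENABLE in DPG_PIPE_STUTTER_CONTROL.
 */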
1148
1149static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1150{
1151 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1152 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1153 struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
1154 int result;
1155 /* populate MCLK dpm table to SMU7 */
1156 uint32_t array = smu_data->smu7_data.dpm_table_start +
1157 offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
1158 uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
1159 SMU74_MAX_LEVELS_MEMORY;
1160 struct SMU74_Discrete_MemoryLevel *levels =
1161 smu_data->smc_state_table.MemoryLevel;
1162 uint32_t i;
1163
1164 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1165 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1166 "can not populate memory level as memory clock is zero",
1167 return -EINVAL);
1168 result = polaris10_populate_single_memory_level(hwmgr,
1169 dpm_table->mclk_table.dpm_levels[i].value,
1170 &levels[i]);
1171 if (i == dpm_table->mclk_table.count - 1) {
1172 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1173 levels[i].EnabledForActivity = 1;
1174 }
1175 if (result)
1176 return result;
1177 }
1178
1179	/* To prevent MC activity in stutter mode from pushing DPM up,
1180	 * the UVD change complements this by putting the MCLK in
1181	 * a higher state by default, so that we are not affected by
1182	 * the up threshold or MCLK DPM latency.
1183	 */
1184 levels[0].ActivityLevel = 0x1f;
1185 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1186
1187 smu_data->smc_state_table.MemoryDpmLevelCount =
1188 (uint8_t)dpm_table->mclk_table.count;
1189 hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
1190 phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1191
1192 /* level count will send to smc once at init smc table and never change */
1193 result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
1194 (uint32_t)array_size, SMC_RAM_END);
1195
1196 return result;
1197}
1198
1199static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1200 uint32_t mclk, SMIO_Pattern *smio_pat)
1201{
1202 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1203 struct phm_ppt_v1_information *table_info =
1204 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1205 uint32_t i = 0;
1206
1207 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1208		/* find the first MVDD entry whose clock is greater than or equal to the requested clock */
1209 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1210 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1211 smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
1212 break;
1213 }
1214 }
1215 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1216 "MVDD Voltage is outside the supported range.",
1217 return -EINVAL);
1218 } else
1219 return -EINVAL;
1220
1221 return 0;
1222}
1223
1224static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1225 SMU74_Discrete_DpmTable *table)
1226{
1227 int result = 0;
1228 uint32_t sclk_frequency;
1229 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1230 struct phm_ppt_v1_information *table_info =
1231 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1232 SMIO_Pattern vol_level;
1233 uint32_t mvdd;
1234 uint16_t us_mvdd;
1235
1236 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1237
1238 /* Get MinVoltage and Frequency from DPM0,
1239 * already converted to SMC_UL */
1240 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
1241 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1242 table_info->vdd_dep_on_sclk,
1243 sclk_frequency,
1244 &table->ACPILevel.MinVoltage, &mvdd);
1245 PP_ASSERT_WITH_CODE((0 == result),
1246 "Cannot find ACPI VDDC voltage value "
1247 "in Clock Dependency Table",
1248 );
1249
1250 result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
1251 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
1252
1253 table->ACPILevel.DeepSleepDivId = 0;
1254 table->ACPILevel.CcPwrDynRm = 0;
1255 table->ACPILevel.CcPwrDynRm1 = 0;
1256
1257 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1258 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1259 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1260 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1261
1262 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1263 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1264 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1265 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1266 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
1267 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
1268 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
1269 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1270 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1271 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1272
1273
1274 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1275 table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
1276 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1277 table_info->vdd_dep_on_mclk,
1278 table->MemoryACPILevel.MclkFrequency,
1279 &table->MemoryACPILevel.MinVoltage, &mvdd);
1280 PP_ASSERT_WITH_CODE((0 == result),
1281 "Cannot find ACPI VDDCI voltage value "
1282 "in Clock Dependency Table",
1283 );
1284
1285 us_mvdd = 0;
1286 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1287 (data->mclk_dpm_key_disabled))
1288 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1289 else {
1290 if (!polaris10_populate_mvdd_value(hwmgr,
1291 data->dpm_table.mclk_table.dpm_levels[0].value,
1292 &vol_level))
1293 us_mvdd = vol_level.Voltage;
1294 }
1295
1296 if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
1297 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1298 else
1299 table->MemoryACPILevel.MinMvdd = 0;
1300
1301 table->MemoryACPILevel.StutterEnable = false;
1302
1303 table->MemoryACPILevel.EnabledForThrottle = 0;
1304 table->MemoryACPILevel.EnabledForActivity = 0;
1305 table->MemoryACPILevel.UpHyst = 0;
1306 table->MemoryACPILevel.DownHyst = 100;
1307 table->MemoryACPILevel.VoltageDownHyst = 0;
1308 table->MemoryACPILevel.ActivityLevel =
1309 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1310
1311 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1312 CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1313
1314 return result;
1315}
1316
1317static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1318 SMU74_Discrete_DpmTable *table)
1319{
1320 int result = -EINVAL;
1321 uint8_t count;
1322 struct pp_atomctrl_clock_dividers_vi dividers;
1323 struct phm_ppt_v1_information *table_info =
1324 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1325 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1326 table_info->mm_dep_table;
1327 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1328 uint32_t vddci;
1329
1330 table->VceLevelCount = (uint8_t)(mm_table->count);
1331 table->VceBootLevel = 0;
1332
1333 for (count = 0; count < table->VceLevelCount; count++) {
1334 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1335 table->VceLevel[count].MinVoltage = 0;
1336 table->VceLevel[count].MinVoltage |=
1337 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1338
1339 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1340 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1341 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1342 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1343 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1344 else
1345 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1346
1347
1348 table->VceLevel[count].MinVoltage |=
1349 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1350 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1351
1352		/* retrieve divider value for VBIOS */
1353 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1354 table->VceLevel[count].Frequency, &dividers);
1355 PP_ASSERT_WITH_CODE((0 == result),
1356 "can not find divide id for VCE engine clock",
1357 return result);
1358
1359 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1360
1361 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1362 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1363 }
1364 return result;
1365}
1366
1367
1368static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1369 SMU74_Discrete_DpmTable *table)
1370{
1371 int result = -EINVAL;
1372 uint8_t count;
1373 struct pp_atomctrl_clock_dividers_vi dividers;
1374 struct phm_ppt_v1_information *table_info =
1375 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1376 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1377 table_info->mm_dep_table;
1378 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1379 uint32_t vddci;
1380
1381 table->SamuBootLevel = 0;
1382 table->SamuLevelCount = (uint8_t)(mm_table->count);
1383
1384 for (count = 0; count < table->SamuLevelCount; count++) {
1385 /* not sure whether we need evclk or not */
1386 table->SamuLevel[count].MinVoltage = 0;
1387 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1388 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1389 VOLTAGE_SCALE) << VDDC_SHIFT;
1390
1391 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1392 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1393 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1394 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1395 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1396 else
1397 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1398
1399 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1400 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1401
1402 /* retrieve divider value for VBIOS */
1403 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1404 table->SamuLevel[count].Frequency, &dividers);
1405 PP_ASSERT_WITH_CODE((0 == result),
1406 "can not find divide id for samu clock", return result);
1407
1408 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1409
1410 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1411 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
1412 }
1413 return result;
1414}
1415
1416static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1417 int32_t eng_clock, int32_t mem_clock,
1418 SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1419{
1420 uint32_t dram_timing;
1421 uint32_t dram_timing2;
1422 uint32_t burst_time;
1423 int result;
1424
1425 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1426 eng_clock, mem_clock);
1427 PP_ASSERT_WITH_CODE(result == 0,
1428 "Error calling VBIOS to set DRAM_TIMING.", return result);
1429
1430 dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1431 dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1432 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1433
1434
1435 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
1436 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1437 arb_regs->McArbBurstTime = (uint8_t)burst_time;
1438
1439 return 0;
1440}
1441
1442static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1443{
1444 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1445 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1446 struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1447 uint32_t i, j;
1448 int result = 0;
1449
1450 for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1451 for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1452 result = polaris10_populate_memory_timing_parameters(hwmgr,
1453 hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1454 hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1455 &arb_regs.entries[i][j]);
1456 if (result == 0)
1457 result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
1458 if (result != 0)
1459 return result;
1460 }
1461 }
1462
1463 result = smu7_copy_bytes_to_smc(
1464 hwmgr,
1465 smu_data->smu7_data.arb_table_start,
1466 (uint8_t *)&arb_regs,
1467 sizeof(SMU74_Discrete_MCArbDramTimingTable),
1468 SMC_RAM_END);
1469 return result;
1470}
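/* The MC arbitration table is two-dimensional: entries[i][j] holds the DRAM
 * timing register snapshot for SCLK level i combined with MCLK level j, so a
 * DPM switch to any (sclk, mclk) pair can pick up pre-computed arbitration
 * settings. The whole matrix is uploaded with a single
 * smu7_copy_bytes_to_smc() call once all pairs have been populated.
 */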
1471
1472static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1473 struct SMU74_Discrete_DpmTable *table)
1474{
1475 int result = -EINVAL;
1476 uint8_t count;
1477 struct pp_atomctrl_clock_dividers_vi dividers;
1478 struct phm_ppt_v1_information *table_info =
1479 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1480 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1481 table_info->mm_dep_table;
1482 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1483 uint32_t vddci;
1484
1485 table->UvdLevelCount = (uint8_t)(mm_table->count);
1486 table->UvdBootLevel = 0;
1487
1488 for (count = 0; count < table->UvdLevelCount; count++) {
1489 table->UvdLevel[count].MinVoltage = 0;
1490 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1491 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1492 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1493 VOLTAGE_SCALE) << VDDC_SHIFT;
1494
1495 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1496 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1497 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1498 else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1499 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1500 else
1501 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1502
1503 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1504 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1505
1506 /* retrieve divider value for VBIOS */
1507 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1508 table->UvdLevel[count].VclkFrequency, &dividers);
1509 PP_ASSERT_WITH_CODE((0 == result),
1510 "can not find divide id for Vclk clock", return result);
1511
1512 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1513
1514 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1515 table->UvdLevel[count].DclkFrequency, &dividers);
1516 PP_ASSERT_WITH_CODE((0 == result),
1517 "can not find divide id for Dclk clock", return result);
1518
1519 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1520
1521 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1522 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1523 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1524 }
1525
1526 return result;
1527}
1528
1529static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1530 struct SMU74_Discrete_DpmTable *table)
1531{
1532 int result = 0;
1533 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1534
1535 table->GraphicsBootLevel = 0;
1536 table->MemoryBootLevel = 0;
1537
1538 /* find boot level from dpm table */
1539 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1540 data->vbios_boot_state.sclk_bootup_value,
1541 (uint32_t *)&(table->GraphicsBootLevel));
1542
1543 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1544 data->vbios_boot_state.mclk_bootup_value,
1545 (uint32_t *)&(table->MemoryBootLevel));
1546
1547 table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
1548 VOLTAGE_SCALE;
1549 table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1550 VOLTAGE_SCALE;
1551 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
1552 VOLTAGE_SCALE;
1553
1554 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1555 CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1556 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1557
1558 return 0;
1559}
1560
1561static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1562{
1563 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1564 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1565 struct phm_ppt_v1_information *table_info =
1566 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1567 uint8_t count, level;
1568
1569 count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1570
1571 for (level = 0; level < count; level++) {
1572 if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1573 hw_data->vbios_boot_state.sclk_bootup_value) {
1574 smu_data->smc_state_table.GraphicsBootLevel = level;
1575 break;
1576 }
1577 }
1578
1579 count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1580 for (level = 0; level < count; level++) {
1581 if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1582 hw_data->vbios_boot_state.mclk_bootup_value) {
1583 smu_data->smc_state_table.MemoryBootLevel = level;
1584 break;
1585 }
1586 }
1587
1588 return 0;
1589}
1590
1591static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1592{
1593 uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1594 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1595
1596 uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
1597 struct phm_ppt_v1_information *table_info =
1598 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1599 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1600 table_info->vdd_dep_on_sclk;
1601
1602 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1603
1604	/* Read SMU_EFUSE to calculate RO and determine
1605	 * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
1606	 */
1607 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1608 ixSMU_EFUSE_0 + (67 * 4));
1609 efuse &= 0xFF000000;
1610 efuse = efuse >> 24;
1611
1612 if (hwmgr->chip_id == CHIP_POLARIS10) {
1613 min = 1000;
1614 max = 2300;
1615 } else {
1616 min = 1100;
1617 max = 2100;
1618 }
1619
1620 ro = efuse * (max - min) / 255 + min;
1621
1622 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1623 for (i = 0; i < sclk_table->count; i++) {
1624 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1625 sclk_table->entries[i].cks_enable << i;
1626 if (hwmgr->chip_id == CHIP_POLARIS10) {
1627 volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
1628 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
1629 volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
1630 (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
1631 } else {
1632 volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
1633 (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
1634 volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
1635 (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
1636 }
1637
1638 if (volt_without_cks >= volt_with_cks)
1639 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1640 sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
1641
1642 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1643 }
1644
1645 smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
1646 /* Populate CKS Lookup Table */
1647 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1648 stretch_amount2 = 0;
1649 else if (stretch_amount == 3 || stretch_amount == 4)
1650 stretch_amount2 = 1;
1651 else {
1652 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1653 PHM_PlatformCaps_ClockStretcher);
1654 PP_ASSERT_WITH_CODE(false,
1655 "Stretch Amount in PPTable not supported\n",
1656 return -EINVAL);
1657 }
1658
1659 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1660 value &= 0xFFFFFFFE;
1661 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1662
1663 return 0;
1664}
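/* The RO (ring oscillator) value is recovered from the top byte of the fused
 * word and mapped linearly onto [min, max]. Worked example with a hypothetical
 * fuse value of 128 on Polaris10 (min = 1000, max = 2300):
 *
 *	ro = 128 * (2300 - 1000) / 255 + 1000 = 166400 / 255 + 1000 = 1652
 *
 * The per-level volt_offset is then the difference between the voltage needed
 * without clock stretching and with it (plus the per-entry cks_voffset),
 * rounded up in steps of 6.25, which is what the "(... * 100 + 624) / 625"
 * expression implements.
 */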
1665
1666static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1667 struct SMU74_Discrete_DpmTable *table)
1668{
1669 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1670 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1671 uint16_t config;
1672
1673 config = VR_MERGED_WITH_VDDC;
1674 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1675
1676 /* Set Vddc Voltage Controller */
1677 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1678 config = VR_SVI2_PLANE_1;
1679 table->VRConfig |= config;
1680 } else {
1681 PP_ASSERT_WITH_CODE(false,
1682 "VDDC should be on SVI2 control in merged mode!",
1683 );
1684 }
1685 /* Set Vddci Voltage Controller */
1686 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1687 config = VR_SVI2_PLANE_2; /* only in merged mode */
1688 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1689 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1690 config = VR_SMIO_PATTERN_1;
1691 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1692 } else {
1693 config = VR_STATIC_VOLTAGE;
1694 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1695 }
1696 /* Set Mvdd Voltage Controller */
1697 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1698 config = VR_SVI2_PLANE_2;
1699 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1700 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
1701 offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1702 } else {
1703 config = VR_STATIC_VOLTAGE;
1704 table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1705 }
1706
1707 return 0;
1708}
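/* VRConfig is a packed word with one regulator-type field per rail, positioned
 * by the VRCONF_*_SHIFT constants. For example, on a board with SVI2 VDDC,
 * GPIO-controlled VDDCI and no SVI2 MVDD, the code above ends up building
 *
 *	VRConfig = (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT)
 *		 | VR_SVI2_PLANE_1
 *		 | (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT)
 *		 | (VR_STATIC_VOLTAGE << VRCONF_MVDD_SHIFT);
 *
 * which is then byte-swapped along with the rest of the table before upload.
 */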
1709
1710
1711static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1712{
1713 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1714 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1715
1716 SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1717 int result = 0;
1718 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1719 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1720 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1721 uint32_t tmp, i;
1722
1723 struct phm_ppt_v1_information *table_info =
1724 (struct phm_ppt_v1_information *)hwmgr->pptable;
1725 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1726 table_info->vdd_dep_on_sclk;
1727
1728
1729 if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1730 return result;
1731
1732 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1733
1734 if (0 == result) {
1735 table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1736 table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1737 table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1738 table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1739 table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1740 table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1741 table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1742 table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1743 table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1744 table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1745 table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
1746 table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1747 table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1748 table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1749 table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1750 table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
1751 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1752 AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1753 AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1754 AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1755 AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1756 AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1757 AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1758 AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1759
1760 for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1761 AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1762 AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1763 }
1764
1765 result = smu7_read_smc_sram_dword(hwmgr,
1766 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1767 &tmp, SMC_RAM_END);
1768
1769 smu7_copy_bytes_to_smc(hwmgr,
1770 tmp,
1771 (uint8_t *)&AVFS_meanNsigma,
1772 sizeof(AVFS_meanNsigma_t),
1773 SMC_RAM_END);
1774
1775 result = smu7_read_smc_sram_dword(hwmgr,
1776 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1777 &tmp, SMC_RAM_END);
1778 smu7_copy_bytes_to_smc(hwmgr,
1779 tmp,
1780 (uint8_t *)&AVFS_SclkOffset,
1781 sizeof(AVFS_Sclk_Offset_t),
1782 SMC_RAM_END);
1783
1784 data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1785 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1786 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1787 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1788 data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
1789 }
1790 return result;
1791}
1792
1793static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
1794{
1795 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1796 uint32_t tmp;
1797 int result;
1798
1799	/* This is a read-modify-write on the first byte of the ARB table.
1800	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
1801	 * is the field 'current'.
1802	 * This solution is ugly, but we never write the whole table, only
1803	 * individual fields in it.
1804	 * In reality this field should not be in that structure
1805	 * but in a soft register.
1806	 */
1807 result = smu7_read_smc_sram_dword(hwmgr,
1808 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1809
1810 if (result)
1811 return result;
1812
1813 tmp &= 0x00FFFFFF;
1814 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1815
1816 return smu7_write_smc_sram_dword(hwmgr,
1817 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1818}
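/* A concrete view of the read-modify-write above: if the dword read back from
 * arb_table_start were, say, 0x0A0B0C0D, the masking and or-ing would produce
 *
 *	(0x0A0B0C0D & 0x00FFFFFF) | (MC_CG_ARB_FREQ_F1 << 24)
 *		= 0x000B0C0D | (MC_CG_ARB_FREQ_F1 << 24)
 *
 * i.e. only the most significant byte of the host-order dword -- the byte that
 * backs the table's 'current' field once the dword is stored back to SMC
 * SRAM -- is replaced, and the other three bytes are written back unchanged.
 */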
1819
1820static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1821{
1822 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1823 struct phm_ppt_v1_information *table_info =
1824 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1825
1826 if (table_info &&
1827 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
1828 table_info->cac_dtp_table->usPowerTuneDataSetID)
1829 smu_data->power_tune_defaults =
1830 &polaris10_power_tune_data_set_array
1831 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
1832 else
1833 smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
1834
1835}
1836
1837static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
1838{
1839 struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1840 struct SMU74_Discrete_GraphicsLevel *levels =
1841 data->smc_state_table.GraphicsLevel;
1842 unsigned min_level = 1;
1843
1844 hwmgr->default_gfx_power_profile.activity_threshold =
1845 be16_to_cpu(levels[0].ActivityLevel);
1846 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
1847 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
1848 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
1849
1850 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
1851 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
1852
1853	/* Work around compute SDMA instability: disable the lowest SCLK
1854	 * DPM level. Optimize the compute power profile: use only the
1855	 * highest 2 power levels (if more than 2 are available);
1856	 * hysteresis: 0 ms up, 5 ms down.
1857	 */
1858 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
1859 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
1860 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
1861 min_level = 1;
1862 else
1863 min_level = 0;
1864 hwmgr->default_compute_power_profile.min_sclk =
1865 be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
1866 hwmgr->default_compute_power_profile.up_hyst = 0;
1867 hwmgr->default_compute_power_profile.down_hyst = 5;
1868
1869 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
1870 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
1871}
1872
1873static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1874{
1875 int result;
1876 struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1877 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1878
1879 struct phm_ppt_v1_information *table_info =
1880 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1881 struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1882 uint8_t i;
1883 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1884 pp_atomctrl_clock_dividers_vi dividers;
1885
1886 polaris10_initialize_power_tune_defaults(hwmgr);
1887
1888 if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
1889 polaris10_populate_smc_voltage_tables(hwmgr, table);
1890
1891 table->SystemFlags = 0;
1892 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1893 PHM_PlatformCaps_AutomaticDCTransition))
1894 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1895
1896 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1897 PHM_PlatformCaps_StepVddc))
1898 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1899
1900 if (hw_data->is_memory_gddr5)
1901 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1902
1903 if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
1904 result = polaris10_populate_ulv_state(hwmgr, table);
1905 PP_ASSERT_WITH_CODE(0 == result,
1906 "Failed to initialize ULV state!", return result);
1907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1908 ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
1909 }
1910
1911 result = polaris10_populate_smc_link_level(hwmgr, table);
1912 PP_ASSERT_WITH_CODE(0 == result,
1913 "Failed to initialize Link Level!", return result);
1914
1915 result = polaris10_populate_all_graphic_levels(hwmgr);
1916 PP_ASSERT_WITH_CODE(0 == result,
1917 "Failed to initialize Graphics Level!", return result);
1918
1919 result = polaris10_populate_all_memory_levels(hwmgr);
1920 PP_ASSERT_WITH_CODE(0 == result,
1921 "Failed to initialize Memory Level!", return result);
1922
1923 result = polaris10_populate_smc_acpi_level(hwmgr, table);
1924 PP_ASSERT_WITH_CODE(0 == result,
1925 "Failed to initialize ACPI Level!", return result);
1926
1927 result = polaris10_populate_smc_vce_level(hwmgr, table);
1928 PP_ASSERT_WITH_CODE(0 == result,
1929 "Failed to initialize VCE Level!", return result);
1930
1931 result = polaris10_populate_smc_samu_level(hwmgr, table);
1932 PP_ASSERT_WITH_CODE(0 == result,
1933 "Failed to initialize SAMU Level!", return result);
1934
1935 /* Since only the initial state is completely set up at this point
1936 * (the other states are just copies of the boot state) we only
1937 * need to populate the ARB settings for the initial state.
1938 */
1939 result = polaris10_program_memory_timing_parameters(hwmgr);
1940 PP_ASSERT_WITH_CODE(0 == result,
1941 "Failed to Write ARB settings for the initial state.", return result);
1942
1943 result = polaris10_populate_smc_uvd_level(hwmgr, table);
1944 PP_ASSERT_WITH_CODE(0 == result,
1945 "Failed to initialize UVD Level!", return result);
1946
1947 result = polaris10_populate_smc_boot_level(hwmgr, table);
1948 PP_ASSERT_WITH_CODE(0 == result,
1949 "Failed to initialize Boot Level!", return result);
1950
1951 result = polaris10_populate_smc_initailial_state(hwmgr);
1952 PP_ASSERT_WITH_CODE(0 == result,
1953 "Failed to initialize Boot State!", return result);
1954
1955 result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
1956 PP_ASSERT_WITH_CODE(0 == result,
1957 "Failed to populate BAPM Parameters!", return result);
1958
1959 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1960 PHM_PlatformCaps_ClockStretcher)) {
1961 result = polaris10_populate_clock_stretcher_data_table(hwmgr);
1962 PP_ASSERT_WITH_CODE(0 == result,
1963 "Failed to populate Clock Stretcher Data Table!",
1964 return result);
1965 }
1966
1967 result = polaris10_populate_avfs_parameters(hwmgr);
1968 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
1969
1970 table->CurrSclkPllRange = 0xff;
1971 table->GraphicsVoltageChangeEnable = 1;
1972 table->GraphicsThermThrottleEnable = 1;
1973 table->GraphicsInterval = 1;
1974 table->VoltageInterval = 1;
1975 table->ThermalInterval = 1;
1976 table->TemperatureLimitHigh =
1977 table_info->cac_dtp_table->usTargetOperatingTemp *
1978 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1979 table->TemperatureLimitLow =
1980 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1981 SMU7_Q88_FORMAT_CONVERSION_UNIT;
1982 table->MemoryVoltageChangeEnable = 1;
1983 table->MemoryInterval = 1;
1984 table->VoltageResponseTime = 0;
1985 table->PhaseResponseTime = 0;
1986 table->MemoryThermThrottleEnable = 1;
1987 table->PCIeBootLinkLevel = 0;
1988 table->PCIeGenInterval = 1;
1989 table->VRConfig = 0;
1990
1991 result = polaris10_populate_vr_config(hwmgr, table);
1992 PP_ASSERT_WITH_CODE(0 == result,
1993 "Failed to populate VRConfig setting!", return result);
1994
1995 table->ThermGpio = 17;
1996 table->SclkStepSize = 0x4000;
1997
1998 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1999 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2000 } else {
2001 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2002 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2003 PHM_PlatformCaps_RegulatorHot);
2004 }
2005
2006 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2007 &gpio_pin)) {
2008 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
2009 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2010 PHM_PlatformCaps_AutomaticDCTransition);
2011 } else {
2012 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2013 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2014 PHM_PlatformCaps_AutomaticDCTransition);
2015 }
2016
2017 /* Thermal Output GPIO */
2018 if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
2019 &gpio_pin)) {
2020 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2021 PHM_PlatformCaps_ThermalOutGPIO);
2022
2023 table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
2024
2025		/* For polarity, read GPIOPAD_A with the assigned GPIO pin:
2026		 * since the VBIOS programs this register to the 'inactive state',
2027		 * the driver can then determine the 'active state' from it and
2028		 * program the SMU with the correct polarity.
2029		 */
2030 table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
2031 & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
2032 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2033
2034 /* if required, combine VRHot/PCC with thermal out GPIO */
2035 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
2036 && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
2037 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2038 } else {
2039 table->ThermOutGpio = 17;
2040 table->ThermOutPolarity = 1;
2041 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2042 }
2043
2044 /* Populate BIF_SCLK levels into SMC DPM table */
2045 for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
2046 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
2047 PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
2048
2049 if (i == 0)
2050 table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
2051 else
2052 table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
2053 }
2054
2055 for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
2056 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2057
2058 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2059 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2060 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2061 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2062 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2063 CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
2064 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2065 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2066 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2067 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2068
2069 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2070 result = smu7_copy_bytes_to_smc(hwmgr,
2071 smu_data->smu7_data.dpm_table_start +
2072 offsetof(SMU74_Discrete_DpmTable, SystemFlags),
2073 (uint8_t *)&(table->SystemFlags),
2074 sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
2075 SMC_RAM_END);
2076 PP_ASSERT_WITH_CODE(0 == result,
2077 "Failed to upload dpm data to SMC memory!", return result);
2078
2079 result = polaris10_init_arb_table_index(hwmgr);
2080 PP_ASSERT_WITH_CODE(0 == result,
2081 "Failed to upload arb data to SMC memory!", return result);
2082
2083 result = polaris10_populate_pm_fuses(hwmgr);
2084 PP_ASSERT_WITH_CODE(0 == result,
2085 "Failed to populate PM fuses to SMC memory!", return result);
2086
2087 polaris10_save_default_power_profile(hwmgr);
2088
2089 return 0;
2090}
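/* Note on the final upload above: the copy starts at the SystemFlags offset
 * and the length passed is sizeof(SMU74_Discrete_DpmTable) minus
 * 3 * sizeof(SMU74_PIDController), i.e. the trailing PID controller blocks of
 * the table are intentionally not overwritten in SMC memory.
 */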
2091
2092static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2093{
2094 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2095
2096	if (data->need_update_smu7_dpm_table &
2097			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2098 return polaris10_program_memory_timing_parameters(hwmgr);
2099
2100 return 0;
2101}
2102
2103int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2104{
2105 int ret;
2106 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
2107 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2108
2109 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
2110 return 0;
2111
2112 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2113 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
2114
2115 ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
2116 0 : -1;
2117
2118 if (!ret)
2119 /* If this param is not changed, this function could fire unnecessarily */
2120 smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
2121
2122 return ret;
2123}
2124
2125static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2126{
2127 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2128 SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2129 uint32_t duty100;
2130 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2131 uint16_t fdo_min, slope1, slope2;
2132 uint32_t reference_clock;
2133 int res;
2134 uint64_t tmp64;
2135
2136 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2137 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2138 PHM_PlatformCaps_MicrocodeFanControl);
2139 return 0;
2140 }
2141
2142 if (smu_data->smu7_data.fan_table_start == 0) {
2143 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2144 PHM_PlatformCaps_MicrocodeFanControl);
2145 return 0;
2146 }
2147
2148 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2149 CG_FDO_CTRL1, FMAX_DUTY100);
2150
2151 if (duty100 == 0) {
2152 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2153 PHM_PlatformCaps_MicrocodeFanControl);
2154 return 0;
2155 }
2156
2157 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
2158 usPWMMin * duty100;
2159 do_div(tmp64, 10000);
2160 fdo_min = (uint16_t)tmp64;
2161
2162 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2163 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2164 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2165 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2166
2167 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2168 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2169 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2170 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2171
2172 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2173 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2174
2175 fan_table.TempMin = cpu_to_be16((50 + hwmgr->
2176 thermal_controller.advanceFanControlParameters.usTMin) / 100);
2177 fan_table.TempMed = cpu_to_be16((50 + hwmgr->
2178 thermal_controller.advanceFanControlParameters.usTMed) / 100);
2179 fan_table.TempMax = cpu_to_be16((50 + hwmgr->
2180 thermal_controller.advanceFanControlParameters.usTMax) / 100);
2181
2182 fan_table.Slope1 = cpu_to_be16(slope1);
2183 fan_table.Slope2 = cpu_to_be16(slope2);
2184
2185 fan_table.FdoMin = cpu_to_be16(fdo_min);
2186
2187 fan_table.HystDown = cpu_to_be16(hwmgr->
2188 thermal_controller.advanceFanControlParameters.ucTHyst);
2189
2190 fan_table.HystUp = cpu_to_be16(1);
2191
2192 fan_table.HystSlope = cpu_to_be16(1);
2193
2194 fan_table.TempRespLim = cpu_to_be16(5);
2195
2196 reference_clock = smu7_get_xclk(hwmgr);
2197
2198 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2199 thermal_controller.advanceFanControlParameters.ulCycleDelay *
2200 reference_clock) / 1600);
2201
2202 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2203
2204 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2205 hwmgr->device, CGS_IND_REG__SMC,
2206 CG_MULT_THERMAL_CTRL, TEMP_SEL);
2207
2208 res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
2209 (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
2210 SMC_RAM_END);
2211
2212 if (!res && hwmgr->thermal_controller.
2213 advanceFanControlParameters.ucMinimumPWMLimit)
2214 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2215 PPSMC_MSG_SetFanMinPwm,
2216 hwmgr->thermal_controller.
2217 advanceFanControlParameters.ucMinimumPWMLimit);
2218
2219 if (!res && hwmgr->thermal_controller.
2220 advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
2221 res = smum_send_msg_to_smc_with_parameter(hwmgr,
2222 PPSMC_MSG_SetFanSclkTarget,
2223 hwmgr->thermal_controller.
2224 advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2225
2226 if (res)
2227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2228 PHM_PlatformCaps_MicrocodeFanControl);
2229
2230 return 0;
2231}
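Editor's aside (not part of the patch): the fan table setup above packs PWM limits given in 0.01% units and temperatures given in 0.01 degC into the SMC structure, using a "+50 then /100" pattern to round to the nearest whole unit. A minimal standalone sketch of that fixed-point math; all sample values are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;                 /* hypothetical FMAX_DUTY100 value */
	uint32_t us_pwm_min = 1500;             /* 15.00% in 0.01% units */
	uint32_t t_min = 4500, t_med = 6500;    /* 45.00 C and 65.00 C in 0.01 C units */
	uint32_t pwm_min = 1500, pwm_med = 4000;

	uint16_t fdo_min = (uint16_t)((uint64_t)us_pwm_min * duty100 / 10000);
	uint32_t t_diff1 = t_med - t_min;
	uint32_t pwm_diff1 = pwm_med - pwm_min;
	/* adding 50 before dividing by 100 rounds to the nearest unit */
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	uint16_t temp_min = (uint16_t)((50 + t_min) / 100);

	printf("fdo_min=%u slope1=%u temp_min=%u\n",
	       (unsigned)fdo_min, (unsigned)slope1, (unsigned)temp_min);
	return 0;
}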
2232
2233static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2234{
2235 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2236 uint32_t mm_boot_level_offset, mm_boot_level_value;
2237 struct phm_ppt_v1_information *table_info =
2238 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2239
2240 smu_data->smc_state_table.UvdBootLevel = 0;
2241 if (table_info->mm_dep_table->count > 0)
2242 smu_data->smc_state_table.UvdBootLevel =
2243 (uint8_t) (table_info->mm_dep_table->count - 1);
2244 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
2245 UvdBootLevel);
2246 mm_boot_level_offset /= 4;
2247 mm_boot_level_offset *= 4;
2248 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2249 CGS_IND_REG__SMC, mm_boot_level_offset);
2250 mm_boot_level_value &= 0x00FFFFFF;
2251 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2252 cgs_write_ind_register(hwmgr->device,
2253 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2254
2255 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2256 PHM_PlatformCaps_UVDDPM) ||
2257 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2258 PHM_PlatformCaps_StablePState))
2259 smum_send_msg_to_smc_with_parameter(hwmgr,
2260 PPSMC_MSG_UVDDPM_SetEnabledMask,
2261 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2262 return 0;
2263}
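Editor's aside (not part of the patch): polaris10_update_uvd_smc_table() rounds the byte offset of UvdBootLevel down to a dword boundary because SMC SRAM is accessed in 32-bit words, then masks out and rewrites only the byte it owns. A small standalone sketch of that read-modify-write, with a hypothetical register value standing in for cgs_read_ind_register().

#include <stdint.h>
#include <stdio.h>

static uint32_t set_byte_in_dword(uint32_t dword, unsigned byte_index, uint8_t value)
{
	uint32_t shift = byte_index * 8;

	dword &= ~(0xFFu << shift);      /* clear the target byte, e.g. keep 0x00FFFFFF for byte 3 */
	dword |= (uint32_t)value << shift;
	return dword;
}

int main(void)
{
	uint32_t smc_word = 0x11223344;  /* pretend value read from the SMC dword */
	uint8_t uvd_boot_level = 7;

	/* UvdBootLevel lives in the top byte of its dword, hence the << 24 in the driver */
	smc_word = set_byte_in_dword(smc_word, 3, uvd_boot_level);
	printf("0x%08X\n", (unsigned)smc_word);  /* prints 0x07223344 */
	return 0;
}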
2264
2265static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2266{
2267 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2268 uint32_t mm_boot_level_offset, mm_boot_level_value;
2269 struct phm_ppt_v1_information *table_info =
2270 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2271
2272 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2273 PHM_PlatformCaps_StablePState))
2274 smu_data->smc_state_table.VceBootLevel =
2275 (uint8_t) (table_info->mm_dep_table->count - 1);
2276 else
2277 smu_data->smc_state_table.VceBootLevel = 0;
2278
2279 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2280 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2281 mm_boot_level_offset /= 4;
2282 mm_boot_level_offset *= 4;
2283 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2284 CGS_IND_REG__SMC, mm_boot_level_offset);
2285 mm_boot_level_value &= 0xFF00FFFF;
2286 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2287 cgs_write_ind_register(hwmgr->device,
2288 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2289
2290 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2291 smum_send_msg_to_smc_with_parameter(hwmgr,
2292 PPSMC_MSG_VCEDPM_SetEnabledMask,
2293 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2294 return 0;
2295}
2296
2297static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2298{
2299 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2300 uint32_t mm_boot_level_offset, mm_boot_level_value;
2301
2302
2303 smu_data->smc_state_table.SamuBootLevel = 0;
2304 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2305 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2306
2307 mm_boot_level_offset /= 4;
2308 mm_boot_level_offset *= 4;
2309 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2310 CGS_IND_REG__SMC, mm_boot_level_offset);
2311 mm_boot_level_value &= 0xFFFFFF00;
2312 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2313 cgs_write_ind_register(hwmgr->device,
2314 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2315
2316 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2317 PHM_PlatformCaps_StablePState))
2318 smum_send_msg_to_smc_with_parameter(hwmgr,
2319 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2320 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2321 return 0;
2322}
2323
2324
2325static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
2326{
2327 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2328 struct phm_ppt_v1_information *table_info =
2329 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2330 struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2331 int max_entry, i;
2332
2333 max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
2334 SMU74_MAX_LEVELS_LINK :
2335 pcie_table->count;
2336 /* Setup BIF_SCLK levels */
2337 for (i = 0; i < max_entry; i++)
2338 smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
2339 return 0;
2340}
2341
2342static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2343{
2344 switch (type) {
2345 case SMU_UVD_TABLE:
2346 polaris10_update_uvd_smc_table(hwmgr);
2347 break;
2348 case SMU_VCE_TABLE:
2349 polaris10_update_vce_smc_table(hwmgr);
2350 break;
2351 case SMU_SAMU_TABLE:
2352 polaris10_update_samu_smc_table(hwmgr);
2353 break;
2354 case SMU_BIF_TABLE:
2355 polaris10_update_bif_smc_table(hwmgr);
2356 default:
2357 break;
2358 }
2359 return 0;
2360}
2361
2362static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2363{
2364 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2365 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2366
2367 int result = 0;
2368 uint32_t low_sclk_interrupt_threshold = 0;
2369
2370 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2371 PHM_PlatformCaps_SclkThrottleLowNotification)
2372 && (hwmgr->gfx_arbiter.sclk_threshold !=
2373 data->low_sclk_interrupt_threshold)) {
2374 data->low_sclk_interrupt_threshold =
2375 hwmgr->gfx_arbiter.sclk_threshold;
2376 low_sclk_interrupt_threshold =
2377 data->low_sclk_interrupt_threshold;
2378
2379 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2380
2381 result = smu7_copy_bytes_to_smc(
2382 hwmgr,
2383 smu_data->smu7_data.dpm_table_start +
2384 offsetof(SMU74_Discrete_DpmTable,
2385 LowSclkInterruptThreshold),
2386 (uint8_t *)&low_sclk_interrupt_threshold,
2387 sizeof(uint32_t),
2388 SMC_RAM_END);
2389 }
2390 PP_ASSERT_WITH_CODE((result == 0),
2391 "Failed to update SCLK threshold!", return result);
2392
2393 result = polaris10_program_mem_timing_parameters(hwmgr);
2394 PP_ASSERT_WITH_CODE((result == 0),
2395 "Failed to program memory timing parameters!",
2396 );
2397
2398 return result;
2399}
2400
2401static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2402{
2403 switch (type) {
2404 case SMU_SoftRegisters:
2405 switch (member) {
2406 case HandshakeDisables:
2407 return offsetof(SMU74_SoftRegisters, HandshakeDisables);
2408 case VoltageChangeTimeout:
2409 return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
2410 case AverageGraphicsActivity:
2411 return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
2412 case PreVBlankGap:
2413 return offsetof(SMU74_SoftRegisters, PreVBlankGap);
2414 case VBlankTimeout:
2415 return offsetof(SMU74_SoftRegisters, VBlankTimeout);
2416 case UcodeLoadStatus:
2417 return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
2418 case DRAM_LOG_ADDR_H:
2419 return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
2420 case DRAM_LOG_ADDR_L:
2421 return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
2422 case DRAM_LOG_PHY_ADDR_H:
2423 return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2424 case DRAM_LOG_PHY_ADDR_L:
2425 return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2426 case DRAM_LOG_BUFF_SIZE:
2427 return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2428 }
2429 case SMU_Discrete_DpmTable:
2430 switch (member) {
2431 case UvdBootLevel:
2432 return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
2433 case VceBootLevel:
2434 return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2435 case SamuBootLevel:
2436 return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
2437 case LowSclkInterruptThreshold:
2438 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2439 }
2440 }
2441 pr_warn("can't get the offset of type %x member %x\n", type, member);
2442 return 0;
2443}
2444
2445static uint32_t polaris10_get_mac_definition(uint32_t value)
2446{
2447 switch (value) {
2448 case SMU_MAX_LEVELS_GRAPHICS:
2449 return SMU74_MAX_LEVELS_GRAPHICS;
2450 case SMU_MAX_LEVELS_MEMORY:
2451 return SMU74_MAX_LEVELS_MEMORY;
2452 case SMU_MAX_LEVELS_LINK:
2453 return SMU74_MAX_LEVELS_LINK;
2454 case SMU_MAX_ENTRIES_SMIO:
2455 return SMU74_MAX_ENTRIES_SMIO;
2456 case SMU_MAX_LEVELS_VDDC:
2457 return SMU74_MAX_LEVELS_VDDC;
2458 case SMU_MAX_LEVELS_VDDGFX:
2459 return SMU74_MAX_LEVELS_VDDGFX;
2460 case SMU_MAX_LEVELS_VDDCI:
2461 return SMU74_MAX_LEVELS_VDDCI;
2462 case SMU_MAX_LEVELS_MVDD:
2463 return SMU74_MAX_LEVELS_MVDD;
2464 case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
2465 return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2466 }
2467
2468 pr_warn("can't get the mac of %x\n", value);
2469 return 0;
2470}
2471
2472static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2473{
2474 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2475 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2476 uint32_t tmp;
2477 int result;
2478 bool error = false;
2479
2480 result = smu7_read_smc_sram_dword(hwmgr,
2481 SMU7_FIRMWARE_HEADER_LOCATION +
2482 offsetof(SMU74_Firmware_Header, DpmTable),
2483 &tmp, SMC_RAM_END);
2484
2485 if (0 == result)
2486 smu_data->smu7_data.dpm_table_start = tmp;
2487
2488 error |= (0 != result);
2489
2490 result = smu7_read_smc_sram_dword(hwmgr,
2491 SMU7_FIRMWARE_HEADER_LOCATION +
2492 offsetof(SMU74_Firmware_Header, SoftRegisters),
2493 &tmp, SMC_RAM_END);
2494
2495 if (!result) {
2496 data->soft_regs_start = tmp;
2497 smu_data->smu7_data.soft_regs_start = tmp;
2498 }
2499
2500 error |= (0 != result);
2501
2502 result = smu7_read_smc_sram_dword(hwmgr,
2503 SMU7_FIRMWARE_HEADER_LOCATION +
2504 offsetof(SMU74_Firmware_Header, mcRegisterTable),
2505 &tmp, SMC_RAM_END);
2506
2507 if (!result)
2508 smu_data->smu7_data.mc_reg_table_start = tmp;
2509
2510 result = smu7_read_smc_sram_dword(hwmgr,
2511 SMU7_FIRMWARE_HEADER_LOCATION +
2512 offsetof(SMU74_Firmware_Header, FanTable),
2513 &tmp, SMC_RAM_END);
2514
2515 if (!result)
2516 smu_data->smu7_data.fan_table_start = tmp;
2517
2518 error |= (0 != result);
2519
2520 result = smu7_read_smc_sram_dword(hwmgr,
2521 SMU7_FIRMWARE_HEADER_LOCATION +
2522 offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
2523 &tmp, SMC_RAM_END);
2524
2525 if (!result)
2526 smu_data->smu7_data.arb_table_start = tmp;
2527
2528 error |= (0 != result);
2529
2530 result = smu7_read_smc_sram_dword(hwmgr,
2531 SMU7_FIRMWARE_HEADER_LOCATION +
2532 offsetof(SMU74_Firmware_Header, Version),
2533 &tmp, SMC_RAM_END);
2534
2535 if (!result)
2536 hwmgr->microcode_version_info.SMC = tmp;
2537
2538 error |= (0 != result);
2539
2540 return error ? -1 : 0;
2541}
2542
2543static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2544{
2545 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2546 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2547 ? true : false;
2548}
2549
2550static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
2551 struct amd_pp_profile *request)
2552{
2553 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
2554 (hwmgr->smu_backend);
2555 struct SMU74_Discrete_GraphicsLevel *levels =
2556 smu_data->smc_state_table.GraphicsLevel;
2557 uint32_t array = smu_data->smu7_data.dpm_table_start +
2558 offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
2559 uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
2560 SMU74_MAX_LEVELS_GRAPHICS;
2561 uint32_t i;
2562
2563 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2564 levels[i].ActivityLevel =
2565 cpu_to_be16(request->activity_threshold);
2566 levels[i].EnabledForActivity = 1;
2567 levels[i].UpHyst = request->up_hyst;
2568 levels[i].DownHyst = request->down_hyst;
2569 }
2570
2571 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
2572 array_size, SMC_RAM_END);
2573}
2574
392const struct pp_smumgr_func polaris10_smu_funcs = { 2575const struct pp_smumgr_func polaris10_smu_funcs = {
393 .smu_init = polaris10_smu_init, 2576 .smu_init = polaris10_smu_init,
394 .smu_fini = smu7_smu_fini, 2577 .smu_fini = smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c997117f2461..7f5359a97ef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -25,12 +25,13 @@
25#include "pp_debug.h" 25#include "pp_debug.h"
26#include "smumgr.h" 26#include "smumgr.h"
27#include "smu_ucode_xfer_vi.h" 27#include "smu_ucode_xfer_vi.h"
28#include "smu/smu_7_1_3_d.h"
29#include "smu/smu_7_1_3_sh_mask.h"
30#include "ppatomctrl.h" 28#include "ppatomctrl.h"
31#include "cgs_common.h" 29#include "cgs_common.h"
32#include "smu7_ppsmc.h" 30#include "smu7_ppsmc.h"
33#include "smu7_smumgr.h" 31#include "smu7_smumgr.h"
32#include "smu7_common.h"
33
34#include "polaris10_pwrvirus.h"
34 35
35#define SMU7_SMC_SIZE 0x20000 36#define SMU7_SMC_SIZE 0x20000
36 37
@@ -540,6 +541,47 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
540 return result; 541 return result;
541} 542}
542 543
544static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
545{
546 int i;
547 uint32_t reg, data;
548
549 for (i = 0; i < size; i++) {
550 reg = pvirus->reg;
551 data = pvirus->data;
552 if (reg != 0xffffffff)
553 cgs_write_register(hwmgr->device, reg, data);
554 else
555 break;
556 pvirus++;
557 }
558}
559
560static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
561{
562 int i;
563
564 cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
565 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
566 cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
567 for (i = 0; i < section->dfy_size; i++)
568 cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
569}
570
571int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
572{
573 execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
574 execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
575 execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
576 execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
577 execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
578 execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
579 execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
580 execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
581
582 return 0;
583}
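Editor's aside (not part of the patch): execute_pwr_table() above walks an array of {reg, data} pairs and stops at a 0xffffffff sentinel register, writing each entry as it goes. A standalone sketch of the same sentinel-terminated loop against a stub write function; the table contents are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct reg_write { uint32_t reg; uint32_t data; };

static void stub_write_register(uint32_t reg, uint32_t data)
{
	printf("write reg 0x%04X = 0x%08X\n", (unsigned)reg, (unsigned)data);
}

int main(void)
{
	static const struct reg_write table[] = {
		{ 0x3040, 0x00000001 },
		{ 0x3044, 0x0000ABCD },
		{ 0xffffffff, 0 },        /* sentinel: stop here even if size is larger */
		{ 0x3048, 0xDEADBEEF },   /* never written */
	};
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].reg == 0xffffffff)
			break;
		stub_write_register(table[i].reg, table[i].data);
	}
	return 0;
}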
584
543int smu7_init(struct pp_hwmgr *hwmgr) 585int smu7_init(struct pp_hwmgr *hwmgr)
544{ 586{
545 struct smu7_smumgr *smu_data; 587 struct smu7_smumgr *smu_data;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 0b63c5c1043c..c87263bc0caa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -88,4 +88,6 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
88int smu7_init(struct pp_hwmgr *hwmgr); 88int smu7_init(struct pp_hwmgr *hwmgr);
89int smu7_smu_fini(struct pp_hwmgr *hwmgr); 89int smu7_smu_fini(struct pp_hwmgr *hwmgr);
90 90
91#endif \ No newline at end of file 91int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr);
92
93#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
deleted file mode 100644
index 1f720ccdaf99..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ /dev/null
@@ -1,3261 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "pp_debug.h"
25#include "tonga_smc.h"
26#include "smu7_dyn_defaults.h"
27
28#include "smu7_hwmgr.h"
29#include "hardwaremanager.h"
30#include "ppatomctrl.h"
31#include "cgs_common.h"
32#include "atombios.h"
33#include "tonga_smumgr.h"
34#include "pppcielanes.h"
35#include "pp_endian.h"
36#include "smu7_ppsmc.h"
37
38#include "smu72_discrete.h"
39
40#include "smu/smu_7_1_2_d.h"
41#include "smu/smu_7_1_2_sh_mask.h"
42
43#include "gmc/gmc_8_1_d.h"
44#include "gmc/gmc_8_1_sh_mask.h"
45
46#include "bif/bif_5_0_d.h"
47#include "bif/bif_5_0_sh_mask.h"
48
49#include "dce/dce_10_0_d.h"
50#include "dce/dce_10_0_sh_mask.h"
51
52
53#define VOLTAGE_SCALE 4
54#define POWERTUNE_DEFAULT_SET_MAX 1
55#define VOLTAGE_VID_OFFSET_SCALE1 625
56#define VOLTAGE_VID_OFFSET_SCALE2 100
57#define MC_CG_ARB_FREQ_F1 0x0b
58#define VDDC_VDDCI_DELTA 200
59
60
61static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
62/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
63 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
64 */
65 {1, 0xF, 0xFD, 0x19,
66 5, 45, 0, 0xB0000,
67 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
68 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
69 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
70 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
71 },
72};
73
74/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
75static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
76 {600, 1050, 3, 0},
77 {600, 1050, 6, 1}
78};
79
80/* [FF, SS] type, [] 4 voltage ranges,
81 * and [Floor Freq, Boundary Freq, VID min , VID max]
82 */
83static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
84 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
85 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
86};
87
88/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
89static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
90 {0, 1, 3, 2, 4, 5},
91 {0, 2, 4, 5, 6, 5}
92};
93
94/* PPGen generates the gain setting in units of x * 100.
95 * This function converts it to units of x * 4096 (0x1000),
96 * which is the unit expected by the SMC firmware.
97 */
98
99
100static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
101 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
102 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
103{
104 uint32_t i = 0;
105 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
106 struct phm_ppt_v1_information *pptable_info =
107 (struct phm_ppt_v1_information *)(hwmgr->pptable);
108
109 /* clock - voltage dependency table is empty table */
110 if (allowed_clock_voltage_table->count == 0)
111 return -EINVAL;
112
113 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
114 /* find first sclk bigger than request */
115 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
116 voltage->VddGfx = phm_get_voltage_index(
117 pptable_info->vddgfx_lookup_table,
118 allowed_clock_voltage_table->entries[i].vddgfx);
119 voltage->Vddc = phm_get_voltage_index(
120 pptable_info->vddc_lookup_table,
121 allowed_clock_voltage_table->entries[i].vddc);
122
123 if (allowed_clock_voltage_table->entries[i].vddci)
124 voltage->Vddci =
125 phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
126 else
127 voltage->Vddci =
128 phm_get_voltage_id(&data->vddci_voltage_table,
129 allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
130
131
132 if (allowed_clock_voltage_table->entries[i].mvdd)
133 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
134
135 voltage->Phases = 1;
136 return 0;
137 }
138 }
139
140 /* sclk is bigger than max sclk in the dependence table */
141 voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
142 allowed_clock_voltage_table->entries[i-1].vddgfx);
143 voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
144 allowed_clock_voltage_table->entries[i-1].vddc);
145
146 if (allowed_clock_voltage_table->entries[i-1].vddci)
147 voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
148 allowed_clock_voltage_table->entries[i-1].vddci);
149
150 if (allowed_clock_voltage_table->entries[i-1].mvdd)
151 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
152
153 return 0;
154}
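Editor's aside (not part of the patch): tonga_get_dependency_volt_by_clk() selects the first dependency entry whose clock is at least the requested clock, and falls back to the table's highest entry when the request exceeds the table. A minimal sketch of just that selection rule, with hypothetical table values.

#include <stdint.h>
#include <stdio.h>

struct clk_volt { uint32_t clk; uint16_t vddc; };

static uint16_t volt_for_clk(const struct clk_volt *tab, unsigned count, uint32_t clk)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (tab[i].clk >= clk)
			return tab[i].vddc;      /* first level that can carry the request */
	return tab[count - 1].vddc;              /* request above table max: use the top level */
}

int main(void)
{
	static const struct clk_volt tab[] = {
		{ 30000, 900 }, { 60000, 1000 }, { 90000, 1150 },
	};

	printf("%u %u\n", (unsigned)volt_for_clk(tab, 3, 45000),
	       (unsigned)volt_for_clk(tab, 3, 120000));
	return 0;
}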
155
156
157/**
158 * Vddc table preparation for SMC.
159 *
160 * @param hwmgr the address of the hardware manager
161 * @param table the SMC DPM table structure to be populated
162 * @return always 0
163 */
164static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
165 SMU72_Discrete_DpmTable *table)
166{
167 unsigned int count;
168 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
169
170 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
171 table->VddcLevelCount = data->vddc_voltage_table.count;
172 for (count = 0; count < table->VddcLevelCount; count++) {
173 table->VddcTable[count] =
174 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
175 }
176 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
177 }
178 return 0;
179}
180
181/**
182 * VddGfx table preparation for SMC.
183 *
184 * @param hwmgr the address of the hardware manager
185 * @param table the SMC DPM table structure to be populated
186 * @return always 0
187 */
188static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
189 SMU72_Discrete_DpmTable *table)
190{
191 unsigned int count;
192 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
193
194 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
195 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
196 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
197 table->VddGfxTable[count] =
198 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
199 }
200 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
201 }
202 return 0;
203}
204
205/**
206 * Vddci table preparation for SMC.
207 *
208 * @param *hwmgr The address of the hardware manager.
209 * @param *table The SMC DPM table structure to be populated.
210 * @return 0
211 */
212static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
213 SMU72_Discrete_DpmTable *table)
214{
215 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
216 uint32_t count;
217
218 table->VddciLevelCount = data->vddci_voltage_table.count;
219 for (count = 0; count < table->VddciLevelCount; count++) {
220 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
221 table->VddciTable[count] =
222 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
223 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
224 table->SmioTable1.Pattern[count].Voltage =
225 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
226 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
227 table->SmioTable1.Pattern[count].Smio =
228 (uint8_t) count;
229 table->Smio[count] |=
230 data->vddci_voltage_table.entries[count].smio_low;
231 table->VddciTable[count] =
232 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
233 }
234 }
235
236 table->SmioMask1 = data->vddci_voltage_table.mask_low;
237 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
238
239 return 0;
240}
241
242/**
243 * Mvdd table preparation for SMC.
244 *
245 * @param *hwmgr The address of the hardware manager.
246 * @param *table The SMC DPM table structure to be populated.
247 * @return 0
248 */
249static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
250 SMU72_Discrete_DpmTable *table)
251{
252 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
253 uint32_t count;
254
255 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
256 table->MvddLevelCount = data->mvdd_voltage_table.count;
257 for (count = 0; count < table->MvddLevelCount; count++) {
258 table->SmioTable2.Pattern[count].Voltage =
259 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
260 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
261 table->SmioTable2.Pattern[count].Smio =
262 (uint8_t) count;
263 table->Smio[count] |=
264 data->mvdd_voltage_table.entries[count].smio_low;
265 }
266 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
267
268 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
269 }
270
271 return 0;
272}
273
274/**
275 * Preparation of vddc and vddgfx CAC tables for SMC.
276 *
277 * @param hwmgr the address of the hardware manager
278 * @param table the SMC DPM table structure to be populated
279 * @return always 0
280 */
281static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
282 SMU72_Discrete_DpmTable *table)
283{
284 uint32_t count;
285 uint8_t index = 0;
286 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
287 struct phm_ppt_v1_information *pptable_info =
288 (struct phm_ppt_v1_information *)(hwmgr->pptable);
289 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
290 pptable_info->vddgfx_lookup_table;
291 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
292 pptable_info->vddc_lookup_table;
293
294 /* table is already swapped, so in order to use the value from it
295 * we need to swap it back.
296 */
297 uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
298 uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
299
300 for (count = 0; count < vddc_level_count; count++) {
301 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
302 index = phm_get_voltage_index(vddc_lookup_table,
303 data->vddc_voltage_table.entries[count].value);
304 table->BapmVddcVidLoSidd[count] =
305 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
306 table->BapmVddcVidHiSidd[count] =
307 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
308 table->BapmVddcVidHiSidd2[count] =
309 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
310 }
311
312 if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
313 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
314 for (count = 0; count < vddgfx_level_count; count++) {
315 index = phm_get_voltage_index(vddgfx_lookup_table,
316 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
317 table->BapmVddGfxVidHiSidd2[count] =
318 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
319 }
320 } else {
321 for (count = 0; count < vddc_level_count; count++) {
322 index = phm_get_voltage_index(vddc_lookup_table,
323 data->vddc_voltage_table.entries[count].value);
324 table->BapmVddGfxVidLoSidd[count] =
325 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
326 table->BapmVddGfxVidHiSidd[count] =
327 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
328 table->BapmVddGfxVidHiSidd2[count] =
329 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
330 }
331 }
332
333 return 0;
334}
335
336/**
337 * Preparation of voltage tables for SMC.
338 *
339 * @param hwmgr the address of the hardware manager
340 * @param table the SMC DPM table structure to be populated
341 * @return always 0
342 */
343
344static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
345 SMU72_Discrete_DpmTable *table)
346{
347 int result;
348
349 result = tonga_populate_smc_vddc_table(hwmgr, table);
350 PP_ASSERT_WITH_CODE(!result,
351 "can not populate VDDC voltage table to SMC",
352 return -EINVAL);
353
354 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
355 PP_ASSERT_WITH_CODE(!result,
356 "can not populate VDDCI voltage table to SMC",
357 return -EINVAL);
358
359 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
360 PP_ASSERT_WITH_CODE(!result,
361 "can not populate VDDGFX voltage table to SMC",
362 return -EINVAL);
363
364 result = tonga_populate_smc_mvdd_table(hwmgr, table);
365 PP_ASSERT_WITH_CODE(!result,
366 "can not populate MVDD voltage table to SMC",
367 return -EINVAL);
368
369 result = tonga_populate_cac_tables(hwmgr, table);
370 PP_ASSERT_WITH_CODE(!result,
371 "can not populate CAC voltage tables to SMC",
372 return -EINVAL);
373
374 return 0;
375}
376
377static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
378 struct SMU72_Discrete_Ulv *state)
379{
380 struct phm_ppt_v1_information *table_info =
381 (struct phm_ppt_v1_information *)(hwmgr->pptable);
382
383 state->CcPwrDynRm = 0;
384 state->CcPwrDynRm1 = 0;
385
386 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
387 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
388 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
389
390 state->VddcPhase = 1;
391
392 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
393 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
394 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
395
396 return 0;
397}
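Editor's aside (not part of the patch): the VddcOffsetVid conversion above is the integer form of "offset / 6.25", which assumes the ULV offset is stored in millivolts and that one VID step is 6.25 mV, as the 100/625 scale factors suggest. A tiny sketch with a hypothetical offset.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ulv_offset_mv = 50;                        /* hypothetical: 50 mV below boot VDDC */
	uint8_t vid = (uint8_t)(ulv_offset_mv * 100 / 625); /* 50 / 6.25 = 8 VID steps */

	printf("VddcOffsetVid = %u\n", (unsigned)vid);
	return 0;
}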
398
399static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
400 struct SMU72_Discrete_DpmTable *table)
401{
402 return tonga_populate_ulv_level(hwmgr, &table->Ulv);
403}
404
405static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
406{
407 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
408 struct smu7_dpm_table *dpm_table = &data->dpm_table;
409 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
410 uint32_t i;
411
412 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
413 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
414 table->LinkLevel[i].PcieGenSpeed =
415 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
416 table->LinkLevel[i].PcieLaneCount =
417 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
418 table->LinkLevel[i].EnabledForActivity =
419 1;
420 table->LinkLevel[i].SPC =
421 (uint8_t)(data->pcie_spc_cap & 0xff);
422 table->LinkLevel[i].DownThreshold =
423 PP_HOST_TO_SMC_UL(5);
424 table->LinkLevel[i].UpThreshold =
425 PP_HOST_TO_SMC_UL(30);
426 }
427
428 smu_data->smc_state_table.LinkLevelCount =
429 (uint8_t)dpm_table->pcie_speed_table.count;
430 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
431 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
432
433 return 0;
434}
435
436/**
437 * Calculates the SCLK dividers using the provided engine clock
438 *
439 * @param hwmgr the address of the hardware manager
440 * @param engine_clock the engine clock to use to populate the structure
441 * @param sclk the SMC SCLK structure to be populated
442 */
443static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
444 uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
445{
446 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
447 pp_atomctrl_clock_dividers_vi dividers;
448 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
449 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
450 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
451 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
452 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
453 uint32_t reference_clock;
454 uint32_t reference_divider;
455 uint32_t fbdiv;
456 int result;
457
458 /* get the engine clock dividers for this clock value*/
459 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
460
461 PP_ASSERT_WITH_CODE(result == 0,
462 "Error retrieving Engine Clock dividers from VBIOS.", return result);
463
464 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
465 reference_clock = atomctrl_get_reference_clock(hwmgr);
466
467 reference_divider = 1 + dividers.uc_pll_ref_div;
468
469 /* the low 14 bits are the fraction and the high 12 bits are the divider */
470 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
471
472 /* SPLL_FUNC_CNTL setup*/
473 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
474 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
475 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
476 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
477
478 /* SPLL_FUNC_CNTL_3 setup*/
479 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
480 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
481
482 /* set to use fractional accumulation*/
483 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
484 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
485
486 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
487 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
488 pp_atomctrl_internal_ss_info ss_info;
489
490 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
491 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
492 /*
493 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
494 * ss_info.speed_spectrum_rate -- in unit of khz
495 */
496 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
497 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
498
499 /* clkv = 2 * D * fbdiv / NS */
500 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
501
502 cg_spll_spread_spectrum =
503 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
504 cg_spll_spread_spectrum =
505 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
506 cg_spll_spread_spectrum_2 =
507 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
508 }
509 }
510
511 sclk->SclkFrequency = engine_clock;
512 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
513 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
514 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
515 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
516 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
517
518 return 0;
519}
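Editor's aside (not part of the patch): the engine spread spectrum branch above derives CLKS from the reference clock, reference divider and spreading rate, and CLKV from the spread percentage scaled by the feedback divider. A standalone sketch of the same integer arithmetic; all input numbers are hypothetical and only illustrate the math, not real board values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2700;    /* hypothetical */
	uint32_t reference_divider = 2;     /* 1 + uc_pll_ref_div, hypothetical */
	uint32_t ss_rate = 30;              /* speed_spectrum_rate, hypothetical */
	uint32_t ss_pct = 50;               /* speed_spectrum_percentage, 0.01% units */
	uint32_t fbdiv = 0x100000;          /* feedback divider, hypothetical */

	uint32_t clk_s = reference_clock * 5 / (reference_divider * ss_rate);
	uint32_t clk_v = 4 * ss_pct * fbdiv / (clk_s * 10000);

	printf("CLKS=%u CLKV=%u\n", (unsigned)clk_s, (unsigned)clk_v);
	return 0;
}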
520
521/**
522 * Populates single SMC SCLK structure using the provided engine clock
523 *
524 * @param hwmgr the address of the hardware manager
525 * @param engine_clock the engine clock to use to populate the structure
526 * @param graphic_level the SMC graphics level structure to be populated
527 */
528static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
529 uint32_t engine_clock,
530 uint16_t sclk_activity_level_threshold,
531 SMU72_Discrete_GraphicsLevel *graphic_level)
532{
533 int result;
534 uint32_t mvdd;
535 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
536 struct phm_ppt_v1_information *pptable_info =
537 (struct phm_ppt_v1_information *)(hwmgr->pptable);
538
539 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
540
541 /* populate graphics levels*/
542 result = tonga_get_dependency_volt_by_clk(hwmgr,
543 pptable_info->vdd_dep_on_sclk, engine_clock,
544 &graphic_level->MinVoltage, &mvdd);
545 PP_ASSERT_WITH_CODE((!result),
546 "can not find VDDC voltage value for VDDC "
547 "engine clock dependency table", return result);
548
549 /* SCLK frequency in units of 10KHz*/
550 graphic_level->SclkFrequency = engine_clock;
551 /* Indicates maximum activity level for this performance level. 50% for now*/
552 graphic_level->ActivityLevel = sclk_activity_level_threshold;
553
554 graphic_level->CcPwrDynRm = 0;
555 graphic_level->CcPwrDynRm1 = 0;
556 /* this level can be used if activity is high enough.*/
557 graphic_level->EnabledForActivity = 0;
558 /* this level can be used for throttling.*/
559 graphic_level->EnabledForThrottle = 1;
560 graphic_level->UpHyst = 0;
561 graphic_level->DownHyst = 0;
562 graphic_level->VoltageDownHyst = 0;
563 graphic_level->PowerThrottle = 0;
564
565 data->display_timing.min_clock_in_sr =
566 hwmgr->display_config.min_core_set_clock_in_sr;
567
568 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
569 PHM_PlatformCaps_SclkDeepSleep))
570 graphic_level->DeepSleepDivId =
571 smu7_get_sleep_divider_id_from_clock(engine_clock,
572 data->display_timing.min_clock_in_sr);
573
574 /* Default to the low watermark; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later.*/
575 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
576
577 if (!result) {
578 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
579 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
580 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
581 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
582 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
583 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
584 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
585 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
586 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
587 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
588 }
589
590 return result;
591}
592
593/**
594 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
595 *
596 * @param hwmgr the address of the hardware manager
597 */
598int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
599{
600 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
601 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
602 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
603 struct smu7_dpm_table *dpm_table = &data->dpm_table;
604 struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
605 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
606 uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
607 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
608
609 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
610 SMU72_MAX_LEVELS_GRAPHICS;
611
612 SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
613
614 uint32_t i, max_entry;
615 uint8_t highest_pcie_level_enabled = 0;
616 uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
617 uint8_t count = 0;
618 int result = 0;
619
620 memset(levels, 0x00, level_array_size);
621
622 for (i = 0; i < dpm_table->sclk_table.count; i++) {
623 result = tonga_populate_single_graphic_level(hwmgr,
624 dpm_table->sclk_table.dpm_levels[i].value,
625 (uint16_t)smu_data->activity_target[i],
626 &(smu_data->smc_state_table.GraphicsLevel[i]));
627 if (result != 0)
628 return result;
629
630 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
631 if (i > 1)
632 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
633 }
634
635 /* Only enable level 0 for now. */
636 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
637
638 /* set highest level watermark to high */
639 if (dpm_table->sclk_table.count > 1)
640 smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
641 PPSMC_DISPLAY_WATERMARK_HIGH;
642
643 smu_data->smc_state_table.GraphicsDpmLevelCount =
644 (uint8_t)dpm_table->sclk_table.count;
645 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
646 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
647
648 if (pcie_table != NULL) {
649 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
650 "There must be 1 or more PCIE levels defined in PPTable.",
651 return -EINVAL);
652 max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
653 for (i = 0; i < dpm_table->sclk_table.count; i++) {
654 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
655 (uint8_t) ((i < max_entry) ? i : max_entry);
656 }
657 } else {
658 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
659 pr_err("Pcie Dpm Enablemask is 0 !");
660
661 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
662 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
663 (1<<(highest_pcie_level_enabled+1))) != 0)) {
664 highest_pcie_level_enabled++;
665 }
666
667 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
668 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
669 (1<<lowest_pcie_level_enabled)) == 0)) {
670 lowest_pcie_level_enabled++;
671 }
672
673 while ((count < highest_pcie_level_enabled) &&
674 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
675 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
676 count++;
677 }
678 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
679 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
680
681
682 /* set pcieDpmLevel to highest_pcie_level_enabled*/
683 for (i = 2; i < dpm_table->sclk_table.count; i++)
684 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
685
686 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
687 smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
688
689 /* set pcieDpmLevel to mid_pcie_level_enabled*/
690 smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
691 }
692 /* the level count is sent to the SMC once at SMC table init and never changes */
693 result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
694 (uint8_t *)levels, (uint32_t)level_array_size,
695 SMC_RAM_END);
696
697 return result;
698}
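Editor's aside (not part of the patch): when no PCIe table is present, tonga_populate_all_graphic_levels() scans pcie_dpm_enable_mask bit by bit for the lowest and highest enabled DPM levels and then picks a level roughly between them. The same scans applied to a hypothetical mask, as a standalone sketch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x0E;   /* hypothetical: DPM levels 1-3 enabled */
	uint8_t highest = 0, lowest = 0, count = 0, mid;

	while (mask && (mask & (1u << (highest + 1))))
		highest++;
	while (mask && !(mask & (1u << lowest)))
		lowest++;
	while (count < highest && !(mask & (1u << (lowest + 1 + count))))
		count++;
	mid = (lowest + 1 + count) < highest ? (lowest + 1 + count) : highest;

	printf("lowest=%u mid=%u highest=%u\n",
	       (unsigned)lowest, (unsigned)mid, (unsigned)highest);
	return 0;
}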
699
700/**
701 * Populates the SMC MCLK structure using the provided memory clock
702 *
703 * @param hwmgr the address of the hardware manager
704 * @param memory_clock the memory clock to use to populate the structure
705 * @param mclk the SMC memory level structure to be populated
706 */
707static int tonga_calculate_mclk_params(
708 struct pp_hwmgr *hwmgr,
709 uint32_t memory_clock,
710 SMU72_Discrete_MemoryLevel *mclk,
711 bool strobe_mode,
712 bool dllStateOn
713 )
714{
715 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
716
717 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
718 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
719 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
720 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
721 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
722 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
723 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
724 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
725 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
726
727 pp_atomctrl_memory_clock_param mpll_param;
728 int result;
729
730 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
731 memory_clock, &mpll_param, strobe_mode);
732 PP_ASSERT_WITH_CODE(
733 !result,
734 "Error retrieving Memory Clock Parameters from VBIOS.",
735 return result);
736
737 /* MPLL_FUNC_CNTL setup*/
738 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
739 mpll_param.bw_ctrl);
740
741 /* MPLL_FUNC_CNTL_1 setup*/
742 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
743 MPLL_FUNC_CNTL_1, CLKF,
744 mpll_param.mpll_fb_divider.cl_kf);
745 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
746 MPLL_FUNC_CNTL_1, CLKFRAC,
747 mpll_param.mpll_fb_divider.clk_frac);
748 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
749 MPLL_FUNC_CNTL_1, VCO_MODE,
750 mpll_param.vco_mode);
751
752 /* MPLL_AD_FUNC_CNTL setup*/
753 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
754 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
755 mpll_param.mpll_post_divider);
756
757 if (data->is_memory_gddr5) {
758 /* MPLL_DQ_FUNC_CNTL setup*/
759 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
760 MPLL_DQ_FUNC_CNTL, YCLK_SEL,
761 mpll_param.yclk_sel);
762 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
763 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
764 mpll_param.mpll_post_divider);
765 }
766
767 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
768 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
769 /*
770 ************************************
771 Fref = Reference Frequency
772 NF = Feedback divider ratio
773 NR = Reference divider ratio
774 Fnom = Nominal VCO output frequency = Fref * NF / NR
775 Fs = Spreading Rate
776 D = Percentage down-spread / 2
777 Fint = Reference input frequency to PFD = Fref / NR
778 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
779 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
780 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
781 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
782 *************************************
783 */
784 pp_atomctrl_internal_ss_info ss_info;
785 uint32_t freq_nom;
786 uint32_t tmp;
787 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
788
789 /* for GDDR5 for all modes and DDR3 */
790 if (1 == mpll_param.qdr)
791 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
792 else
793 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
794
795 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
796 tmp = (freq_nom / reference_clock);
797 tmp = tmp * tmp;
798
799 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
800 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
801 /* ss_info.speed_spectrum_rate -- in unit of khz */
802 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
803 /* = reference_clock * 5 / speed_spectrum_rate */
804 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
805
806 /* CLKV = 65536 * speed_spectrum_percentage / 2 * speed_spectrum_rate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
807 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
808 uint32_t clkv =
809 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
810 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
811
812 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
813 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
814 }
815 }
816
817 /* MCLK_PWRMGT_CNTL setup */
818 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
819 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
820 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
821 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
822 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
823 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
824
825 /* Save the result data to the output memory level structure */
826 mclk->MclkFrequency = memory_clock;
827 mclk->MpllFuncCntl = mpll_func_cntl;
828 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
829 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
830 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
831 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
832 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
833 mclk->DllCntl = dll_cntl;
834 mclk->MpllSs1 = mpll_ss1;
835 mclk->MpllSs2 = mpll_ss2;
836
837 return 0;
838}
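Editor's aside (not part of the patch): the memory spread spectrum comment above defines CLKS and CLKV in terms of the nominal VCO frequency and the MPLL reference clock. A standalone sketch that runs those two formulas on hypothetical inputs, mirroring the integer math in tonga_calculate_mclk_params().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t memory_clock = 150000;   /* hypothetical, in the driver's clock units */
	uint32_t post_divider = 0;        /* mpll_post_divider, hypothetical */
	uint32_t reference_clock = 2700;  /* MPLL reference clock, hypothetical */
	uint32_t ss_rate = 30;            /* speed_spectrum_rate, per the comment in kHz */
	uint32_t ss_pct = 50;             /* speed_spectrum_percentage, 0.01% units */

	uint32_t freq_nom = memory_clock * 2 * (1u << post_divider);
	uint32_t tmp = (freq_nom / reference_clock) * (freq_nom / reference_clock);

	uint32_t clks = reference_clock * 5 / ss_rate;
	uint32_t clkv = (((131 * ss_pct * ss_rate) / 100) * tmp) / freq_nom;

	printf("CLKS=%u CLKV=%u\n", (unsigned)clks, (unsigned)clkv);
	return 0;
}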
839
840static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
841 bool strobe_mode)
842{
843 uint8_t mc_para_index;
844
845 if (strobe_mode) {
846 if (memory_clock < 12500)
847 mc_para_index = 0x00;
848 else if (memory_clock > 47500)
849 mc_para_index = 0x0f;
850 else
851 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
852 } else {
853 if (memory_clock < 65000)
854 mc_para_index = 0x00;
855 else if (memory_clock > 135000)
856 mc_para_index = 0x0f;
857 else
858 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
859 }
860
861 return mc_para_index;
862}
863
864static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
865{
866 uint8_t mc_para_index;
867
868 if (memory_clock < 10000)
869 mc_para_index = 0;
870 else if (memory_clock >= 80000)
871 mc_para_index = 0x0f;
872 else
873 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
874
875 return mc_para_index;
876}
877
878
879static int tonga_populate_single_memory_level(
880 struct pp_hwmgr *hwmgr,
881 uint32_t memory_clock,
882 SMU72_Discrete_MemoryLevel *memory_level
883 )
884{
885 uint32_t mvdd = 0;
886 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
887 struct phm_ppt_v1_information *pptable_info =
888 (struct phm_ppt_v1_information *)(hwmgr->pptable);
889 int result = 0;
890 bool dll_state_on;
891 struct cgs_display_info info = {0};
892 uint32_t mclk_edc_wr_enable_threshold = 40000;
893 uint32_t mclk_stutter_mode_threshold = 30000;
894 uint32_t mclk_edc_enable_threshold = 40000;
895 uint32_t mclk_strobe_mode_threshold = 40000;
896
897 if (NULL != pptable_info->vdd_dep_on_mclk) {
898 result = tonga_get_dependency_volt_by_clk(hwmgr,
899 pptable_info->vdd_dep_on_mclk,
900 memory_clock,
901 &memory_level->MinVoltage, &mvdd);
902 PP_ASSERT_WITH_CODE(
903 !result,
904 "can not find MinVddc voltage value from memory VDDC "
905 "voltage dependency table",
906 return result);
907 }
908
909 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
910 memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
911 else
912 memory_level->MinMvdd = mvdd;
913
914 memory_level->EnabledForThrottle = 1;
915 memory_level->EnabledForActivity = 0;
916 memory_level->UpHyst = 0;
917 memory_level->DownHyst = 100;
918 memory_level->VoltageDownHyst = 0;
919
920 /* Indicates maximum activity level for this performance level.*/
921 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
922 memory_level->StutterEnable = 0;
923 memory_level->StrobeEnable = 0;
924 memory_level->EdcReadEnable = 0;
925 memory_level->EdcWriteEnable = 0;
926 memory_level->RttEnable = 0;
927
928 /* default set to low watermark. Highest level will be set to high later.*/
929 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
930
931 cgs_get_active_displays_info(hwmgr->device, &info);
932 data->display_timing.num_existing_displays = info.display_count;
933
934 if ((mclk_stutter_mode_threshold != 0) &&
935 (memory_clock <= mclk_stutter_mode_threshold) &&
936 (!data->is_uvd_enabled)
937 && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
938 && (data->display_timing.num_existing_displays <= 2)
939 && (data->display_timing.num_existing_displays != 0))
940 memory_level->StutterEnable = 1;
941
942 /* decide strobe mode*/
943 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
944 (memory_clock <= mclk_strobe_mode_threshold);
945
946 /* decide EDC mode and memory clock ratio*/
947 if (data->is_memory_gddr5) {
948 memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
949 memory_level->StrobeEnable);
950
951 if ((mclk_edc_enable_threshold != 0) &&
952 (memory_clock > mclk_edc_enable_threshold)) {
953 memory_level->EdcReadEnable = 1;
954 }
955
956 if ((mclk_edc_wr_enable_threshold != 0) &&
957 (memory_clock > mclk_edc_wr_enable_threshold)) {
958 memory_level->EdcWriteEnable = 1;
959 }
960
961 if (memory_level->StrobeEnable) {
962 if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
963 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
964 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
965 } else {
966 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
967 }
968
969 } else {
970 dll_state_on = data->dll_default_on;
971 }
972 } else {
973 memory_level->StrobeRatio =
974 tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
975 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
976 }
977
978 result = tonga_calculate_mclk_params(hwmgr,
979 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
980
981 if (!result) {
982 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
983 /* MCLK frequency in units of 10KHz*/
984 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
985 /* Indicates maximum activity level for this performance level.*/
986 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
987 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
988 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
989 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
990 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
991 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
992 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
993 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
994 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
995 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
996 }
997
998 return result;
999}
1000
1001int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1002{
1003 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1004 struct tonga_smumgr *smu_data =
1005 (struct tonga_smumgr *)(hwmgr->smu_backend);
1006 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1007 int result;
1008
1009 /* populate MCLK dpm table to SMU7 */
1010 uint32_t level_array_address =
1011 smu_data->smu7_data.dpm_table_start +
1012 offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
1013 uint32_t level_array_size =
1014 sizeof(SMU72_Discrete_MemoryLevel) *
1015 SMU72_MAX_LEVELS_MEMORY;
1016 SMU72_Discrete_MemoryLevel *levels =
1017 smu_data->smc_state_table.MemoryLevel;
1018 uint32_t i;
1019
1020 memset(levels, 0x00, level_array_size);
1021
1022 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1023 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1024 "can not populate memory level as memory clock is zero",
1025 return -EINVAL);
1026 result = tonga_populate_single_memory_level(
1027 hwmgr,
1028 dpm_table->mclk_table.dpm_levels[i].value,
1029 &(smu_data->smc_state_table.MemoryLevel[i]));
1030 if (result)
1031 return result;
1032 }
1033
1034 /* Only enable level 0 for now.*/
1035 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1036
1037 /*
1038 * Prevent MC activity in stutter mode from pushing DPM up.
1039 * The UVD change complements this by putting the MCLK in a higher state
1040 * by default, so we are not affected by the up threshold or MCLK DPM latency.
1041 */
1042 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1043 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1044
1045 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1046 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1047 /* set highest level watermark to high*/
1048 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1049
1050 /* the level count is sent to the SMC once, at SMC table init, and never changes */
1051 result = smu7_copy_bytes_to_smc(hwmgr,
1052 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1053 SMC_RAM_END);
1054
1055 return result;
1056}
1057
1058static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1059 uint32_t mclk, SMIO_Pattern *smio_pattern)
1060{
1061 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1062 struct phm_ppt_v1_information *table_info =
1063 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1064 uint32_t i = 0;
1065
1066 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1067 /* find the first MVDD entry whose clock is at least the requested clock */
1068 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1069 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1070 /* Always round to higher voltage. */
1071 smio_pattern->Voltage =
1072 data->mvdd_voltage_table.entries[i].value;
1073 break;
1074 }
1075 }
1076
1077 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1078 "MVDD Voltage is outside the supported range.",
1079 return -EINVAL);
1080 } else {
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
1087
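/*
 * Program the ACPI (lowest-power) state: run SCLK from the reference
 * clock with the SPLL powered down and held in reset, and keep the
 * memory DLLs reset and powered down for the memory ACPI level.
 */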
1088static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1089 SMU72_Discrete_DpmTable *table)
1090{
1091 int result = 0;
1092 struct tonga_smumgr *smu_data =
1093 (struct tonga_smumgr *)(hwmgr->smu_backend);
1094 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1095 struct pp_atomctrl_clock_dividers_vi dividers;
1096
1097 SMIO_Pattern voltage_level;
1098 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1099 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1100 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1101 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1102
1103 /* The ACPI state should not do DPM on DC (or ever).*/
1104 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1105
1106 table->ACPILevel.MinVoltage =
1107 smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
1108
1109 /* use the reference clock as the ACPI SCLK frequency */
1110 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1111
1112 /* get the engine clock dividers for this clock value*/
1113 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1114 table->ACPILevel.SclkFrequency, &dividers);
1115
1116 PP_ASSERT_WITH_CODE(result == 0,
1117 "Error retrieving Engine Clock dividers from VBIOS.",
1118 return result);
1119
1120 /* divider ID for required SCLK*/
1121 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1122 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1123 table->ACPILevel.DeepSleepDivId = 0;
1124
1125 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1126 SPLL_PWRON, 0);
1127 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1128 SPLL_RESET, 1);
1129 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
1130 SCLK_MUX_SEL, 4);
1131
1132 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1133 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1134 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1135 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1136 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1137 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1138 table->ACPILevel.CcPwrDynRm = 0;
1139 table->ACPILevel.CcPwrDynRm1 = 0;
1140
1141
1142 /* For various features to be enabled/disabled while this level is active.*/
1143 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1144 /* SCLK frequency in units of 10KHz*/
1145 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1146 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1147 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1148 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1149 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1150 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1151 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1152 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1153 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1154
1155 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1156 table->MemoryACPILevel.MinVoltage =
1157 smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
1158
1159 /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
1160
1161 if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
1162 table->MemoryACPILevel.MinMvdd =
1163 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1164 else
1165 table->MemoryACPILevel.MinMvdd = 0;
1166
1167 /* Force reset on DLL*/
1168 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1169 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1170 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1171 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1172
1173 /* Disable DLL in ACPIState*/
1174 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1175 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1176 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1177 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1178
1179 /* Enable DLL bypass signal*/
1180 dll_cntl = PHM_SET_FIELD(dll_cntl,
1181 DLL_CNTL, MRDCK0_BYPASS, 0);
1182 dll_cntl = PHM_SET_FIELD(dll_cntl,
1183 DLL_CNTL, MRDCK1_BYPASS, 0);
1184
1185 table->MemoryACPILevel.DllCntl =
1186 PP_HOST_TO_SMC_UL(dll_cntl);
1187 table->MemoryACPILevel.MclkPwrmgtCntl =
1188 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1189 table->MemoryACPILevel.MpllAdFuncCntl =
1190 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1191 table->MemoryACPILevel.MpllDqFuncCntl =
1192 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1193 table->MemoryACPILevel.MpllFuncCntl =
1194 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1195 table->MemoryACPILevel.MpllFuncCntl_1 =
1196 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1197 table->MemoryACPILevel.MpllFuncCntl_2 =
1198 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1199 table->MemoryACPILevel.MpllSs1 =
1200 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1201 table->MemoryACPILevel.MpllSs2 =
1202 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1203
1204 table->MemoryACPILevel.EnabledForThrottle = 0;
1205 table->MemoryACPILevel.EnabledForActivity = 0;
1206 table->MemoryACPILevel.UpHyst = 0;
1207 table->MemoryACPILevel.DownHyst = 100;
1208 table->MemoryACPILevel.VoltageDownHyst = 0;
1209 /* Indicates maximum activity level for this performance level.*/
1210 table->MemoryACPILevel.ActivityLevel =
1211 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1212
1213 table->MemoryACPILevel.StutterEnable = 0;
1214 table->MemoryACPILevel.StrobeEnable = 0;
1215 table->MemoryACPILevel.EdcReadEnable = 0;
1216 table->MemoryACPILevel.EdcWriteEnable = 0;
1217 table->MemoryACPILevel.RttEnable = 0;
1218
1219 return result;
1220}
1221
1222static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1223 SMU72_Discrete_DpmTable *table)
1224{
1225 int result = 0;
1226
1227 uint8_t count;
1228 pp_atomctrl_clock_dividers_vi dividers;
1229 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1230 struct phm_ppt_v1_information *pptable_info =
1231 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1232 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1233 pptable_info->mm_dep_table;
1234
1235 table->UvdLevelCount = (uint8_t) (mm_table->count);
1236 table->UvdBootLevel = 0;
1237
1238 for (count = 0; count < table->UvdLevelCount; count++) {
1239 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1240 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1241 table->UvdLevel[count].MinVoltage.Vddc =
1242 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1243 mm_table->entries[count].vddc);
1244 table->UvdLevel[count].MinVoltage.VddGfx =
1245 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1246 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1247 mm_table->entries[count].vddgfx) : 0;
1248 table->UvdLevel[count].MinVoltage.Vddci =
1249 phm_get_voltage_id(&data->vddci_voltage_table,
1250 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1251 table->UvdLevel[count].MinVoltage.Phases = 1;
1252
1253 /* retrieve the divider value from the VBIOS */
1254 result = atomctrl_get_dfs_pll_dividers_vi(
1255 hwmgr,
1256 table->UvdLevel[count].VclkFrequency,
1257 &dividers);
1258
1259 PP_ASSERT_WITH_CODE((!result),
1260 "can not find divide id for Vclk clock",
1261 return result);
1262
1263 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1264
1265 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1266 table->UvdLevel[count].DclkFrequency, &dividers);
1267 PP_ASSERT_WITH_CODE((!result),
1268 "can not find divide id for Dclk clock",
1269 return result);
1270
1271 table->UvdLevel[count].DclkDivider =
1272 (uint8_t)dividers.pll_post_divider;
1273
1274 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1275 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1276 }
1277
1278 return result;
1279
1280}
1281
1282static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1283 SMU72_Discrete_DpmTable *table)
1284{
1285 int result = 0;
1286
1287 uint8_t count;
1288 pp_atomctrl_clock_dividers_vi dividers;
1289 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1290 struct phm_ppt_v1_information *pptable_info =
1291 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1292 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1293 pptable_info->mm_dep_table;
1294
1295 table->VceLevelCount = (uint8_t) (mm_table->count);
1296 table->VceBootLevel = 0;
1297
1298 for (count = 0; count < table->VceLevelCount; count++) {
1299 table->VceLevel[count].Frequency =
1300 mm_table->entries[count].eclk;
1301 table->VceLevel[count].MinVoltage.Vddc =
1302 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1303 mm_table->entries[count].vddc);
1304 table->VceLevel[count].MinVoltage.VddGfx =
1305 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1306 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1307 mm_table->entries[count].vddgfx) : 0;
1308 table->VceLevel[count].MinVoltage.Vddci =
1309 phm_get_voltage_id(&data->vddci_voltage_table,
1310 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1311 table->VceLevel[count].MinVoltage.Phases = 1;
1312
1313 /* retrieve the divider value from the VBIOS */
1314 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1315 table->VceLevel[count].Frequency, &dividers);
1316 PP_ASSERT_WITH_CODE((!result),
1317 "can not find divide id for VCE engine clock",
1318 return result);
1319
1320 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1321
1322 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1323 }
1324
1325 return result;
1326}
1327
1328static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1329 SMU72_Discrete_DpmTable *table)
1330{
1331 int result = 0;
1332 uint8_t count;
1333 pp_atomctrl_clock_dividers_vi dividers;
1334 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1335 struct phm_ppt_v1_information *pptable_info =
1336 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1337 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1338 pptable_info->mm_dep_table;
1339
1340 table->AcpLevelCount = (uint8_t) (mm_table->count);
1341 table->AcpBootLevel = 0;
1342
1343 for (count = 0; count < table->AcpLevelCount; count++) {
1344 table->AcpLevel[count].Frequency =
1345 pptable_info->mm_dep_table->entries[count].aclk;
1346 table->AcpLevel[count].MinVoltage.Vddc =
1347 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1348 mm_table->entries[count].vddc);
1349 table->AcpLevel[count].MinVoltage.VddGfx =
1350 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1351 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1352 mm_table->entries[count].vddgfx) : 0;
1353 table->AcpLevel[count].MinVoltage.Vddci =
1354 phm_get_voltage_id(&data->vddci_voltage_table,
1355 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1356 table->AcpLevel[count].MinVoltage.Phases = 1;
1357
1358 /* retrieve the divider value from the VBIOS */
1359 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1360 table->AcpLevel[count].Frequency, &dividers);
1361 PP_ASSERT_WITH_CODE((!result),
1362 "can not find divide id for engine clock", return result);
1363
1364 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1365
1366 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1367 }
1368
1369 return result;
1370}
1371
1372static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1373 SMU72_Discrete_DpmTable *table)
1374{
1375 int result = 0;
1376 uint8_t count;
1377 pp_atomctrl_clock_dividers_vi dividers;
1378 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1379 struct phm_ppt_v1_information *pptable_info =
1380 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1381 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1382 pptable_info->mm_dep_table;
1383
1384 table->SamuBootLevel = 0;
1385 table->SamuLevelCount = (uint8_t) (mm_table->count);
1386
1387 for (count = 0; count < table->SamuLevelCount; count++) {
1388 /* not sure whether we need evclk or not */
1389 table->SamuLevel[count].Frequency =
1390 pptable_info->mm_dep_table->entries[count].samclock;
1391 table->SamuLevel[count].MinVoltage.Vddc =
1392 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1393 mm_table->entries[count].vddc);
1394 table->SamuLevel[count].MinVoltage.VddGfx =
1395 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1396 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1397 mm_table->entries[count].vddgfx) : 0;
1398 table->SamuLevel[count].MinVoltage.Vddci =
1399 phm_get_voltage_id(&data->vddci_voltage_table,
1400 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1401 table->SamuLevel[count].MinVoltage.Phases = 1;
1402
1403 /* retrieve the divider value from the VBIOS */
1404 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1405 table->SamuLevel[count].Frequency, &dividers);
1406 PP_ASSERT_WITH_CODE((!result),
1407 "can not find divide id for samu clock", return result);
1408
1409 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1410
1411 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1412 }
1413
1414 return result;
1415}
1416
1417static int tonga_populate_memory_timing_parameters(
1418 struct pp_hwmgr *hwmgr,
1419 uint32_t engine_clock,
1420 uint32_t memory_clock,
1421 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1422 )
1423{
1424 uint32_t dramTiming;
1425 uint32_t dramTiming2;
1426 uint32_t burstTime;
1427 int result;
1428
1429 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1430 engine_clock, memory_clock);
1431
1432 PP_ASSERT_WITH_CODE(result == 0,
1433 "Error calling VBIOS to set DRAM_TIMING.", return result);
1434
1435 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1436 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1437 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1438
1439 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1440 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1441 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1442
1443 return 0;
1444}
1445
1446/**
1447 * Set up parameters for the MC ARB.
1448 * This function is to be called from the SetPowerState table.
1449 *
1450 * @param hwmgr the address of the powerplay hardware manager.
1451 * @return 0 on success, or a negative error code on failure.
1452 */
1453static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1454{
1455 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1456 struct tonga_smumgr *smu_data =
1457 (struct tonga_smumgr *)(hwmgr->smu_backend);
1458 int result = 0;
1459 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1460 uint32_t i, j;
1461
1462 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1463
1464 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1465 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1466 result = tonga_populate_memory_timing_parameters
1467 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1468 data->dpm_table.mclk_table.dpm_levels[j].value,
1469 &arb_regs.entries[i][j]);
1470
1471 if (result)
1472 break;
1473 }
1474 }
1475
1476 if (!result) {
1477 result = smu7_copy_bytes_to_smc(
1478 hwmgr,
1479 smu_data->smu7_data.arb_table_start,
1480 (uint8_t *)&arb_regs,
1481 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1482 SMC_RAM_END
1483 );
1484 }
1485
1486 return result;
1487}
1488
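/*
 * Determine the boot SCLK/MCLK DPM levels from the VBIOS bootup clock
 * values, falling back to level 0 if no match is found, and record the
 * VBIOS bootup voltages as the boot voltage levels.
 */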
1489static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1490 SMU72_Discrete_DpmTable *table)
1491{
1492 int result = 0;
1493 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1494 struct tonga_smumgr *smu_data =
1495 (struct tonga_smumgr *)(hwmgr->smu_backend);
1496 table->GraphicsBootLevel = 0;
1497 table->MemoryBootLevel = 0;
1498
1499 /* find boot level from dpm table*/
1500 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1501 data->vbios_boot_state.sclk_bootup_value,
1502 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1503
1504 if (result != 0) {
1505 smu_data->smc_state_table.GraphicsBootLevel = 0;
1506 pr_err("[powerplay] VBIOS did not find boot engine "
1507 "clock value in dependency table. "
1508 "Using Graphics DPM level 0 !");
1509 result = 0;
1510 }
1511
1512 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1513 data->vbios_boot_state.mclk_bootup_value,
1514 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1515
1516 if (result != 0) {
1517 smu_data->smc_state_table.MemoryBootLevel = 0;
1518 pr_err("[powerplay] VBIOS did not find boot "
1519 "engine clock value in dependency table."
1520 "Using Memory DPM level 0 !");
1521 result = 0;
1522 }
1523
1524 table->BootVoltage.Vddc =
1525 phm_get_voltage_id(&(data->vddc_voltage_table),
1526 data->vbios_boot_state.vddc_bootup_value);
1527 table->BootVoltage.VddGfx =
1528 phm_get_voltage_id(&(data->vddgfx_voltage_table),
1529 data->vbios_boot_state.vddgfx_bootup_value);
1530 table->BootVoltage.Vddci =
1531 phm_get_voltage_id(&(data->vddci_voltage_table),
1532 data->vbios_boot_state.vddci_bootup_value);
1533 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1534
1535 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1536
1537 return result;
1538}
1539
1540static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1541{
1542 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1543 volt_with_cks, value;
1544 uint16_t clock_freq_u16;
1545 struct tonga_smumgr *smu_data =
1546 (struct tonga_smumgr *)(hwmgr->smu_backend);
1547 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1548 volt_offset = 0;
1549 struct phm_ppt_v1_information *table_info =
1550 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1551 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1552 table_info->vdd_dep_on_sclk;
1553 uint32_t hw_revision, dev_id;
1554 struct cgs_system_info sys_info = {0};
1555
1556 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1557
1558 sys_info.size = sizeof(struct cgs_system_info);
1559
1560 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1561 cgs_query_system_info(hwmgr->device, &sys_info);
1562 hw_revision = (uint32_t)sys_info.value;
1563
1564 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
1565 cgs_query_system_info(hwmgr->device, &sys_info);
1566 dev_id = (uint32_t)sys_info.value;
1567
1568 /* Read SMU_EFUSE to calculate RO and determine whether
1569 * the part is SS or FF. If RO >= 1660MHz, the part is FF.
1570 */
1571 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1572 ixSMU_EFUSE_0 + (146 * 4));
1573 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1574 ixSMU_EFUSE_0 + (148 * 4));
1575 efuse &= 0xFF000000;
1576 efuse = efuse >> 24;
1577 efuse2 &= 0xF;
1578
1579 if (efuse2 == 1)
1580 ro = (2300 - 1350) * efuse / 255 + 1350;
1581 else
1582 ro = (2500 - 1000) * efuse / 255 + 1000;
1583
1584 if (ro >= 1660)
1585 type = 0;
1586 else
1587 type = 1;
1588
1589 /* Populate Stretch amount */
1590 smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
1591
1592
1593 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
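 /*
 * For each SCLK level, record whether CKS is enabled and derive the
 * voltage offset from the difference between the estimated voltage
 * without CKS and with CKS; the constants below appear to be empirical
 * curve fits that differ per ASIC variant (ASICID_IS_TONGA_P).
 */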
1594 for (i = 0; i < sclk_table->count; i++) {
1595 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1596 sclk_table->entries[i].cks_enable << i;
1597 if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
1598 volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
1599 (sclk_table->entries[i].clk/100) / 10000) * 1000 /
1600 (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
1601 volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
1602 (sclk_table->entries[i].clk/100) / 100000) * 1000 /
1603 (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
1604 } else {
1605 volt_without_cks = (uint32_t)((14041 *
1606 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1607 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1608 volt_with_cks = (uint32_t)((13946 *
1609 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1610 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1611 }
1612 if (volt_without_cks >= volt_with_cks)
1613 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1614 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1615 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1616 }
1617
1618 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1619 STRETCH_ENABLE, 0x0);
1620 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1621 masterReset, 0x1);
1622 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1623 staticEnable, 0x1);
1624 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1625 masterReset, 0x0);
1626
1627 /* Populate CKS Lookup Table */
1628 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1629 stretch_amount2 = 0;
1630 else if (stretch_amount == 3 || stretch_amount == 4)
1631 stretch_amount2 = 1;
1632 else {
1633 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1634 PHM_PlatformCaps_ClockStretcher);
1635 PP_ASSERT_WITH_CODE(false,
1636 "Stretch Amount in PPTable not supported\n",
1637 return -EINVAL);
1638 }
1639
1640 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1641 ixPWR_CKS_CNTL);
1642 value &= 0xFFC2FF87;
1643 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1644 tonga_clock_stretcher_lookup_table[stretch_amount2][0];
1645 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1646 tonga_clock_stretcher_lookup_table[stretch_amount2][1];
1647 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
1648 GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
1649 SclkFrequency) / 100);
1650 if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
1651 clock_freq_u16 &&
1652 tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
1653 clock_freq_u16) {
1654 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1655 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1656 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1657 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1658 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1659 value |= (tonga_clock_stretch_amount_conversion
1660 [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
1661 [stretch_amount]) << 3;
1662 }
1663 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1664 CKS_LOOKUPTableEntry[0].minFreq);
1665 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1666 CKS_LOOKUPTableEntry[0].maxFreq);
1667 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1668 tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1669 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1670 (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1671
1672 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1673 ixPWR_CKS_CNTL, value);
1674
1675 /* Populate DDT Lookup Table */
1676 for (i = 0; i < 4; i++) {
1677 /* Assign the minimum and maximum VID stored
1678 * in the last row of Clock Stretcher Voltage Table.
1679 */
1680 smu_data->smc_state_table.ClockStretcherDataTable.
1681 ClockStretcherDataTableEntry[i].minVID =
1682 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
1683 smu_data->smc_state_table.ClockStretcherDataTable.
1684 ClockStretcherDataTableEntry[i].maxVID =
1685 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
1686 /* Loop through each SCLK and check whether the frequency
1687 * lies within the clock stretcher frequency range.
1688 */
1689 for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
1690 cks_setting = 0;
1691 clock_freq = PP_SMC_TO_HOST_UL(
1692 smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
1693 /* Check the allowed frequency against SCLK level[j].
1694 * The SCLK's endianness has already been converted,
1695 * and it is in 10 kHz units,
1696 * whereas the data table is in MHz units.
1697 */
1698 if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
1699 cks_setting |= 0x2;
1700 if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
1701 cks_setting |= 0x1;
1702 }
1703 smu_data->smc_state_table.ClockStretcherDataTable.
1704 ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
1705 }
1706 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
1707 ClockStretcherDataTable.
1708 ClockStretcherDataTableEntry[i].setting);
1709 }
1710
1711 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1712 ixPWR_CKS_CNTL);
1713 value &= 0xFFFFFFFE;
1714 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1715 ixPWR_CKS_CNTL, value);
1716
1717 return 0;
1718}
1719
1720/**
1721 * Populates the SMC VRConfig field in the DPM table.
1722 *
1723 * @param hwmgr the address of the hardware manager
1724 * @param table the SMC DPM table structure to be populated
1725 * @return always 0
1726 */
1727static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1728 SMU72_Discrete_DpmTable *table)
1729{
1730 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1731 uint16_t config;
1732
1733 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1734 /* Split mode */
1735 config = VR_SVI2_PLANE_1;
1736 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1737
1738 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1739 config = VR_SVI2_PLANE_2;
1740 table->VRConfig |= config;
1741 } else {
1742 pr_err("VDDC and VDDGFX should "
1743 "be both on SVI2 control in splitted mode !\n");
1744 }
1745 } else {
1746 /* Merged mode */
1747 config = VR_MERGED_WITH_VDDC;
1748 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1749
1750 /* Set Vddc Voltage Controller */
1751 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1752 config = VR_SVI2_PLANE_1;
1753 table->VRConfig |= config;
1754 } else {
1755 pr_err("VDDC should be on "
1756 "SVI2 control in merged mode !\n");
1757 }
1758 }
1759
1760 /* Set Vddci Voltage Controller */
1761 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1762 config = VR_SVI2_PLANE_2; /* only in merged mode */
1763 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1764 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1765 config = VR_SMIO_PATTERN_1;
1766 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1767 }
1768
1769 /* Set Mvdd Voltage Controller */
1770 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1771 config = VR_SMIO_PATTERN_2;
1772 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1773 }
1774
1775 return 0;
1776}
1777
1778
1779/**
1780 * Initialize the ARB DRAM timing table's index field.
1781 *
1782 * @param hwmgr the address of the powerplay hardware manager.
1783 * @return 0 on success, or a negative error code on failure.
1784 */
1785static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
1786{
1787 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1788 uint32_t tmp;
1789 int result;
1790
1791 /*
1792 * This is a read-modify-write on the first byte of the ARB table.
1793 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
1794 * is the field 'current'.
1795 * This solution is ugly, but we never write the whole table, only
1796 * individual fields in it.
1797 * In reality this field should not be in that structure
1798 * but in a soft register.
1799 */
1800 result = smu7_read_smc_sram_dword(hwmgr,
1801 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1802
1803 if (result != 0)
1804 return result;
1805
1806 tmp &= 0x00FFFFFF;
1807 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1808
1809 return smu7_write_smc_sram_dword(hwmgr,
1810 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1811}
1812
1813
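/*
 * Populate the BAPM (power containment) parameters: the TDP targets
 * (the "* 256" scaling appears to be an 8.8 fixed-point format), the GPU
 * TjMax and hysteresis, and the BAPMTI_R/BAPMTI_RC tables taken from the
 * power-tune defaults.
 */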
1814static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1815{
1816 struct tonga_smumgr *smu_data =
1817 (struct tonga_smumgr *)(hwmgr->smu_backend);
1818 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1819 SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1820 struct phm_ppt_v1_information *table_info =
1821 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1822 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
1823 int i, j, k;
1824 const uint16_t *pdef1, *pdef2;
1825
1826 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
1827 (uint16_t)(cac_dtp_table->usTDP * 256));
1828 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
1829 (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1830
1831 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
1832 "Target Operating Temp is out of Range !",
1833 );
1834
1835 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
1836 dpm_table->GpuTjHyst = 8;
1837
1838 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1839
1840 dpm_table->BAPM_TEMP_GRADIENT =
1841 PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
1842 pdef1 = defaults->bapmti_r;
1843 pdef2 = defaults->bapmti_rc;
1844
1845 for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
1846 for (j = 0; j < SMU72_DTE_SOURCES; j++) {
1847 for (k = 0; k < SMU72_DTE_SINKS; k++) {
1848 dpm_table->BAPMTI_R[i][j][k] =
1849 PP_HOST_TO_SMC_US(*pdef1);
1850 dpm_table->BAPMTI_RC[i][j][k] =
1851 PP_HOST_TO_SMC_US(*pdef2);
1852 pdef1++;
1853 pdef2++;
1854 }
1855 }
1856 }
1857
1858 return 0;
1859}
1860
1861static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
1862{
1863 struct tonga_smumgr *smu_data =
1864 (struct tonga_smumgr *)(hwmgr->smu_backend);
1865 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1866
1867 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
1868 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
1869 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
1870 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
1871
1872 return 0;
1873}
1874
1875static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
1876{
1877 uint16_t tdc_limit;
1878 struct tonga_smumgr *smu_data =
1879 (struct tonga_smumgr *)(hwmgr->smu_backend);
1880 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1881 struct phm_ppt_v1_information *table_info =
1882 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1883
1884 /* The number of TDC fraction bits was changed from 8 to 7
1885 * for Fiji, as requested by the SMC team.
1886 */
1887 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
1888 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
1889 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
1890 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
1891 defaults->tdc_vddc_throttle_release_limit_perc;
1892 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
1893
1894 return 0;
1895}
1896
1897static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
1898{
1899 struct tonga_smumgr *smu_data =
1900 (struct tonga_smumgr *)(hwmgr->smu_backend);
1901 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1902 uint32_t temp;
1903
1904 if (smu7_read_smc_sram_dword(hwmgr,
1905 fuse_table_offset +
1906 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
1907 (uint32_t *)&temp, SMC_RAM_END))
1908 PP_ASSERT_WITH_CODE(false,
1909 "Attempt to read PmFuses.DW6 "
1910 "(SviLoadLineEn) from SMC Failed !",
1911 return -EINVAL);
1912 else
1913 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
1914
1915 return 0;
1916}
1917
1918static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
1919{
1920 int i;
1921 struct tonga_smumgr *smu_data =
1922 (struct tonga_smumgr *)(hwmgr->smu_backend);
1923
1924 /* Currently not used. Set all to zero. */
1925 for (i = 0; i < 16; i++)
1926 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
1927
1928 return 0;
1929}
1930
1931static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
1932{
1933 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1934
1935 if ((hwmgr->thermal_controller.advanceFanControlParameters.
1936 usFanOutputSensitivity & (1 << 15)) ||
1937 (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
1938 hwmgr->thermal_controller.advanceFanControlParameters.
1939 usFanOutputSensitivity = hwmgr->thermal_controller.
1940 advanceFanControlParameters.usDefaultFanOutputSensitivity;
1941
1942 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
1943 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
1944 advanceFanControlParameters.usFanOutputSensitivity);
1945 return 0;
1946}
1947
1948static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
1949{
1950 int i;
1951 struct tonga_smumgr *smu_data =
1952 (struct tonga_smumgr *)(hwmgr->smu_backend);
1953
1954 /* Currently not used. Set all to zero. */
1955 for (i = 0; i < 16; i++)
1956 smu_data->power_tune_table.GnbLPML[i] = 0;
1957
1958 return 0;
1959}
1960
1961static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
1962{
1963 struct tonga_smumgr *smu_data =
1964 (struct tonga_smumgr *)(hwmgr->smu_backend);
1965 struct phm_ppt_v1_information *table_info =
1966 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1967 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
1968 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
1969 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
1970
1971 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
1972 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
1973
1974 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
1975 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
1976 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
1977 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
1978
1979 return 0;
1980}
1981
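/*
 * Populate the PM fuse table (DW6..DW20) from the power-tune defaults and
 * related table data, then download it to SMC RAM, but only when the
 * PowerContainment capability is enabled.
 */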
1982static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
1983{
1984 struct tonga_smumgr *smu_data =
1985 (struct tonga_smumgr *)(hwmgr->smu_backend);
1986 uint32_t pm_fuse_table_offset;
1987
1988 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1989 PHM_PlatformCaps_PowerContainment)) {
1990 if (smu7_read_smc_sram_dword(hwmgr,
1991 SMU72_FIRMWARE_HEADER_LOCATION +
1992 offsetof(SMU72_Firmware_Header, PmFuseTable),
1993 &pm_fuse_table_offset, SMC_RAM_END))
1994 PP_ASSERT_WITH_CODE(false,
1995 "Attempt to get pm_fuse_table_offset Failed !",
1996 return -EINVAL);
1997
1998 /* DW6 */
1999 if (tonga_populate_svi_load_line(hwmgr))
2000 PP_ASSERT_WITH_CODE(false,
2001 "Attempt to populate SviLoadLine Failed !",
2002 return -EINVAL);
2003 /* DW7 */
2004 if (tonga_populate_tdc_limit(hwmgr))
2005 PP_ASSERT_WITH_CODE(false,
2006 "Attempt to populate TDCLimit Failed !",
2007 return -EINVAL);
2008 /* DW8 */
2009 if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
2010 PP_ASSERT_WITH_CODE(false,
2011 "Attempt to populate TdcWaterfallCtl Failed !",
2012 return -EINVAL);
2013
2014 /* DW9-DW12 */
2015 if (tonga_populate_temperature_scaler(hwmgr) != 0)
2016 PP_ASSERT_WITH_CODE(false,
2017 "Attempt to populate LPMLTemperatureScaler Failed !",
2018 return -EINVAL);
2019
2020 /* DW13-DW14 */
2021 if (tonga_populate_fuzzy_fan(hwmgr))
2022 PP_ASSERT_WITH_CODE(false,
2023 "Attempt to populate Fuzzy Fan "
2024 "Control parameters Failed !",
2025 return -EINVAL);
2026
2027 /* DW15-DW18 */
2028 if (tonga_populate_gnb_lpml(hwmgr))
2029 PP_ASSERT_WITH_CODE(false,
2030 "Attempt to populate GnbLPML Failed !",
2031 return -EINVAL);
2032
2033 /* DW20 */
2034 if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
2035 PP_ASSERT_WITH_CODE(
2036 false,
2037 "Attempt to populate BapmVddCBaseLeakage "
2038 "Hi and Lo Sidd Failed !",
2039 return -EINVAL);
2040
2041 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
2042 (uint8_t *)&smu_data->power_tune_table,
2043 sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
2044 PP_ASSERT_WITH_CODE(false,
2045 "Attempt to download PmFuseTable Failed !",
2046 return -EINVAL);
2047 }
2048 return 0;
2049}
2050
2051static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
2052 SMU72_Discrete_MCRegisters *mc_reg_table)
2053{
2054 const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
2055
2056 uint32_t i, j;
2057
2058 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
2059 if (smu_data->mc_reg_table.validflag & 1<<j) {
2060 PP_ASSERT_WITH_CODE(
2061 i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
2062 "Index of mc_reg_table->address[] array "
2063 "out of boundary",
2064 return -EINVAL);
2065 mc_reg_table->address[i].s0 =
2066 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
2067 mc_reg_table->address[i].s1 =
2068 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
2069 i++;
2070 }
2071 }
2072
2073 mc_reg_table->last = (uint8_t)i;
2074
2075 return 0;
2076}
2077
2078/* convert register values from driver to SMC format */
2079static void tonga_convert_mc_registers(
2080 const struct tonga_mc_reg_entry *entry,
2081 SMU72_Discrete_MCRegisterSet *data,
2082 uint32_t num_entries, uint32_t valid_flag)
2083{
2084 uint32_t i, j;
2085
2086 for (i = 0, j = 0; j < num_entries; j++) {
2087 if (valid_flag & 1<<j) {
2088 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
2089 i++;
2090 }
2091 }
2092}
2093
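/*
 * Pick the MC register set for a given memory clock: the first table
 * entry whose mclk_max is >= the requested clock (falling back to the
 * last entry), then convert the selected register values to SMC format.
 */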
2094static int tonga_convert_mc_reg_table_entry_to_smc(
2095 struct pp_hwmgr *hwmgr,
2096 const uint32_t memory_clock,
2097 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
2098 )
2099{
2100 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2101 uint32_t i = 0;
2102
2103 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
2104 if (memory_clock <=
2105 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2106 break;
2107 }
2108 }
2109
2110 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
2111 --i;
2112
2113 tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
2114 mc_reg_table_data, smu_data->mc_reg_table.last,
2115 smu_data->mc_reg_table.validflag);
2116
2117 return 0;
2118}
2119
2120static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2121 SMU72_Discrete_MCRegisters *mc_regs)
2122{
2123 int result = 0;
2124 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2125 int res;
2126 uint32_t i;
2127
2128 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2129 res = tonga_convert_mc_reg_table_entry_to_smc(
2130 hwmgr,
2131 data->dpm_table.mclk_table.dpm_levels[i].value,
2132 &mc_regs->data[i]
2133 );
2134
2135 if (0 != res)
2136 result = res;
2137 }
2138
2139 return result;
2140}
2141
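/*
 * Re-upload only the per-MCLK-level MC register sets (the data[] array)
 * when overdrive has changed the MCLK DPM table; otherwise there is
 * nothing to update.
 */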
2142static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
2143{
2144 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2145 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2146 uint32_t address;
2147 int32_t result;
2148
2149 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
2150 return 0;
2151
2152
2153 memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
2154
2155 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
2156
2157 if (result != 0)
2158 return result;
2159
2160
2161 address = smu_data->smu7_data.mc_reg_table_start +
2162 (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
2163
2164 return smu7_copy_bytes_to_smc(
2165 hwmgr, address,
2166 (uint8_t *)&smu_data->mc_regs.data[0],
2167 sizeof(SMU72_Discrete_MCRegisterSet) *
2168 data->dpm_table.mclk_table.count,
2169 SMC_RAM_END);
2170}
2171
2172static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
2173{
2174 int result;
2175 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2176
2177 memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
2178 result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
2179 PP_ASSERT_WITH_CODE(!result,
2180 "Failed to initialize MCRegTable for the MC register addresses !",
2181 return result;);
2182
2183 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
2184 PP_ASSERT_WITH_CODE(!result,
2185 "Failed to initialize MCRegTable for driver state !",
2186 return result;);
2187
2188 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
2189 (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
2190}
2191
2192static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
2193{
2194 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2195 struct phm_ppt_v1_information *table_info =
2196 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2197
2198 if (table_info &&
2199 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
2200 table_info->cac_dtp_table->usPowerTuneDataSetID)
2201 smu_data->power_tune_defaults =
2202 &tonga_power_tune_data_set_array
2203 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
2204 else
2205 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
2206}
2207
2208static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
2209{
2210 struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2211 struct SMU72_Discrete_GraphicsLevel *levels =
2212 data->smc_state_table.GraphicsLevel;
2213 unsigned min_level = 1;
2214
2215 hwmgr->default_gfx_power_profile.activity_threshold =
2216 be16_to_cpu(levels[0].ActivityLevel);
2217 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
2218 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
2219 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2220
2221 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
2222 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2223
2224 /* Work around compute SDMA instability: disable the lowest SCLK
2225 * DPM level. Optimize the compute power profile: use only the
2226 * highest 2 power levels (if more than 2 are available). Hysteresis:
2227 * 0 ms up, 5 ms down.
2228 */
2229 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
2230 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
2231 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
2232 min_level = 1;
2233 else
2234 min_level = 0;
2235 hwmgr->default_compute_power_profile.min_sclk =
2236 be32_to_cpu(levels[min_level].SclkFrequency);
2237 hwmgr->default_compute_power_profile.up_hyst = 0;
2238 hwmgr->default_compute_power_profile.down_hyst = 5;
2239
2240 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2241 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2242}
2243
2244/**
2245 * Initializes the SMC table and uploads it to the SMC.
2246 *
2247 * @param hwmgr the address of the powerplay hardware manager.
2248 * @return 0 on success, or a negative error code on failure.
2249 */
2251int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2252{
2253 int result;
2254 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2255 struct tonga_smumgr *smu_data =
2256 (struct tonga_smumgr *)(hwmgr->smu_backend);
2257 SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2258 struct phm_ppt_v1_information *table_info =
2259 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2260
2261 uint8_t i;
2262 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2263
2264
2265 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
2266
2267 tonga_initialize_power_tune_defaults(hwmgr);
2268
2269 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
2270 tonga_populate_smc_voltage_tables(hwmgr, table);
2271
2272 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2273 PHM_PlatformCaps_AutomaticDCTransition))
2274 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2275
2276
2277 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2278 PHM_PlatformCaps_StepVddc))
2279 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2280
2281 if (data->is_memory_gddr5)
2282 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2283
2284 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2285
2286 if (i == 1 || i == 0)
2287 table->SystemFlags |= 0x40;
2288
2289 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
2290 result = tonga_populate_ulv_state(hwmgr, table);
2291 PP_ASSERT_WITH_CODE(!result,
2292 "Failed to initialize ULV state !",
2293 return result;);
2294
2295 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2296 ixCG_ULV_PARAMETER, 0x40035);
2297 }
2298
2299 result = tonga_populate_smc_link_level(hwmgr, table);
2300 PP_ASSERT_WITH_CODE(!result,
2301 "Failed to initialize Link Level !", return result);
2302
2303 result = tonga_populate_all_graphic_levels(hwmgr);
2304 PP_ASSERT_WITH_CODE(!result,
2305 "Failed to initialize Graphics Level !", return result);
2306
2307 result = tonga_populate_all_memory_levels(hwmgr);
2308 PP_ASSERT_WITH_CODE(!result,
2309 "Failed to initialize Memory Level !", return result);
2310
2311 result = tonga_populate_smc_acpi_level(hwmgr, table);
2312 PP_ASSERT_WITH_CODE(!result,
2313 "Failed to initialize ACPI Level !", return result);
2314
2315 result = tonga_populate_smc_vce_level(hwmgr, table);
2316 PP_ASSERT_WITH_CODE(!result,
2317 "Failed to initialize VCE Level !", return result);
2318
2319 result = tonga_populate_smc_acp_level(hwmgr, table);
2320 PP_ASSERT_WITH_CODE(!result,
2321 "Failed to initialize ACP Level !", return result);
2322
2323 result = tonga_populate_smc_samu_level(hwmgr, table);
2324 PP_ASSERT_WITH_CODE(!result,
2325 "Failed to initialize SAMU Level !", return result);
2326
2327 /* Since only the initial state is completely set up at this
2328 * point (the other states are just copies of the boot state) we only
2329 * need to populate the ARB settings for the initial state.
2330 */
2331 result = tonga_program_memory_timing_parameters(hwmgr);
2332 PP_ASSERT_WITH_CODE(!result,
2333 "Failed to Write ARB settings for the initial state.",
2334 return result;);
2335
2336 result = tonga_populate_smc_uvd_level(hwmgr, table);
2337 PP_ASSERT_WITH_CODE(!result,
2338 "Failed to initialize UVD Level !", return result);
2339
2340 result = tonga_populate_smc_boot_level(hwmgr, table);
2341 PP_ASSERT_WITH_CODE(!result,
2342 "Failed to initialize Boot Level !", return result);
2343
2344 result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2345 PP_ASSERT_WITH_CODE(!result,
2346 "Failed to populate BAPM Parameters !", return result);
2347
2348 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2349 PHM_PlatformCaps_ClockStretcher)) {
2350 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2351 PP_ASSERT_WITH_CODE(!result,
2352 "Failed to populate Clock Stretcher Data Table !",
2353 return result;);
2354 }
2355 table->GraphicsVoltageChangeEnable = 1;
2356 table->GraphicsThermThrottleEnable = 1;
2357 table->GraphicsInterval = 1;
2358 table->VoltageInterval = 1;
2359 table->ThermalInterval = 1;
2360 table->TemperatureLimitHigh =
2361 table_info->cac_dtp_table->usTargetOperatingTemp *
2362 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2363 table->TemperatureLimitLow =
2364 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2365 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2366 table->MemoryVoltageChangeEnable = 1;
2367 table->MemoryInterval = 1;
2368 table->VoltageResponseTime = 0;
2369 table->PhaseResponseTime = 0;
2370 table->MemoryThermThrottleEnable = 1;
2371
2372 /*
2373 * Cail reads the current link status and reports it as the cap (we
2374 * cannot change this due to some previous issues we had).
2375 * The SMC drops the link status to the lowest level after PowerPlay
2376 * enables DPM. After PnP or toggling CF the driver gets reloaded, but
2377 * this time Cail reads the current link status, which was set to low
2378 * by the SMC, and reports it as the cap to powerplay.
2379 * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
2380 */
2381 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2382 "There must be 1 or more PCIE levels defined in PPTable.",
2383 return -EINVAL);
2384
2385 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
2386
2387 table->PCIeGenInterval = 1;
2388
2389 result = tonga_populate_vr_config(hwmgr, table);
2390 PP_ASSERT_WITH_CODE(!result,
2391 "Failed to populate VRConfig setting !", return result);
2392
2393 table->ThermGpio = 17;
2394 table->SclkStepSize = 0x4000;
2395
2396 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
2397 &gpio_pin_assignment)) {
2398 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2399 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2400 PHM_PlatformCaps_RegulatorHot);
2401 } else {
2402 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2403 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2404 PHM_PlatformCaps_RegulatorHot);
2405 }
2406
2407 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2408 &gpio_pin_assignment)) {
2409 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2410 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2411 PHM_PlatformCaps_AutomaticDCTransition);
2412 } else {
2413 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2414 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2415 PHM_PlatformCaps_AutomaticDCTransition);
2416 }
2417
2418 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2419 PHM_PlatformCaps_Falcon_QuickTransition);
2420
2421 if (0) {
2422 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2423 PHM_PlatformCaps_AutomaticDCTransition);
2424 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2425 PHM_PlatformCaps_Falcon_QuickTransition);
2426 }
2427
2428 if (atomctrl_get_pp_assign_pin(hwmgr,
2429 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
2430 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2431 PHM_PlatformCaps_ThermalOutGPIO);
2432
2433 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2434
2435 table->ThermOutPolarity =
2436 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
2437 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
2438
2439 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2440
2441 /* if required, combine VRHot/PCC with thermal out GPIO*/
2442 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2443 PHM_PlatformCaps_RegulatorHot) &&
2444 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2445 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
2446 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2447 }
2448 } else {
2449 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2450 PHM_PlatformCaps_ThermalOutGPIO);
2451
2452 table->ThermOutGpio = 17;
2453 table->ThermOutPolarity = 1;
2454 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2455 }
2456
2457 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
2458 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2459
2460 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2461 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2462 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2463 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2464 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2465 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2466 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2467 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2468 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2469
2470 /* Upload all DPM data (DPM levels, level count, etc.) to SMC memory. */
2471 result = smu7_copy_bytes_to_smc(
2472 hwmgr,
2473 smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
2474 (uint8_t *)&(table->SystemFlags),
2475 sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
2476 SMC_RAM_END);
2477
2478 PP_ASSERT_WITH_CODE(!result,
2479 "Failed to upload dpm data to SMC memory !", return result;);
2480
2481 result = tonga_init_arb_table_index(hwmgr);
2482 PP_ASSERT_WITH_CODE(!result,
2483 "Failed to upload arb data to SMC memory !", return result);
2484
2485 result = tonga_populate_pm_fuses(hwmgr);
2486 PP_ASSERT_WITH_CODE((!result),
2487 "Failed to populate initialize pm fuses !", return result);
2488
2489 result = tonga_populate_initial_mc_reg_table(hwmgr);
2490 PP_ASSERT_WITH_CODE((!result),
2491 "Failed to populate initialize MC Reg table !", return result);
2492
2493 tonga_save_default_power_profile(hwmgr);
2494
2495 return 0;
2496}
2497
2498/**
2499* Set up the fan table to control the fan using the SMC.
2500* @param hwmgr the address of the powerplay hardware manager.
2501* @return 0 on success, otherwise the error code from the SMC fan-table upload.
2506*/
2507int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2508{
2509 struct tonga_smumgr *smu_data =
2510 (struct tonga_smumgr *)(hwmgr->smu_backend);
2511 SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2512 uint32_t duty100;
2513 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2514 uint16_t fdo_min, slope1, slope2;
2515 uint32_t reference_clock;
2516 int res;
2517 uint64_t tmp64;
2518
2519 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2520 PHM_PlatformCaps_MicrocodeFanControl))
2521 return 0;
2522
2523 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2524 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2525 PHM_PlatformCaps_MicrocodeFanControl);
2526 return 0;
2527 }
2528
2529 if (0 == smu_data->smu7_data.fan_table_start) {
2530 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2531 PHM_PlatformCaps_MicrocodeFanControl);
2532 return 0;
2533 }
2534
2535 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
2536 CGS_IND_REG__SMC,
2537 CG_FDO_CTRL1, FMAX_DUTY100);
2538
2539 if (0 == duty100) {
2540 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2541 PHM_PlatformCaps_MicrocodeFanControl);
2542 return 0;
2543 }
2544
2545 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2546 do_div(tmp64, 10000);
2547 fdo_min = (uint16_t)tmp64;
2548
2549 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2550 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2551 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2552 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2553
2554 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2555 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2556 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2557 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2558
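	/*
	 * The fan-control parameters above are stored in hundredths; the
	 * "+ 50" / divide-by-100 pattern below rounds them back to integral
	 * units. The factor of 16 appears to keep extra precision for the
	 * SMC's fixed-point slope format (assumption based on the surrounding
	 * conversions, not on SMC documentation).
	 */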
2559 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2560 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2561
2562 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2563 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2564 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2565
2566 fan_table.Slope1 = cpu_to_be16(slope1);
2567 fan_table.Slope2 = cpu_to_be16(slope2);
2568
2569 fan_table.FdoMin = cpu_to_be16(fdo_min);
2570
2571 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2572
2573 fan_table.HystUp = cpu_to_be16(1);
2574
2575 fan_table.HystSlope = cpu_to_be16(1);
2576
2577 fan_table.TempRespLim = cpu_to_be16(5);
2578
2579 reference_clock = smu7_get_xclk(hwmgr);
2580
2581 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2582
2583 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2584
2585 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2586
2587 fan_table.FanControl_GL_Flag = 1;
2588
2589 res = smu7_copy_bytes_to_smc(hwmgr,
2590 smu_data->smu7_data.fan_table_start,
2591 (uint8_t *)&fan_table,
2592 (uint32_t)sizeof(fan_table),
2593 SMC_RAM_END);
2594
2595	return res;
2596}
2597
2598
2599static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2600{
2601 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2602
2603 if (data->need_update_smu7_dpm_table &
2604 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2605 return tonga_program_memory_timing_parameters(hwmgr);
2606
2607 return 0;
2608}
2609
2610int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2611{
2612 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2613 struct tonga_smumgr *smu_data =
2614 (struct tonga_smumgr *)(hwmgr->smu_backend);
2615
2616 int result = 0;
2617 uint32_t low_sclk_interrupt_threshold = 0;
2618
2619 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2620 PHM_PlatformCaps_SclkThrottleLowNotification)
2621 && (hwmgr->gfx_arbiter.sclk_threshold !=
2622 data->low_sclk_interrupt_threshold)) {
2623 data->low_sclk_interrupt_threshold =
2624 hwmgr->gfx_arbiter.sclk_threshold;
2625 low_sclk_interrupt_threshold =
2626 data->low_sclk_interrupt_threshold;
2627
2628 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2629
2630 result = smu7_copy_bytes_to_smc(
2631 hwmgr,
2632 smu_data->smu7_data.dpm_table_start +
2633 offsetof(SMU72_Discrete_DpmTable,
2634 LowSclkInterruptThreshold),
2635 (uint8_t *)&low_sclk_interrupt_threshold,
2636 sizeof(uint32_t),
2637 SMC_RAM_END);
2638 }
2639
2640 result = tonga_update_and_upload_mc_reg_table(hwmgr);
2641
2642 PP_ASSERT_WITH_CODE((!result),
2643 "Failed to upload MC reg table !",
2644 return result);
2645
2646 result = tonga_program_mem_timing_parameters(hwmgr);
2647 PP_ASSERT_WITH_CODE((result == 0),
2648 "Failed to program memory timing parameters !",
2649 );
2650
2651 return result;
2652}
2653
2654uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
2655{
2656 switch (type) {
2657 case SMU_SoftRegisters:
2658 switch (member) {
2659 case HandshakeDisables:
2660 return offsetof(SMU72_SoftRegisters, HandshakeDisables);
2661 case VoltageChangeTimeout:
2662 return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
2663 case AverageGraphicsActivity:
2664 return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
2665 case PreVBlankGap:
2666 return offsetof(SMU72_SoftRegisters, PreVBlankGap);
2667 case VBlankTimeout:
2668 return offsetof(SMU72_SoftRegisters, VBlankTimeout);
2669 case UcodeLoadStatus:
2670 return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
2671		}
		break;
2672 case SMU_Discrete_DpmTable:
2673 switch (member) {
2674 case UvdBootLevel:
2675 return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2676 case VceBootLevel:
2677 return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2678 case SamuBootLevel:
2679 return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2680 case LowSclkInterruptThreshold:
2681 return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
2682 }
2683 }
2684 pr_warn("can't get the offset of type %x member %x\n", type, member);
2685 return 0;
2686}
2687
2688uint32_t tonga_get_mac_definition(uint32_t value)
2689{
2690 switch (value) {
2691 case SMU_MAX_LEVELS_GRAPHICS:
2692 return SMU72_MAX_LEVELS_GRAPHICS;
2693 case SMU_MAX_LEVELS_MEMORY:
2694 return SMU72_MAX_LEVELS_MEMORY;
2695 case SMU_MAX_LEVELS_LINK:
2696 return SMU72_MAX_LEVELS_LINK;
2697 case SMU_MAX_ENTRIES_SMIO:
2698 return SMU72_MAX_ENTRIES_SMIO;
2699 case SMU_MAX_LEVELS_VDDC:
2700 return SMU72_MAX_LEVELS_VDDC;
2701 case SMU_MAX_LEVELS_VDDGFX:
2702 return SMU72_MAX_LEVELS_VDDGFX;
2703 case SMU_MAX_LEVELS_VDDCI:
2704 return SMU72_MAX_LEVELS_VDDCI;
2705 case SMU_MAX_LEVELS_MVDD:
2706 return SMU72_MAX_LEVELS_MVDD;
2707 }
2708 pr_warn("can't get the mac value %x\n", value);
2709
2710 return 0;
2711}
2712
2713
2714static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2715{
2716 struct tonga_smumgr *smu_data =
2717 (struct tonga_smumgr *)(hwmgr->smu_backend);
2718 uint32_t mm_boot_level_offset, mm_boot_level_value;
2719 struct phm_ppt_v1_information *table_info =
2720 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2721
2722 smu_data->smc_state_table.UvdBootLevel = 0;
2723 if (table_info->mm_dep_table->count > 0)
2724 smu_data->smc_state_table.UvdBootLevel =
2725 (uint8_t) (table_info->mm_dep_table->count - 1);
2726 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2727 offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2728 mm_boot_level_offset /= 4;
2729 mm_boot_level_offset *= 4;
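	/*
	 * Round the byte offset down to a 4-byte boundary: the indirect SMC
	 * register interface below works on whole dwords, so the UvdBootLevel
	 * byte (assumed here to sit in the top byte of that dword, hence the
	 * 0x00FFFFFF mask and the shift by 24) is updated with a
	 * read-modify-write of the containing dword.
	 */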
2730 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2731 CGS_IND_REG__SMC, mm_boot_level_offset);
2732 mm_boot_level_value &= 0x00FFFFFF;
2733 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2734 cgs_write_ind_register(hwmgr->device,
2735 CGS_IND_REG__SMC,
2736 mm_boot_level_offset, mm_boot_level_value);
2737
2738 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2739 PHM_PlatformCaps_UVDDPM) ||
2740 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2741 PHM_PlatformCaps_StablePState))
2742 smum_send_msg_to_smc_with_parameter(hwmgr,
2743 PPSMC_MSG_UVDDPM_SetEnabledMask,
2744 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2745 return 0;
2746}
2747
2748static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2749{
2750 struct tonga_smumgr *smu_data =
2751 (struct tonga_smumgr *)(hwmgr->smu_backend);
2752 uint32_t mm_boot_level_offset, mm_boot_level_value;
2753 struct phm_ppt_v1_information *table_info =
2754 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2755
2756
2757 smu_data->smc_state_table.VceBootLevel =
2758 (uint8_t) (table_info->mm_dep_table->count - 1);
2759
2760 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2761 offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2762 mm_boot_level_offset /= 4;
2763 mm_boot_level_offset *= 4;
2764 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2765 CGS_IND_REG__SMC, mm_boot_level_offset);
2766 mm_boot_level_value &= 0xFF00FFFF;
2767 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2768 cgs_write_ind_register(hwmgr->device,
2769 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2770
2771 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2772 PHM_PlatformCaps_StablePState))
2773 smum_send_msg_to_smc_with_parameter(hwmgr,
2774 PPSMC_MSG_VCEDPM_SetEnabledMask,
2775 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2776 return 0;
2777}
2778
2779static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2780{
2781 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2782 uint32_t mm_boot_level_offset, mm_boot_level_value;
2783
2784 smu_data->smc_state_table.SamuBootLevel = 0;
2785 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2786 offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2787
2788 mm_boot_level_offset /= 4;
2789 mm_boot_level_offset *= 4;
2790 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2791 CGS_IND_REG__SMC, mm_boot_level_offset);
2792 mm_boot_level_value &= 0xFFFFFF00;
2793 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2794 cgs_write_ind_register(hwmgr->device,
2795 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2796
2797 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2798 PHM_PlatformCaps_StablePState))
2799 smum_send_msg_to_smc_with_parameter(hwmgr,
2800 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2801 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2802 return 0;
2803}
2804
2805int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2806{
2807 switch (type) {
2808 case SMU_UVD_TABLE:
2809 tonga_update_uvd_smc_table(hwmgr);
2810 break;
2811 case SMU_VCE_TABLE:
2812 tonga_update_vce_smc_table(hwmgr);
2813 break;
2814 case SMU_SAMU_TABLE:
2815 tonga_update_samu_smc_table(hwmgr);
2816 break;
2817 default:
2818 break;
2819 }
2820 return 0;
2821}
2822
2823
2824/**
2825 * Get the location of various tables inside the FW image.
2826 *
2827 * @param hwmgr the address of the powerplay hardware manager.
2828 * @return always 0
2829 */
2830int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
2831{
2832 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2833 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2834
2835 uint32_t tmp;
2836 int result;
2837 bool error = false;
2838
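	/*
	 * Each firmware-header offset below is read independently; any single
	 * failure is accumulated into 'error' so the remaining offsets are
	 * still probed before an overall failure is reported.
	 */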
2839 result = smu7_read_smc_sram_dword(hwmgr,
2840 SMU72_FIRMWARE_HEADER_LOCATION +
2841 offsetof(SMU72_Firmware_Header, DpmTable),
2842 &tmp, SMC_RAM_END);
2843
2844 if (!result)
2845 smu_data->smu7_data.dpm_table_start = tmp;
2846
2847 error |= (result != 0);
2848
2849 result = smu7_read_smc_sram_dword(hwmgr,
2850 SMU72_FIRMWARE_HEADER_LOCATION +
2851 offsetof(SMU72_Firmware_Header, SoftRegisters),
2852 &tmp, SMC_RAM_END);
2853
2854 if (!result) {
2855 data->soft_regs_start = tmp;
2856 smu_data->smu7_data.soft_regs_start = tmp;
2857 }
2858
2859 error |= (result != 0);
2860
2861
2862 result = smu7_read_smc_sram_dword(hwmgr,
2863 SMU72_FIRMWARE_HEADER_LOCATION +
2864 offsetof(SMU72_Firmware_Header, mcRegisterTable),
2865 &tmp, SMC_RAM_END);
2866
2867 if (!result)
2868 smu_data->smu7_data.mc_reg_table_start = tmp;
2869
2870 result = smu7_read_smc_sram_dword(hwmgr,
2871 SMU72_FIRMWARE_HEADER_LOCATION +
2872 offsetof(SMU72_Firmware_Header, FanTable),
2873 &tmp, SMC_RAM_END);
2874
2875 if (!result)
2876 smu_data->smu7_data.fan_table_start = tmp;
2877
2878 error |= (result != 0);
2879
2880 result = smu7_read_smc_sram_dword(hwmgr,
2881 SMU72_FIRMWARE_HEADER_LOCATION +
2882 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
2883 &tmp, SMC_RAM_END);
2884
2885 if (!result)
2886 smu_data->smu7_data.arb_table_start = tmp;
2887
2888 error |= (result != 0);
2889
2890 result = smu7_read_smc_sram_dword(hwmgr,
2891 SMU72_FIRMWARE_HEADER_LOCATION +
2892 offsetof(SMU72_Firmware_Header, Version),
2893 &tmp, SMC_RAM_END);
2894
2895 if (!result)
2896 hwmgr->microcode_version_info.SMC = tmp;
2897
2898 error |= (result != 0);
2899
2900 return error ? 1 : 0;
2901}
2902
2903/*---------------------------MC----------------------------*/
2904
2905static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2906{
2907 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2908}
2909
2910static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2911{
2912 bool result = true;
2913
2914 switch (in_reg) {
2915 case mmMC_SEQ_RAS_TIMING:
2916 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2917 break;
2918
2919 case mmMC_SEQ_DLL_STBY:
2920 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2921 break;
2922
2923 case mmMC_SEQ_G5PDX_CMD0:
2924 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2925 break;
2926
2927 case mmMC_SEQ_G5PDX_CMD1:
2928 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2929 break;
2930
2931 case mmMC_SEQ_G5PDX_CTRL:
2932 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2933 break;
2934
2935 case mmMC_SEQ_CAS_TIMING:
2936 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2937 break;
2938
2939 case mmMC_SEQ_MISC_TIMING:
2940 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2941 break;
2942
2943 case mmMC_SEQ_MISC_TIMING2:
2944 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2945 break;
2946
2947 case mmMC_SEQ_PMG_DVS_CMD:
2948 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2949 break;
2950
2951 case mmMC_SEQ_PMG_DVS_CTL:
2952 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2953 break;
2954
2955 case mmMC_SEQ_RD_CTL_D0:
2956 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2957 break;
2958
2959 case mmMC_SEQ_RD_CTL_D1:
2960 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2961 break;
2962
2963 case mmMC_SEQ_WR_CTL_D0:
2964 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2965 break;
2966
2967 case mmMC_SEQ_WR_CTL_D1:
2968 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2969 break;
2970
2971 case mmMC_PMG_CMD_EMRS:
2972 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2973 break;
2974
2975 case mmMC_PMG_CMD_MRS:
2976 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2977 break;
2978
2979 case mmMC_PMG_CMD_MRS1:
2980 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2981 break;
2982
2983 case mmMC_SEQ_PMG_TIMING:
2984 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2985 break;
2986
2987 case mmMC_PMG_CMD_MRS2:
2988 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2989 break;
2990
2991 case mmMC_SEQ_WR_CTL_2:
2992 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2993 break;
2994
2995 default:
2996 result = false;
2997 break;
2998 }
2999
3000 return result;
3001}
3002
3003static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
3004{
3005 uint32_t i;
3006 uint16_t address;
3007
3008 for (i = 0; i < table->last; i++) {
3009 table->mc_reg_address[i].s0 =
3010 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
3011 &address) ?
3012 address :
3013 table->mc_reg_address[i].s1;
3014 }
3015 return 0;
3016}
3017
3018static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
3019 struct tonga_mc_reg_table *ni_table)
3020{
3021 uint8_t i, j;
3022
3023 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3024 "Invalid VramInfo table.", return -EINVAL);
3025 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
3026 "Invalid VramInfo table.", return -EINVAL);
3027
3028 for (i = 0; i < table->last; i++)
3029 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3030
3031 ni_table->last = table->last;
3032
3033 for (i = 0; i < table->num_entries; i++) {
3034 ni_table->mc_reg_table_entry[i].mclk_max =
3035 table->mc_reg_table_entry[i].mclk_max;
3036 for (j = 0; j < table->last; j++) {
3037 ni_table->mc_reg_table_entry[i].mc_data[j] =
3038 table->mc_reg_table_entry[i].mc_data[j];
3039 }
3040 }
3041
3042 ni_table->num_entries = table->num_entries;
3043
3044 return 0;
3045}
3046
3047/**
3048 * The VBIOS omits some information to reduce image size; recover it here:
3049 * 1. When mmMC_SEQ_MISC1 is seen, its bits [31:16] (EMRS1) must be written to
3050 *    mmMC_PMG_CMD_EMRS/_LP[15:0], and its bits [15:0] (MRS) must be written to
3051 *    mmMC_PMG_CMD_MRS/_LP[15:0].
3052 * 2. When mmMC_SEQ_RESERVE_M is seen, its bits [15:0] (EMRS2) must be written to
3053 *    mmMC_PMG_CMD_MRS1/_LP[15:0].
3054 * 3. These values must be set for each clock range.
3055 * @param hwmgr the address of the powerplay hardware manager.
3056 * @param table the address of MCRegTable
3057 * @return always 0
3058 */
3059static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
3060 struct tonga_mc_reg_table *table)
3061{
3062 uint8_t i, j, k;
3063 uint32_t temp_reg;
3064 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3065
3066 for (i = 0, j = table->last; i < table->last; i++) {
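		/*
		 * j starts at the current end of the table; the synthesized
		 * EMRS/MRS/MRS1 entries recovered below are appended there
		 * while i walks the original entries.
		 */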
3067 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3068 "Invalid VramInfo table.", return -EINVAL);
3069
3070 switch (table->mc_reg_address[i].s1) {
3071
3072 case mmMC_SEQ_MISC1:
3073 temp_reg = cgs_read_register(hwmgr->device,
3074 mmMC_PMG_CMD_EMRS);
3075 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
3076 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
3077 for (k = 0; k < table->num_entries; k++) {
3078 table->mc_reg_table_entry[k].mc_data[j] =
3079 ((temp_reg & 0xffff0000)) |
3080 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3081 }
3082 j++;
3083 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3084 "Invalid VramInfo table.", return -EINVAL);
3085
3086 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
3087 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
3088 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
3089 for (k = 0; k < table->num_entries; k++) {
3090 table->mc_reg_table_entry[k].mc_data[j] =
3091 (temp_reg & 0xffff0000) |
3092 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3093
3094 if (!data->is_memory_gddr5)
3095 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3096 }
3097 j++;
3098 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3099 "Invalid VramInfo table.", return -EINVAL);
3100
3101 if (!data->is_memory_gddr5) {
3102 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
3103 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
3104 for (k = 0; k < table->num_entries; k++)
3105 table->mc_reg_table_entry[k].mc_data[j] =
3106 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3107 j++;
3108 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3109 "Invalid VramInfo table.", return -EINVAL);
3110 }
3111
3112 break;
3113
3114 case mmMC_SEQ_RESERVE_M:
3115 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
3116 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
3117 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
3118 for (k = 0; k < table->num_entries; k++) {
3119 table->mc_reg_table_entry[k].mc_data[j] =
3120 (temp_reg & 0xffff0000) |
3121 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3122 }
3123 j++;
3124 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3125 "Invalid VramInfo table.", return -EINVAL);
3126 break;
3127
3128 default:
3129 break;
3130 }
3131
3132 }
3133
3134 table->last = j;
3135
3136 return 0;
3137}
3138
3139static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
3140{
3141 uint8_t i, j;
3142
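	/*
	 * Mark register column i as valid if its value differs between any two
	 * adjacent clock-range entries; columns that never change carry no
	 * per-level information (how validflag is consumed is inferred from
	 * its name, not documented here).
	 */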
3143 for (i = 0; i < table->last; i++) {
3144 for (j = 1; j < table->num_entries; j++) {
3145 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3146 table->mc_reg_table_entry[j].mc_data[i]) {
3147 table->validflag |= (1<<i);
3148 break;
3149 }
3150 }
3151 }
3152
3153 return 0;
3154}
3155
3156int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3157{
3158 int result;
3159 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
3160 pp_atomctrl_mc_reg_table *table;
3161 struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
3162 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3163
3164 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
3165
3166 if (table == NULL)
3167 return -ENOMEM;
3168
3169 /* Program additional LP registers that are no longer programmed by VBIOS */
3170 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
3171 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
3172 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
3173 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
3174 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
3175 cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
3176 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
3177 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
3178 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
3179 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
3180 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
3181 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
3182 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
3183 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
3184 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
3185 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
3186 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
3187 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
3188 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
3189 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
3190 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
3191 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
3192 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
3193 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
3194 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
3195 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
3196 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
3197 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
3198 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
3199 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
3200 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
3201 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
3202 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
3203 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
3204 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
3205 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
3206 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
3207 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
3208 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
3209 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
3210
3211 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
3212
3213 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
3214
3215 if (!result)
3216 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
3217
3218 if (!result) {
3219 tonga_set_s0_mc_reg_index(ni_table);
3220 result = tonga_set_mc_special_registers(hwmgr, ni_table);
3221 }
3222
3223 if (!result)
3224 tonga_set_valid_flag(ni_table);
3225
3226 kfree(table);
3227
3228 return result;
3229}
3230
3231bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
3232{
3233	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
3234			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
3236}
3237
3238int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
3239 struct amd_pp_profile *request)
3240{
3241 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
3242 (hwmgr->smu_backend);
3243 struct SMU72_Discrete_GraphicsLevel *levels =
3244 smu_data->smc_state_table.GraphicsLevel;
3245 uint32_t array = smu_data->smu7_data.dpm_table_start +
3246 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
3247 uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
3248 SMU72_MAX_LEVELS_GRAPHICS;
3249 uint32_t i;
3250
3251 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
3252 levels[i].ActivityLevel =
3253 cpu_to_be16(request->activity_threshold);
3254 levels[i].EnabledForActivity = 1;
3255 levels[i].UpHyst = request->up_hyst;
3256 levels[i].DownHyst = request->down_hyst;
3257 }
3258
3259 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
3260 array_size, SMC_RAM_END);
3261}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
deleted file mode 100644
index 9d6a78a65976..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _TONGA_SMC_H
24#define _TONGA_SMC_H
25
26#include "smumgr.h"
27#include "smu72.h"
28
29
30#define ASICID_IS_TONGA_P(wDID, bRID) \
31 (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
32 || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
33
34
35struct tonga_pt_defaults {
36 uint8_t svi_load_line_en;
37 uint8_t svi_load_line_vddC;
38 uint8_t tdc_vddc_throttle_release_limit_perc;
39 uint8_t tdc_mawt;
40 uint8_t tdc_waterfall_ctl;
41 uint8_t dte_ambient_temp_base;
42 uint32_t display_cac;
43 uint32_t bapm_temp_gradient;
44 uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
45 uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
46};
47
48int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
49int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
50int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
51int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
52int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
53int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
54uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
55uint32_t tonga_get_mac_definition(uint32_t value);
56int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
57int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
58bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
59int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
60 struct amd_pp_profile *request);
61#endif
62
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index d22cf218cf18..0a8e48bff219 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,9 +33,69 @@
33#include "smu/smu_7_1_2_d.h" 33#include "smu/smu_7_1_2_d.h"
34#include "smu/smu_7_1_2_sh_mask.h" 34#include "smu/smu_7_1_2_sh_mask.h"
35#include "cgs_common.h" 35#include "cgs_common.h"
36#include "tonga_smc.h"
37#include "smu7_smumgr.h" 36#include "smu7_smumgr.h"
38 37
38#include "smu7_dyn_defaults.h"
39
40#include "smu7_hwmgr.h"
41#include "hardwaremanager.h"
42#include "ppatomctrl.h"
43
44#include "atombios.h"
45
46#include "pppcielanes.h"
47#include "pp_endian.h"
48
49#include "gmc/gmc_8_1_d.h"
50#include "gmc/gmc_8_1_sh_mask.h"
51
52#include "bif/bif_5_0_d.h"
53#include "bif/bif_5_0_sh_mask.h"
54
55#include "dce/dce_10_0_d.h"
56#include "dce/dce_10_0_sh_mask.h"
57
58
59#define VOLTAGE_SCALE 4
60#define POWERTUNE_DEFAULT_SET_MAX 1
61#define VOLTAGE_VID_OFFSET_SCALE1 625
62#define VOLTAGE_VID_OFFSET_SCALE2 100
63#define MC_CG_ARB_FREQ_F1 0x0b
64#define VDDC_VDDCI_DELTA 200
65
66
67static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
68/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
69 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
70 */
71 {1, 0xF, 0xFD, 0x19,
72 5, 45, 0, 0xB0000,
73 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
74 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
75 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
76 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
77 },
78};
79
80/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
81static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
82 {600, 1050, 3, 0},
83 {600, 1050, 6, 1}
84};
85
86/* [FF, SS] type, [] 4 voltage ranges,
87 * and [Floor Freq, Boundary Freq, VID min , VID max]
88 */
89static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
90 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
91 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
92};
93
94/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
95static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
96 {0, 1, 3, 2, 4, 5},
97 {0, 2, 4, 5, 6, 5}
98};
39 99
40static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) 100static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
41{ 101{
@@ -95,7 +155,6 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
95 return 0; 155 return 0;
96} 156}
97 157
98
99static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) 158static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
100{ 159{
101 int result = 0; 160 int result = 0;
@@ -160,13 +219,6 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
160 return result; 219 return result;
161} 220}
162 221
163/**
164 * Write a 32bit value to the SMC SRAM space.
165 * ALL PARAMETERS ARE IN HOST BYTE ORDER.
166 * @param smumgr the address of the powerplay hardware manager.
167 * @param smcAddress the address in the SMC RAM to access.
168 * @param value to write to the SMC SRAM.
169 */
170static int tonga_smu_init(struct pp_hwmgr *hwmgr) 222static int tonga_smu_init(struct pp_hwmgr *hwmgr)
171{ 223{
172 struct tonga_smumgr *tonga_priv = NULL; 224 struct tonga_smumgr *tonga_priv = NULL;
@@ -187,6 +239,3053 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
187 return 0; 239 return 0;
188} 240}
189 241
242
243static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
244 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
245 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
246{
247 uint32_t i = 0;
248 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
249 struct phm_ppt_v1_information *pptable_info =
250 (struct phm_ppt_v1_information *)(hwmgr->pptable);
251
252 /* clock - voltage dependency table is empty table */
253 if (allowed_clock_voltage_table->count == 0)
254 return -EINVAL;
255
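	/*
	 * Walk the dependency table for the first entry whose clock meets the
	 * request; when an entry has no explicit VDDCI, fall back to
	 * VDDC - VDDC_VDDCI_DELTA as the VDDCI lookup value.
	 */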
256 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
257 /* find first sclk bigger than request */
258 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
259 voltage->VddGfx = phm_get_voltage_index(
260 pptable_info->vddgfx_lookup_table,
261 allowed_clock_voltage_table->entries[i].vddgfx);
262 voltage->Vddc = phm_get_voltage_index(
263 pptable_info->vddc_lookup_table,
264 allowed_clock_voltage_table->entries[i].vddc);
265
266 if (allowed_clock_voltage_table->entries[i].vddci)
267 voltage->Vddci =
268 phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
269 else
270 voltage->Vddci =
271 phm_get_voltage_id(&data->vddci_voltage_table,
272 allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
273
274
275 if (allowed_clock_voltage_table->entries[i].mvdd)
276 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
277
278 voltage->Phases = 1;
279 return 0;
280 }
281 }
282
283 /* sclk is bigger than max sclk in the dependence table */
284 voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
285 allowed_clock_voltage_table->entries[i-1].vddgfx);
286 voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
287 allowed_clock_voltage_table->entries[i-1].vddc);
288
289 if (allowed_clock_voltage_table->entries[i-1].vddci)
290 voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
291 allowed_clock_voltage_table->entries[i-1].vddci);
292
293 if (allowed_clock_voltage_table->entries[i-1].mvdd)
294 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
295
296 return 0;
297}
298
299static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
300 SMU72_Discrete_DpmTable *table)
301{
302 unsigned int count;
303 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
304
305 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
306 table->VddcLevelCount = data->vddc_voltage_table.count;
307 for (count = 0; count < table->VddcLevelCount; count++) {
308 table->VddcTable[count] =
309 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
310 }
311 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
312 }
313 return 0;
314}
315
316static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
317 SMU72_Discrete_DpmTable *table)
318{
319 unsigned int count;
320 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
321
322 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
323 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
324 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
325 table->VddGfxTable[count] =
326 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
327 }
328 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
329 }
330 return 0;
331}
332
333static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
334 SMU72_Discrete_DpmTable *table)
335{
336 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
337 uint32_t count;
338
339 table->VddciLevelCount = data->vddci_voltage_table.count;
340 for (count = 0; count < table->VddciLevelCount; count++) {
341 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
342 table->VddciTable[count] =
343 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
344 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
345 table->SmioTable1.Pattern[count].Voltage =
346 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
347 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
348 table->SmioTable1.Pattern[count].Smio =
349 (uint8_t) count;
350 table->Smio[count] |=
351 data->vddci_voltage_table.entries[count].smio_low;
352 table->VddciTable[count] =
353 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
354 }
355 }
356
357 table->SmioMask1 = data->vddci_voltage_table.mask_low;
358 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
359
360 return 0;
361}
362
363static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
364 SMU72_Discrete_DpmTable *table)
365{
366 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
367 uint32_t count;
368
369 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
370 table->MvddLevelCount = data->mvdd_voltage_table.count;
371 for (count = 0; count < table->MvddLevelCount; count++) {
372 table->SmioTable2.Pattern[count].Voltage =
373 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
374 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
375 table->SmioTable2.Pattern[count].Smio =
376 (uint8_t) count;
377 table->Smio[count] |=
378 data->mvdd_voltage_table.entries[count].smio_low;
379 }
380 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
381
382 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
383 }
384
385 return 0;
386}
387
388static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
389 SMU72_Discrete_DpmTable *table)
390{
391 uint32_t count;
392 uint8_t index = 0;
393 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
394 struct phm_ppt_v1_information *pptable_info =
395 (struct phm_ppt_v1_information *)(hwmgr->pptable);
396 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
397 pptable_info->vddgfx_lookup_table;
398 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
399 pptable_info->vddc_lookup_table;
400
401 /* table is already swapped, so in order to use the value from it
402 * we need to swap it back.
403 */
404 uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
405 uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
406
407 for (count = 0; count < vddc_level_count; count++) {
408 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
409 index = phm_get_voltage_index(vddc_lookup_table,
410 data->vddc_voltage_table.entries[count].value);
411 table->BapmVddcVidLoSidd[count] =
412 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
413 table->BapmVddcVidHiSidd[count] =
414 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
415 table->BapmVddcVidHiSidd2[count] =
416 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
417 }
418
419 if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
420 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
421 for (count = 0; count < vddgfx_level_count; count++) {
422			index = phm_get_voltage_index(vddgfx_lookup_table,
423				data->vddgfx_voltage_table.entries[count].value);
424 table->BapmVddGfxVidHiSidd2[count] =
425 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
426 }
427 } else {
428 for (count = 0; count < vddc_level_count; count++) {
429 index = phm_get_voltage_index(vddc_lookup_table,
430 data->vddc_voltage_table.entries[count].value);
431 table->BapmVddGfxVidLoSidd[count] =
432 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
433 table->BapmVddGfxVidHiSidd[count] =
434 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
435 table->BapmVddGfxVidHiSidd2[count] =
436 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
437 }
438 }
439
440 return 0;
441}
442
443static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
444 SMU72_Discrete_DpmTable *table)
445{
446 int result;
447
448 result = tonga_populate_smc_vddc_table(hwmgr, table);
449 PP_ASSERT_WITH_CODE(!result,
450 "can not populate VDDC voltage table to SMC",
451 return -EINVAL);
452
453 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
454 PP_ASSERT_WITH_CODE(!result,
455 "can not populate VDDCI voltage table to SMC",
456 return -EINVAL);
457
458 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
459 PP_ASSERT_WITH_CODE(!result,
460 "can not populate VDDGFX voltage table to SMC",
461 return -EINVAL);
462
463 result = tonga_populate_smc_mvdd_table(hwmgr, table);
464 PP_ASSERT_WITH_CODE(!result,
465 "can not populate MVDD voltage table to SMC",
466 return -EINVAL);
467
468 result = tonga_populate_cac_tables(hwmgr, table);
469 PP_ASSERT_WITH_CODE(!result,
470 "can not populate CAC voltage tables to SMC",
471 return -EINVAL);
472
473 return 0;
474}
475
476static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
477 struct SMU72_Discrete_Ulv *state)
478{
479 struct phm_ppt_v1_information *table_info =
480 (struct phm_ppt_v1_information *)(hwmgr->pptable);
481
482 state->CcPwrDynRm = 0;
483 state->CcPwrDynRm1 = 0;
484
485 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
486 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
487 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
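	/* offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 is
	 * offset * 100 / 625, i.e. the ULV voltage offset expressed in
	 * 6.25 mV VID steps (assuming the offset itself is in mV). */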
488
489 state->VddcPhase = 1;
490
491 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
492 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
493 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
494
495 return 0;
496}
497
498static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
499 struct SMU72_Discrete_DpmTable *table)
500{
501 return tonga_populate_ulv_level(hwmgr, &table->Ulv);
502}
503
504static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
505{
506 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
507 struct smu7_dpm_table *dpm_table = &data->dpm_table;
508 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
509 uint32_t i;
510
511 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
512 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
513 table->LinkLevel[i].PcieGenSpeed =
514 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
515 table->LinkLevel[i].PcieLaneCount =
516 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
517 table->LinkLevel[i].EnabledForActivity =
518 1;
519 table->LinkLevel[i].SPC =
520 (uint8_t)(data->pcie_spc_cap & 0xff);
521 table->LinkLevel[i].DownThreshold =
522 PP_HOST_TO_SMC_UL(5);
523 table->LinkLevel[i].UpThreshold =
524 PP_HOST_TO_SMC_UL(30);
525 }
526
527 smu_data->smc_state_table.LinkLevelCount =
528 (uint8_t)dpm_table->pcie_speed_table.count;
529 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
530 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
531
532 return 0;
533}
534
535static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
536 uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
537{
538 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
539 pp_atomctrl_clock_dividers_vi dividers;
540 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
541 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
542 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
543 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
544 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
545 uint32_t reference_clock;
546 uint32_t reference_divider;
547 uint32_t fbdiv;
548 int result;
549
550 /* get the engine clock dividers for this clock value*/
551 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
552
553 PP_ASSERT_WITH_CODE(result == 0,
554 "Error retrieving Engine Clock dividers from VBIOS.", return result);
555
556 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
557 reference_clock = atomctrl_get_reference_clock(hwmgr);
558
559 reference_divider = 1 + dividers.uc_pll_ref_div;
560
561	/* low 14 bits are the fraction and high 12 bits are the divider */
562 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
563
564 /* SPLL_FUNC_CNTL setup*/
565 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
566 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
567 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
568 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
569
570 /* SPLL_FUNC_CNTL_3 setup*/
571 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
572 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
573
574 /* set to use fractional accumulation*/
575 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
576 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
577
578 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
579 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
580 pp_atomctrl_internal_ss_info ss_info;
581
582 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
583 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
584 /*
585 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
586 * ss_info.speed_spectrum_rate -- in unit of khz
587 */
588 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
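			/* i.e. reference_clock * 5 / (reference_divider * speed_spectrum_rate),
			 * with reference_divider == REFDIV + 1, matching the formula above. */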
589 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
590
591 /* clkv = 2 * D * fbdiv / NS */
592 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
593
594 cg_spll_spread_spectrum =
595 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
596 cg_spll_spread_spectrum =
597 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
598 cg_spll_spread_spectrum_2 =
599 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
600 }
601 }
602
603 sclk->SclkFrequency = engine_clock;
604 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
605 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
606 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
607 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
608 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
609
610 return 0;
611}
612
613static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
614 uint32_t engine_clock,
615 uint16_t sclk_activity_level_threshold,
616 SMU72_Discrete_GraphicsLevel *graphic_level)
617{
618 int result;
619 uint32_t mvdd;
620 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
621 struct phm_ppt_v1_information *pptable_info =
622 (struct phm_ppt_v1_information *)(hwmgr->pptable);
623
624 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
625
626 /* populate graphics levels*/
627 result = tonga_get_dependency_volt_by_clk(hwmgr,
628 pptable_info->vdd_dep_on_sclk, engine_clock,
629 &graphic_level->MinVoltage, &mvdd);
630 PP_ASSERT_WITH_CODE((!result),
631 "can not find VDDC voltage value for VDDC "
632 "engine clock dependency table", return result);
633
634 /* SCLK frequency in units of 10KHz*/
635 graphic_level->SclkFrequency = engine_clock;
636 /* Indicates maximum activity level for this performance level. 50% for now*/
637 graphic_level->ActivityLevel = sclk_activity_level_threshold;
638
639 graphic_level->CcPwrDynRm = 0;
640 graphic_level->CcPwrDynRm1 = 0;
641 /* this level can be used if activity is high enough.*/
642 graphic_level->EnabledForActivity = 0;
643 /* this level can be used for throttling.*/
644 graphic_level->EnabledForThrottle = 1;
645 graphic_level->UpHyst = 0;
646 graphic_level->DownHyst = 0;
647 graphic_level->VoltageDownHyst = 0;
648 graphic_level->PowerThrottle = 0;
649
650 data->display_timing.min_clock_in_sr =
651 hwmgr->display_config.min_core_set_clock_in_sr;
652
653 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
654 PHM_PlatformCaps_SclkDeepSleep))
655 graphic_level->DeepSleepDivId =
656 smu7_get_sleep_divider_id_from_clock(engine_clock,
657 data->display_timing.min_clock_in_sr);
658
659 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
660 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
661
662 if (!result) {
663 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
664 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
665 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
666 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
667 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
668 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
669 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
670 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
671 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
672 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
673 }
674
675 return result;
676}
677
678static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
679{
680 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
681 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
682 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
683 struct smu7_dpm_table *dpm_table = &data->dpm_table;
684 struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
685 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
686 uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
687 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
688
689 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
690 SMU72_MAX_LEVELS_GRAPHICS;
691
692 SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
693
694 uint32_t i, max_entry;
695 uint8_t highest_pcie_level_enabled = 0;
696 uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
697 uint8_t count = 0;
698 int result = 0;
699
700 memset(levels, 0x00, level_array_size);
701
702 for (i = 0; i < dpm_table->sclk_table.count; i++) {
703 result = tonga_populate_single_graphic_level(hwmgr,
704 dpm_table->sclk_table.dpm_levels[i].value,
705 (uint16_t)smu_data->activity_target[i],
706 &(smu_data->smc_state_table.GraphicsLevel[i]));
707 if (result != 0)
708 return result;
709
710 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
711 if (i > 1)
712 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
713 }
714
715 /* Only enable level 0 for now. */
716 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
717
718 /* set highest level watermark to high */
719 if (dpm_table->sclk_table.count > 1)
720 smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
721 PPSMC_DISPLAY_WATERMARK_HIGH;
722
723 smu_data->smc_state_table.GraphicsDpmLevelCount =
724 (uint8_t)dpm_table->sclk_table.count;
725 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
726 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
727
728 if (pcie_table != NULL) {
729 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
730 "There must be 1 or more PCIE levels defined in PPTable.",
731 return -EINVAL);
732 max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
733 for (i = 0; i < dpm_table->sclk_table.count; i++) {
734 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
735 (uint8_t) ((i < max_entry) ? i : max_entry);
736 }
737 } else {
738 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
739			pr_err("PCIe DPM enable mask is 0!");
740
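		/*
		 * Derive three representative PCIe DPM levels from the enable
		 * mask: the highest and lowest set bits, plus a level roughly in
		 * between. Sclk level 0 then uses the lowest, level 1 the middle,
		 * and all higher sclk levels the highest PCIe level.
		 */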
741 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
742 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
743 (1<<(highest_pcie_level_enabled+1))) != 0)) {
744 highest_pcie_level_enabled++;
745 }
746
747 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
748 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
749 (1<<lowest_pcie_level_enabled)) == 0)) {
750 lowest_pcie_level_enabled++;
751 }
752
753 while ((count < highest_pcie_level_enabled) &&
754 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
755 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
756 count++;
757 }
758 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
759 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
760
761
762 /* set pcieDpmLevel to highest_pcie_level_enabled*/
763 for (i = 2; i < dpm_table->sclk_table.count; i++)
764 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
765
766 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
767 smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
768
769 /* set pcieDpmLevel to mid_pcie_level_enabled*/
770 smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
771 }
772 /* level count will send to smc once at init smc table and never change*/
773 result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
774 (uint8_t *)levels, (uint32_t)level_array_size,
775 SMC_RAM_END);
776
777 return result;
778}
779
780static int tonga_calculate_mclk_params(
781 struct pp_hwmgr *hwmgr,
782 uint32_t memory_clock,
783 SMU72_Discrete_MemoryLevel *mclk,
784 bool strobe_mode,
785 bool dllStateOn
786 )
787{
788 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
789
790 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
791 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
792 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
793 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
794 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
795 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
796 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
797 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
798 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
799
800 pp_atomctrl_memory_clock_param mpll_param;
801 int result;
802
803 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
804 memory_clock, &mpll_param, strobe_mode);
805 PP_ASSERT_WITH_CODE(
806 !result,
807 "Error retrieving Memory Clock Parameters from VBIOS.",
808 return result);
809
810 /* MPLL_FUNC_CNTL setup*/
811 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
812 mpll_param.bw_ctrl);
813
814 /* MPLL_FUNC_CNTL_1 setup*/
815 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
816 MPLL_FUNC_CNTL_1, CLKF,
817 mpll_param.mpll_fb_divider.cl_kf);
818 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
819 MPLL_FUNC_CNTL_1, CLKFRAC,
820 mpll_param.mpll_fb_divider.clk_frac);
821 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
822 MPLL_FUNC_CNTL_1, VCO_MODE,
823 mpll_param.vco_mode);
824
825 /* MPLL_AD_FUNC_CNTL setup*/
826 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
827 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
828 mpll_param.mpll_post_divider);
829
830 if (data->is_memory_gddr5) {
831 /* MPLL_DQ_FUNC_CNTL setup*/
832 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
833 MPLL_DQ_FUNC_CNTL, YCLK_SEL,
834 mpll_param.yclk_sel);
835 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
836 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
837 mpll_param.mpll_post_divider);
838 }
839
840 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
841 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
842 /*
843 ************************************
844 Fref = Reference Frequency
845 NF = Feedback divider ratio
846 NR = Reference divider ratio
847 Fnom = Nominal VCO output frequency = Fref * NF / NR
848 Fs = Spreading Rate
849 D = Percentage down-spread / 2
850 Fint = Reference input frequency to PFD = Fref / NR
851 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
852 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
853 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
854 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
855 *************************************
856 */
857 pp_atomctrl_internal_ss_info ss_info;
858 uint32_t freq_nom;
859 uint32_t tmp;
860 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
861
862 /* for GDDR5 for all modes and DDR3 */
863 if (1 == mpll_param.qdr)
864 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
865 else
866 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
867
868 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
869 tmp = (freq_nom / reference_clock);
870 tmp = tmp * tmp;
871
872 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
873 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
874			/* ss_info.speed_spectrum_rate -- in unit of kHz */
875 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
876 /* = reference_clock * 5 / speed_spectrum_rate */
877 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
878
879 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
880 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
881 uint32_t clkv =
882 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
883 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
884
885 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
886 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
887 }
888 }
889
890 /* MCLK_PWRMGT_CNTL setup */
891 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
892 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
893 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
894 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
895 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
896 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
897
898	/* Save the result data to the output memory level structure */
899 mclk->MclkFrequency = memory_clock;
900 mclk->MpllFuncCntl = mpll_func_cntl;
901 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
902 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
903 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
904 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
905 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
906 mclk->DllCntl = dll_cntl;
907 mclk->MpllSs1 = mpll_ss1;
908 mclk->MpllSs2 = mpll_ss2;
909
910 return 0;
911}
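/*
 * Editor's illustration -- not part of the driver. The CLKS/CLKV spread-
 * spectrum arithmetic above, pulled into a standalone host program with
 * made-up inputs so the integer math can be checked in isolation. Units
 * follow the comments in tonga_calculate_mclk_params (clocks in 10 kHz,
 * rate in kHz, percentage in 0.01%); the sample numbers are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static void mpll_ss_sketch(uint32_t freq_nom, uint32_t reference_clock,
			   uint32_t ss_rate, uint32_t ss_percentage)
{
	/* (Fnom / Fref)^2, with the implicit reference divider of 1 */
	uint32_t tmp = (freq_nom / reference_clock) * (freq_nom / reference_clock);
	/* CLKS = Fref * 5 / Fs */
	uint32_t clks = reference_clock * 5 / ss_rate;
	/* CLKV = 131 * D * Fs / 100 * (Fnom / Fref)^2 / Fnom */
	uint32_t clkv = (uint32_t)((((131 * ss_percentage * ss_rate) / 100) * tmp) / freq_nom);

	printf("CLKS=%u CLKV=%u\n", clks, clkv);
}

int main(void)
{
	mpll_ss_sketch(96000, 2700, 400, 150);	/* prints CLKS=33 CLKV=1002 */
	return 0;
}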
912
913static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
914 bool strobe_mode)
915{
916 uint8_t mc_para_index;
917
918 if (strobe_mode) {
919 if (memory_clock < 12500)
920 mc_para_index = 0x00;
921 else if (memory_clock > 47500)
922 mc_para_index = 0x0f;
923 else
924 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
925 } else {
926 if (memory_clock < 65000)
927 mc_para_index = 0x00;
928 else if (memory_clock > 135000)
929 mc_para_index = 0x0f;
930 else
931 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
932 }
933
934 return mc_para_index;
935}
936
937static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
938{
939 uint8_t mc_para_index;
940
941 if (memory_clock < 10000)
942 mc_para_index = 0;
943 else if (memory_clock >= 80000)
944 mc_para_index = 0x0f;
945 else
946 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
947
948 return mc_para_index;
949}
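/*
 * Editor's sketch -- not driver code. Spot checks of the ratio mapping in
 * the two helpers above, with clocks in the same 10 kHz units the driver
 * uses; the expected indices follow directly from the formulas.
 */
#include <assert.h>

int main(void)
{
	/* GDDR5, strobe mode: 300 MHz -> (30000 - 10000) / 2500 = 8 */
	assert((30000 - 10000) / 2500 == 8);
	/* GDDR5, non-strobe: 1000 MHz -> (100000 - 60000) / 5000 = 8 */
	assert((100000 - 60000) / 5000 == 8);
	/* DDR3: 500 MHz -> (50000 - 10000) / 5000 + 1 = 9 */
	assert((50000 - 10000) / 5000 + 1 == 9);
	return 0;
}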
950
951
952static int tonga_populate_single_memory_level(
953 struct pp_hwmgr *hwmgr,
954 uint32_t memory_clock,
955 SMU72_Discrete_MemoryLevel *memory_level
956 )
957{
958 uint32_t mvdd = 0;
959 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
960 struct phm_ppt_v1_information *pptable_info =
961 (struct phm_ppt_v1_information *)(hwmgr->pptable);
962 int result = 0;
963 bool dll_state_on;
964 struct cgs_display_info info = {0};
965 uint32_t mclk_edc_wr_enable_threshold = 40000;
966 uint32_t mclk_stutter_mode_threshold = 30000;
967 uint32_t mclk_edc_enable_threshold = 40000;
968 uint32_t mclk_strobe_mode_threshold = 40000;
969
970 if (NULL != pptable_info->vdd_dep_on_mclk) {
971 result = tonga_get_dependency_volt_by_clk(hwmgr,
972 pptable_info->vdd_dep_on_mclk,
973 memory_clock,
974 &memory_level->MinVoltage, &mvdd);
975 PP_ASSERT_WITH_CODE(
976 !result,
977				"cannot find MinVddc voltage value from memory VDDC "
978 "voltage dependency table",
979 return result);
980 }
981
982 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
983 memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
984 else
985 memory_level->MinMvdd = mvdd;
986
987 memory_level->EnabledForThrottle = 1;
988 memory_level->EnabledForActivity = 0;
989 memory_level->UpHyst = 0;
990 memory_level->DownHyst = 100;
991 memory_level->VoltageDownHyst = 0;
992
993 /* Indicates maximum activity level for this performance level.*/
994 memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
995 memory_level->StutterEnable = 0;
996 memory_level->StrobeEnable = 0;
997 memory_level->EdcReadEnable = 0;
998 memory_level->EdcWriteEnable = 0;
999 memory_level->RttEnable = 0;
1000
1001	/* Default to the low watermark. The highest level will be set to high later. */
1002 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1003
1004 cgs_get_active_displays_info(hwmgr->device, &info);
1005 data->display_timing.num_existing_displays = info.display_count;
1006
1007	if ((mclk_stutter_mode_threshold != 0) &&
1008	    (memory_clock <= mclk_stutter_mode_threshold) &&
1009	    (!data->is_uvd_enabled) &&
1010	    (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) &&
1011	    (data->display_timing.num_existing_displays <= 2) &&
1012	    (data->display_timing.num_existing_displays != 0))
1013 memory_level->StutterEnable = 1;
1014
1015 /* decide strobe mode*/
1016 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1017 (memory_clock <= mclk_strobe_mode_threshold);
1018
1019 /* decide EDC mode and memory clock ratio*/
1020 if (data->is_memory_gddr5) {
1021 memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
1022 memory_level->StrobeEnable);
1023
1024 if ((mclk_edc_enable_threshold != 0) &&
1025 (memory_clock > mclk_edc_enable_threshold)) {
1026 memory_level->EdcReadEnable = 1;
1027 }
1028
1029 if ((mclk_edc_wr_enable_threshold != 0) &&
1030 (memory_clock > mclk_edc_wr_enable_threshold)) {
1031 memory_level->EdcWriteEnable = 1;
1032 }
1033
1034 if (memory_level->StrobeEnable) {
1035 if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
1036 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
1037 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1038 } else {
1039 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1040 }
1041
1042 } else {
1043 dll_state_on = data->dll_default_on;
1044 }
1045 } else {
1046 memory_level->StrobeRatio =
1047 tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
1048 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1049 }
1050
1051 result = tonga_calculate_mclk_params(hwmgr,
1052 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1053
1054 if (!result) {
1055 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
1056		/* MCLK frequency in units of 10 kHz */
1057 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1058 /* Indicates maximum activity level for this performance level.*/
1059 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1060 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1061 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1062 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1063 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1064 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1065 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1066 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1067 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1068 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1069 }
1070
1071 return result;
1072}
1073
1074int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1075{
1076 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1077 struct tonga_smumgr *smu_data =
1078 (struct tonga_smumgr *)(hwmgr->smu_backend);
1079 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1080 int result;
1081
1082 /* populate MCLK dpm table to SMU7 */
1083 uint32_t level_array_address =
1084 smu_data->smu7_data.dpm_table_start +
1085 offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
1086 uint32_t level_array_size =
1087 sizeof(SMU72_Discrete_MemoryLevel) *
1088 SMU72_MAX_LEVELS_MEMORY;
1089 SMU72_Discrete_MemoryLevel *levels =
1090 smu_data->smc_state_table.MemoryLevel;
1091 uint32_t i;
1092
1093 memset(levels, 0x00, level_array_size);
1094
1095 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1096 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1097			"cannot populate memory level as memory clock is zero",
1098 return -EINVAL);
1099 result = tonga_populate_single_memory_level(
1100 hwmgr,
1101 dpm_table->mclk_table.dpm_levels[i].value,
1102 &(smu_data->smc_state_table.MemoryLevel[i]));
1103 if (result)
1104 return result;
1105 }
1106
1107 /* Only enable level 0 for now.*/
1108 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1109
1110 /*
1111	 * This prevents MC activity in stutter mode from pushing DPM up. The UVD
1112	 * change complements this by putting the MCLK in a higher state by default,
1113	 * so that we are not affected by the up threshold or MCLK DPM latency.
1114 */
1115 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1116 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1117
1118 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1119 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1120 /* set highest level watermark to high*/
1121 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1122
1123	/* The level count is sent to the SMC once at SMC table init and never changes. */
1124 result = smu7_copy_bytes_to_smc(hwmgr,
1125 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1126 SMC_RAM_END);
1127
1128 return result;
1129}
1130
1131static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1132 uint32_t mclk, SMIO_Pattern *smio_pattern)
1133{
1134 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1135 struct phm_ppt_v1_information *table_info =
1136 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1137 uint32_t i = 0;
1138
1139 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1140		/* find the first mvdd entry whose clock is >= the requested mclk */
1141 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1142 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1143 /* Always round to higher voltage. */
1144 smio_pattern->Voltage =
1145 data->mvdd_voltage_table.entries[i].value;
1146 break;
1147 }
1148 }
1149
1150 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1151 "MVDD Voltage is outside the supported range.",
1152 return -EINVAL);
1153 } else {
1154 return -EINVAL;
1155 }
1156
1157 return 0;
1158}
1159
1160
1161static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1162 SMU72_Discrete_DpmTable *table)
1163{
1164 int result = 0;
1165 struct tonga_smumgr *smu_data =
1166 (struct tonga_smumgr *)(hwmgr->smu_backend);
1167 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1168 struct pp_atomctrl_clock_dividers_vi dividers;
1169
1170 SMIO_Pattern voltage_level;
1171 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1172 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1173 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1174 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1175
1176 /* The ACPI state should not do DPM on DC (or ever).*/
1177 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1178
1179 table->ACPILevel.MinVoltage =
1180 smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
1181
1182	/* use the reference clock as the ACPI level SCLK frequency */
1183 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1184
1185 /* get the engine clock dividers for this clock value*/
1186 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1187 table->ACPILevel.SclkFrequency, &dividers);
1188
1189 PP_ASSERT_WITH_CODE(result == 0,
1190 "Error retrieving Engine Clock dividers from VBIOS.",
1191 return result);
1192
1193 /* divider ID for required SCLK*/
1194 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1195 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1196 table->ACPILevel.DeepSleepDivId = 0;
1197
1198 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1199 SPLL_PWRON, 0);
1200 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1201 SPLL_RESET, 1);
1202 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
1203 SCLK_MUX_SEL, 4);
1204
1205 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1206 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1207 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1208 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1209 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1210 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1211 table->ACPILevel.CcPwrDynRm = 0;
1212 table->ACPILevel.CcPwrDynRm1 = 0;
1213
1214
1215 /* For various features to be enabled/disabled while this level is active.*/
1216 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1217	/* SCLK frequency in units of 10 kHz */
1218 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1219 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1220 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1221 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1222 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1223 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1224 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1225 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1226 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1227
1228 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1229 table->MemoryACPILevel.MinVoltage =
1230 smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
1231
1232 /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
1233
1234 if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
1235 table->MemoryACPILevel.MinMvdd =
1236 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1237 else
1238 table->MemoryACPILevel.MinMvdd = 0;
1239
1240 /* Force reset on DLL*/
1241 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1242 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1243 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1244 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1245
1246 /* Disable DLL in ACPIState*/
1247 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1248 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1249 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1250 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1251
1252 /* Enable DLL bypass signal*/
1253 dll_cntl = PHM_SET_FIELD(dll_cntl,
1254 DLL_CNTL, MRDCK0_BYPASS, 0);
1255 dll_cntl = PHM_SET_FIELD(dll_cntl,
1256 DLL_CNTL, MRDCK1_BYPASS, 0);
1257
1258 table->MemoryACPILevel.DllCntl =
1259 PP_HOST_TO_SMC_UL(dll_cntl);
1260 table->MemoryACPILevel.MclkPwrmgtCntl =
1261 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1262 table->MemoryACPILevel.MpllAdFuncCntl =
1263 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1264 table->MemoryACPILevel.MpllDqFuncCntl =
1265 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1266 table->MemoryACPILevel.MpllFuncCntl =
1267 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1268 table->MemoryACPILevel.MpllFuncCntl_1 =
1269 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1270 table->MemoryACPILevel.MpllFuncCntl_2 =
1271 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1272 table->MemoryACPILevel.MpllSs1 =
1273 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1274 table->MemoryACPILevel.MpllSs2 =
1275 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1276
1277 table->MemoryACPILevel.EnabledForThrottle = 0;
1278 table->MemoryACPILevel.EnabledForActivity = 0;
1279 table->MemoryACPILevel.UpHyst = 0;
1280 table->MemoryACPILevel.DownHyst = 100;
1281 table->MemoryACPILevel.VoltageDownHyst = 0;
1282 /* Indicates maximum activity level for this performance level.*/
1283 table->MemoryACPILevel.ActivityLevel =
1284 PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
1285
1286 table->MemoryACPILevel.StutterEnable = 0;
1287 table->MemoryACPILevel.StrobeEnable = 0;
1288 table->MemoryACPILevel.EdcReadEnable = 0;
1289 table->MemoryACPILevel.EdcWriteEnable = 0;
1290 table->MemoryACPILevel.RttEnable = 0;
1291
1292 return result;
1293}
1294
1295static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1296 SMU72_Discrete_DpmTable *table)
1297{
1298 int result = 0;
1299
1300 uint8_t count;
1301 pp_atomctrl_clock_dividers_vi dividers;
1302 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1303 struct phm_ppt_v1_information *pptable_info =
1304 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1305 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1306 pptable_info->mm_dep_table;
1307
1308 table->UvdLevelCount = (uint8_t) (mm_table->count);
1309 table->UvdBootLevel = 0;
1310
1311 for (count = 0; count < table->UvdLevelCount; count++) {
1312 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1313 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1314 table->UvdLevel[count].MinVoltage.Vddc =
1315 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1316 mm_table->entries[count].vddc);
1317 table->UvdLevel[count].MinVoltage.VddGfx =
1318 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1319 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1320 mm_table->entries[count].vddgfx) : 0;
1321 table->UvdLevel[count].MinVoltage.Vddci =
1322 phm_get_voltage_id(&data->vddci_voltage_table,
1323 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1324 table->UvdLevel[count].MinVoltage.Phases = 1;
1325
1326		/* retrieve divider value from VBIOS */
1327 result = atomctrl_get_dfs_pll_dividers_vi(
1328 hwmgr,
1329 table->UvdLevel[count].VclkFrequency,
1330 &dividers);
1331
1332 PP_ASSERT_WITH_CODE((!result),
1333			"cannot find divider id for Vclk clock",
1334 return result);
1335
1336 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1337
1338 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1339 table->UvdLevel[count].DclkFrequency, &dividers);
1340 PP_ASSERT_WITH_CODE((!result),
1341			"cannot find divider id for Dclk clock",
1342 return result);
1343
1344 table->UvdLevel[count].DclkDivider =
1345 (uint8_t)dividers.pll_post_divider;
1346
1347 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1348 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1349 }
1350
1351 return result;
1352
1353}
1354
1355static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1356 SMU72_Discrete_DpmTable *table)
1357{
1358 int result = 0;
1359
1360 uint8_t count;
1361 pp_atomctrl_clock_dividers_vi dividers;
1362 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1363 struct phm_ppt_v1_information *pptable_info =
1364 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1365 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1366 pptable_info->mm_dep_table;
1367
1368 table->VceLevelCount = (uint8_t) (mm_table->count);
1369 table->VceBootLevel = 0;
1370
1371 for (count = 0; count < table->VceLevelCount; count++) {
1372 table->VceLevel[count].Frequency =
1373 mm_table->entries[count].eclk;
1374 table->VceLevel[count].MinVoltage.Vddc =
1375 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1376 mm_table->entries[count].vddc);
1377 table->VceLevel[count].MinVoltage.VddGfx =
1378 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1379 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1380 mm_table->entries[count].vddgfx) : 0;
1381 table->VceLevel[count].MinVoltage.Vddci =
1382 phm_get_voltage_id(&data->vddci_voltage_table,
1383 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1384 table->VceLevel[count].MinVoltage.Phases = 1;
1385
1386		/* retrieve divider value from VBIOS */
1387 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1388 table->VceLevel[count].Frequency, &dividers);
1389 PP_ASSERT_WITH_CODE((!result),
1390			"cannot find divider id for VCE engine clock",
1391 return result);
1392
1393 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1394
1395 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1396 }
1397
1398 return result;
1399}
1400
1401static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1402 SMU72_Discrete_DpmTable *table)
1403{
1404 int result = 0;
1405 uint8_t count;
1406 pp_atomctrl_clock_dividers_vi dividers;
1407 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1408 struct phm_ppt_v1_information *pptable_info =
1409 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1410 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1411 pptable_info->mm_dep_table;
1412
1413 table->AcpLevelCount = (uint8_t) (mm_table->count);
1414 table->AcpBootLevel = 0;
1415
1416 for (count = 0; count < table->AcpLevelCount; count++) {
1417 table->AcpLevel[count].Frequency =
1418 pptable_info->mm_dep_table->entries[count].aclk;
1419 table->AcpLevel[count].MinVoltage.Vddc =
1420 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1421 mm_table->entries[count].vddc);
1422 table->AcpLevel[count].MinVoltage.VddGfx =
1423 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1424 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1425 mm_table->entries[count].vddgfx) : 0;
1426 table->AcpLevel[count].MinVoltage.Vddci =
1427 phm_get_voltage_id(&data->vddci_voltage_table,
1428 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1429 table->AcpLevel[count].MinVoltage.Phases = 1;
1430
1431		/* retrieve divider value from VBIOS */
1432 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1433 table->AcpLevel[count].Frequency, &dividers);
1434 PP_ASSERT_WITH_CODE((!result),
1435			"cannot find divider id for engine clock", return result);
1436
1437 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1438
1439 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1440 }
1441
1442 return result;
1443}
1444
1445static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1446 SMU72_Discrete_DpmTable *table)
1447{
1448 int result = 0;
1449 uint8_t count;
1450 pp_atomctrl_clock_dividers_vi dividers;
1451 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1452 struct phm_ppt_v1_information *pptable_info =
1453 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1454 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1455 pptable_info->mm_dep_table;
1456
1457 table->SamuBootLevel = 0;
1458 table->SamuLevelCount = (uint8_t) (mm_table->count);
1459
1460 for (count = 0; count < table->SamuLevelCount; count++) {
1461 /* not sure whether we need evclk or not */
1462 table->SamuLevel[count].Frequency =
1463 pptable_info->mm_dep_table->entries[count].samclock;
1464 table->SamuLevel[count].MinVoltage.Vddc =
1465 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1466 mm_table->entries[count].vddc);
1467 table->SamuLevel[count].MinVoltage.VddGfx =
1468 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
1469 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1470 mm_table->entries[count].vddgfx) : 0;
1471 table->SamuLevel[count].MinVoltage.Vddci =
1472 phm_get_voltage_id(&data->vddci_voltage_table,
1473 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1474 table->SamuLevel[count].MinVoltage.Phases = 1;
1475
1476		/* retrieve divider value from VBIOS */
1477 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1478 table->SamuLevel[count].Frequency, &dividers);
1479 PP_ASSERT_WITH_CODE((!result),
1480			"cannot find divider id for samu clock", return result);
1481
1482 table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1483
1484 CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
1485 }
1486
1487 return result;
1488}
1489
1490static int tonga_populate_memory_timing_parameters(
1491 struct pp_hwmgr *hwmgr,
1492 uint32_t engine_clock,
1493 uint32_t memory_clock,
1494 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1495 )
1496{
1497 uint32_t dramTiming;
1498 uint32_t dramTiming2;
1499 uint32_t burstTime;
1500 int result;
1501
1502 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1503 engine_clock, memory_clock);
1504
1505 PP_ASSERT_WITH_CODE(result == 0,
1506 "Error calling VBIOS to set DRAM_TIMING.", return result);
1507
1508 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1509 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1510 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1511
1512 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1513 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1514 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1515
1516 return 0;
1517}
1518
1519static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1520{
1521 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1522 struct tonga_smumgr *smu_data =
1523 (struct tonga_smumgr *)(hwmgr->smu_backend);
1524 int result = 0;
1525 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1526 uint32_t i, j;
1527
1528 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
1529
1530 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1531 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1532 result = tonga_populate_memory_timing_parameters
1533 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1534 data->dpm_table.mclk_table.dpm_levels[j].value,
1535 &arb_regs.entries[i][j]);
1536
1537 if (result)
1538 break;
1539 }
1540 }
1541
1542 if (!result) {
1543 result = smu7_copy_bytes_to_smc(
1544 hwmgr,
1545 smu_data->smu7_data.arb_table_start,
1546 (uint8_t *)&arb_regs,
1547 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1548 SMC_RAM_END
1549 );
1550 }
1551
1552 return result;
1553}
1554
1555static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1556 SMU72_Discrete_DpmTable *table)
1557{
1558 int result = 0;
1559 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1560 struct tonga_smumgr *smu_data =
1561 (struct tonga_smumgr *)(hwmgr->smu_backend);
1562 table->GraphicsBootLevel = 0;
1563 table->MemoryBootLevel = 0;
1564
1565 /* find boot level from dpm table*/
1566 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1567 data->vbios_boot_state.sclk_bootup_value,
1568 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1569
1570 if (result != 0) {
1571 smu_data->smc_state_table.GraphicsBootLevel = 0;
1572		pr_err("[powerplay] VBIOS boot engine clock value "
1573				"not found in the dependency table. "
1574				"Using Graphics DPM level 0 !");
1575 result = 0;
1576 }
1577
1578 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1579 data->vbios_boot_state.mclk_bootup_value,
1580 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1581
1582 if (result != 0) {
1583 smu_data->smc_state_table.MemoryBootLevel = 0;
1584		pr_err("[powerplay] VBIOS boot memory clock value "
1585				"not found in the dependency table. "
1586				"Using Memory DPM level 0 !");
1587 result = 0;
1588 }
1589
1590 table->BootVoltage.Vddc =
1591 phm_get_voltage_id(&(data->vddc_voltage_table),
1592 data->vbios_boot_state.vddc_bootup_value);
1593 table->BootVoltage.VddGfx =
1594 phm_get_voltage_id(&(data->vddgfx_voltage_table),
1595 data->vbios_boot_state.vddgfx_bootup_value);
1596 table->BootVoltage.Vddci =
1597 phm_get_voltage_id(&(data->vddci_voltage_table),
1598 data->vbios_boot_state.vddci_bootup_value);
1599 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1600
1601 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1602
1603 return result;
1604}
1605
1606static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1607{
1608 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1609 volt_with_cks, value;
1610 uint16_t clock_freq_u16;
1611 struct tonga_smumgr *smu_data =
1612 (struct tonga_smumgr *)(hwmgr->smu_backend);
1613 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1614 volt_offset = 0;
1615 struct phm_ppt_v1_information *table_info =
1616 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1617 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1618 table_info->vdd_dep_on_sclk;
1619 uint32_t hw_revision, dev_id;
1620 struct cgs_system_info sys_info = {0};
1621
1622 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1623
1624 sys_info.size = sizeof(struct cgs_system_info);
1625
1626 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1627 cgs_query_system_info(hwmgr->device, &sys_info);
1628 hw_revision = (uint32_t)sys_info.value;
1629
1630 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
1631 cgs_query_system_info(hwmgr->device, &sys_info);
1632 dev_id = (uint32_t)sys_info.value;
1633
1634	/* Read SMU_EFUSE to calculate RO and determine
1635	 * whether the part is SS or FF. If RO >= 1660MHz, the part is FF.
1636 */
1637 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1638 ixSMU_EFUSE_0 + (146 * 4));
1639 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1640 ixSMU_EFUSE_0 + (148 * 4));
1641 efuse &= 0xFF000000;
1642 efuse = efuse >> 24;
1643 efuse2 &= 0xF;
1644
1645 if (efuse2 == 1)
1646 ro = (2300 - 1350) * efuse / 255 + 1350;
1647 else
1648 ro = (2500 - 1000) * efuse / 255 + 1000;
1649
1650 if (ro >= 1660)
1651 type = 0;
1652 else
1653 type = 1;
1654
1655 /* Populate Stretch amount */
1656 smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
1657
1658
1659 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1660 for (i = 0; i < sclk_table->count; i++) {
1661 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1662 sclk_table->entries[i].cks_enable << i;
1663 if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
1664 volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
1665 (sclk_table->entries[i].clk/100) / 10000) * 1000 /
1666 (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
1667 volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
1668 (sclk_table->entries[i].clk/100) / 100000) * 1000 /
1669 (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
1670 } else {
1671 volt_without_cks = (uint32_t)((14041 *
1672 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1673 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1674 volt_with_cks = (uint32_t)((13946 *
1675 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1676 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1677 }
1678 if (volt_without_cks >= volt_with_cks)
1679 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1680 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1681 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1682 }
1683
1684 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1685 STRETCH_ENABLE, 0x0);
1686 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1687 masterReset, 0x1);
1688 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1689 staticEnable, 0x1);
1690 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1691 masterReset, 0x0);
1692
1693 /* Populate CKS Lookup Table */
1694 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1695 stretch_amount2 = 0;
1696 else if (stretch_amount == 3 || stretch_amount == 4)
1697 stretch_amount2 = 1;
1698 else {
1699 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1700 PHM_PlatformCaps_ClockStretcher);
1701 PP_ASSERT_WITH_CODE(false,
1702 "Stretch Amount in PPTable not supported\n",
1703 return -EINVAL);
1704 }
1705
1706 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1707 ixPWR_CKS_CNTL);
1708 value &= 0xFFC2FF87;
1709 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1710 tonga_clock_stretcher_lookup_table[stretch_amount2][0];
1711 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1712 tonga_clock_stretcher_lookup_table[stretch_amount2][1];
1713 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
1714 GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
1715 SclkFrequency) / 100);
1716 if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
1717 clock_freq_u16 &&
1718 tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
1719 clock_freq_u16) {
1720 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1721 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1722 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1723 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1724 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1725 value |= (tonga_clock_stretch_amount_conversion
1726 [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
1727 [stretch_amount]) << 3;
1728 }
1729 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1730 CKS_LOOKUPTableEntry[0].minFreq);
1731 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
1732 CKS_LOOKUPTableEntry[0].maxFreq);
1733 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1734 tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1735 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1736 (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1737
1738 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1739 ixPWR_CKS_CNTL, value);
1740
1741 /* Populate DDT Lookup Table */
1742 for (i = 0; i < 4; i++) {
1743 /* Assign the minimum and maximum VID stored
1744 * in the last row of Clock Stretcher Voltage Table.
1745 */
1746 smu_data->smc_state_table.ClockStretcherDataTable.
1747 ClockStretcherDataTableEntry[i].minVID =
1748 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
1749 smu_data->smc_state_table.ClockStretcherDataTable.
1750 ClockStretcherDataTableEntry[i].maxVID =
1751 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
1752 /* Loop through each SCLK and check the frequency
1753		 * to see if it lies within the frequency range for the clock stretcher.
1754 */
1755 for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
1756 cks_setting = 0;
1757 clock_freq = PP_SMC_TO_HOST_UL(
1758 smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
1759 /* Check the allowed frequency against the sclk level[j].
1760 * Sclk's endianness has already been converted,
1761		 * and it is in 10 kHz units,
1762		 * as opposed to the data table, which is in MHz units.
1763 */
1764 if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
1765 cks_setting |= 0x2;
1766 if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
1767 cks_setting |= 0x1;
1768 }
1769 smu_data->smc_state_table.ClockStretcherDataTable.
1770 ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
1771 }
1772 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
1773 ClockStretcherDataTable.
1774 ClockStretcherDataTableEntry[i].setting);
1775 }
1776
1777 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1778 ixPWR_CKS_CNTL);
1779 value &= 0xFFFFFFFE;
1780 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1781 ixPWR_CKS_CNTL, value);
1782
1783 return 0;
1784}
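/*
 * Editor's illustration -- not driver code. The efuse-to-RO mapping used
 * above to classify a part as SS or FF, run on a hypothetical fuse value.
 * efuse2 selects one of two linear maps from the 8-bit fuse byte to an RO
 * figure, and RO >= 1660 marks the part as FF (type 0).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t efuse = 128;	/* hypothetical fused byte, 0..255 */
	uint32_t efuse2 = 1;	/* hypothetical map selector */
	uint32_t ro;

	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* 950 * 128 / 255 + 1350 = 1826, so this part would be FF (type 0) */
	printf("ro=%u type=%u\n", ro, ro >= 1660 ? 0u : 1u);
	return 0;
}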
1785
1786static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1787 SMU72_Discrete_DpmTable *table)
1788{
1789 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1790 uint16_t config;
1791
1792 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
1793		/* Split mode */
1794 config = VR_SVI2_PLANE_1;
1795 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1796
1797 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1798 config = VR_SVI2_PLANE_2;
1799 table->VRConfig |= config;
1800 } else {
1801			pr_err("VDDC and VDDGFX should both "
1802				"be on SVI2 control in split mode !\n");
1803 }
1804 } else {
1805 /* Merged mode */
1806 config = VR_MERGED_WITH_VDDC;
1807 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1808
1809 /* Set Vddc Voltage Controller */
1810 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1811 config = VR_SVI2_PLANE_1;
1812 table->VRConfig |= config;
1813 } else {
1814 pr_err("VDDC should be on "
1815 "SVI2 control in merged mode !\n");
1816 }
1817 }
1818
1819 /* Set Vddci Voltage Controller */
1820 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1821 config = VR_SVI2_PLANE_2; /* only in merged mode */
1822 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1823 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1824 config = VR_SMIO_PATTERN_1;
1825 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1826 }
1827
1828 /* Set Mvdd Voltage Controller */
1829 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1830 config = VR_SMIO_PATTERN_2;
1831 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1832 }
1833
1834 return 0;
1835}
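/*
 * Editor's sketch -- not driver code. Shows how the per-rail selections made
 * above pack into the 32-bit VRConfig word, one byte-wide field per rail.
 * The EX_ shifts and controller codes below are illustrative placeholders;
 * the authoritative VRCONF_ and VR_ definitions come from the SMU72 headers
 * this file includes.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VRCONF_VDDC_SHIFT	0	/* placeholder */
#define EX_VRCONF_VDDCI_SHIFT	8	/* placeholder */
#define EX_VRCONF_VDDGFX_SHIFT	16	/* placeholder */
#define EX_VRCONF_MVDD_SHIFT	24	/* placeholder */

int main(void)
{
	uint32_t vr_config = 0;
	/* placeholder controller codes, one byte each */
	uint32_t svi2_plane_1 = 1, svi2_plane_2 = 2, smio_pattern_2 = 4;

	/* split mode: VDDGFX on SVI2 plane 1, VDDC on plane 2, MVDD on SMIO */
	vr_config |= svi2_plane_1 << EX_VRCONF_VDDGFX_SHIFT;
	vr_config |= svi2_plane_2 << EX_VRCONF_VDDC_SHIFT;
	vr_config |= smio_pattern_2 << EX_VRCONF_MVDD_SHIFT;

	printf("VRConfig = 0x%08x\n", vr_config);	/* 0x04010002 */
	return 0;
}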
1836
1837static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
1838{
1839 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1840 uint32_t tmp;
1841 int result;
1842
1843 /*
1844 * This is a read-modify-write on the first byte of the ARB table.
1845 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
1846 * is the field 'current'.
1847	 * This solution is ugly, but we never write the whole table, only
1848 * individual fields in it.
1849 * In reality this field should not be in that structure
1850 * but in a soft register.
1851 */
1852 result = smu7_read_smc_sram_dword(hwmgr,
1853 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1854
1855 if (result != 0)
1856 return result;
1857
1858 tmp &= 0x00FFFFFF;
1859 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1860
1861 return smu7_write_smc_sram_dword(hwmgr,
1862 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1863}
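/*
 * Editor's illustration -- not driver code. The masking above keeps the
 * lower three bytes of the dword read back from SMC SRAM and replaces only
 * the top byte with the arbiter-set selector; the selector value used here
 * is a placeholder, not the real MC_CG_ARB_FREQ_F1 definition.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t tmp = 0x12345678;		/* pretend dword from SMC SRAM */
	uint32_t arb_freq_f1 = 0x0b;		/* placeholder selector value */

	tmp &= 0x00FFFFFF;			/* clear the 'current' byte */
	tmp |= arb_freq_f1 << 24;		/* select the F1 arbiter set */

	assert(tmp == 0x0b345678);
	return 0;
}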
1864
1865
1866static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1867{
1868 struct tonga_smumgr *smu_data =
1869 (struct tonga_smumgr *)(hwmgr->smu_backend);
1870 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1871 SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1872 struct phm_ppt_v1_information *table_info =
1873 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1874 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
1875 int i, j, k;
1876 const uint16_t *pdef1, *pdef2;
1877
1878 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
1879 (uint16_t)(cac_dtp_table->usTDP * 256));
1880 dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
1881 (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
1882
1883 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
1884 "Target Operating Temp is out of Range !",
1885 );
1886
1887 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
1888 dpm_table->GpuTjHyst = 8;
1889
1890 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1891
1892 dpm_table->BAPM_TEMP_GRADIENT =
1893 PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
1894 pdef1 = defaults->bapmti_r;
1895 pdef2 = defaults->bapmti_rc;
1896
1897 for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
1898 for (j = 0; j < SMU72_DTE_SOURCES; j++) {
1899 for (k = 0; k < SMU72_DTE_SINKS; k++) {
1900 dpm_table->BAPMTI_R[i][j][k] =
1901 PP_HOST_TO_SMC_US(*pdef1);
1902 dpm_table->BAPMTI_RC[i][j][k] =
1903 PP_HOST_TO_SMC_US(*pdef2);
1904 pdef1++;
1905 pdef2++;
1906 }
1907 }
1908 }
1909
1910 return 0;
1911}
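/*
 * Editor's note -- not driver code. The "* 256" conversions above scale the
 * TDP values into 8.8 fixed point before the byte swap for the SMC. A quick
 * check with a hypothetical 120 W TDP:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t us_tdp = 120;				/* hypothetical TDP in watts */
	uint16_t fixed_8_8 = (uint16_t)(us_tdp * 256);	/* integer part in the high byte */

	assert(fixed_8_8 == 0x7800);			/* 120.0 in 8.8 format */
	return 0;
}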
1912
1913static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
1914{
1915 struct tonga_smumgr *smu_data =
1916 (struct tonga_smumgr *)(hwmgr->smu_backend);
1917 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1918
1919 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
1920 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
1921 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
1922 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
1923
1924 return 0;
1925}
1926
1927static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
1928{
1929 uint16_t tdc_limit;
1930 struct tonga_smumgr *smu_data =
1931 (struct tonga_smumgr *)(hwmgr->smu_backend);
1932 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1933 struct phm_ppt_v1_information *table_info =
1934 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1935
1936	/* The number of TDC fraction bits was changed from 8 to 7
1937	 * for Fiji, as requested by the SMC team.
1938 */
1939 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
1940 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
1941 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
1942 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
1943 defaults->tdc_vddc_throttle_release_limit_perc;
1944 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
1945
1946 return 0;
1947}
1948
1949static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
1950{
1951 struct tonga_smumgr *smu_data =
1952 (struct tonga_smumgr *)(hwmgr->smu_backend);
1953 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1954 uint32_t temp;
1955
1956 if (smu7_read_smc_sram_dword(hwmgr,
1957 fuse_table_offset +
1958 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
1959 (uint32_t *)&temp, SMC_RAM_END))
1960 PP_ASSERT_WITH_CODE(false,
1961			"Attempt to read PmFuses.DW8 "
1962			"(TdcWaterfallCtl) from SMC Failed !",
1963 return -EINVAL);
1964 else
1965 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
1966
1967 return 0;
1968}
1969
1970static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
1971{
1972 int i;
1973 struct tonga_smumgr *smu_data =
1974 (struct tonga_smumgr *)(hwmgr->smu_backend);
1975
1976 /* Currently not used. Set all to zero. */
1977 for (i = 0; i < 16; i++)
1978 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
1979
1980 return 0;
1981}
1982
1983static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
1984{
1985 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1986
1987 if ((hwmgr->thermal_controller.advanceFanControlParameters.
1988 usFanOutputSensitivity & (1 << 15)) ||
1989 (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
1990 hwmgr->thermal_controller.advanceFanControlParameters.
1991 usFanOutputSensitivity = hwmgr->thermal_controller.
1992 advanceFanControlParameters.usDefaultFanOutputSensitivity;
1993
1994 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
1995 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
1996 advanceFanControlParameters.usFanOutputSensitivity);
1997 return 0;
1998}
1999
2000static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
2001{
2002 int i;
2003 struct tonga_smumgr *smu_data =
2004 (struct tonga_smumgr *)(hwmgr->smu_backend);
2005
2006 /* Currently not used. Set all to zero. */
2007 for (i = 0; i < 16; i++)
2008 smu_data->power_tune_table.GnbLPML[i] = 0;
2009
2010 return 0;
2011}
2012
2013static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
2014{
2015 struct tonga_smumgr *smu_data =
2016 (struct tonga_smumgr *)(hwmgr->smu_backend);
2017 struct phm_ppt_v1_information *table_info =
2018 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2019 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
2020 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
2021 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
2022
2023 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
2024 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
2025
2026 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
2027 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
2028 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
2029 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
2030
2031 return 0;
2032}
2033
2034static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
2035{
2036 struct tonga_smumgr *smu_data =
2037 (struct tonga_smumgr *)(hwmgr->smu_backend);
2038 uint32_t pm_fuse_table_offset;
2039
2040 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2041 PHM_PlatformCaps_PowerContainment)) {
2042 if (smu7_read_smc_sram_dword(hwmgr,
2043 SMU72_FIRMWARE_HEADER_LOCATION +
2044 offsetof(SMU72_Firmware_Header, PmFuseTable),
2045 &pm_fuse_table_offset, SMC_RAM_END))
2046 PP_ASSERT_WITH_CODE(false,
2047 "Attempt to get pm_fuse_table_offset Failed !",
2048 return -EINVAL);
2049
2050 /* DW6 */
2051 if (tonga_populate_svi_load_line(hwmgr))
2052 PP_ASSERT_WITH_CODE(false,
2053 "Attempt to populate SviLoadLine Failed !",
2054 return -EINVAL);
2055 /* DW7 */
2056 if (tonga_populate_tdc_limit(hwmgr))
2057 PP_ASSERT_WITH_CODE(false,
2058 "Attempt to populate TDCLimit Failed !",
2059 return -EINVAL);
2060 /* DW8 */
2061 if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
2062 PP_ASSERT_WITH_CODE(false,
2063 "Attempt to populate TdcWaterfallCtl Failed !",
2064 return -EINVAL);
2065
2066 /* DW9-DW12 */
2067 if (tonga_populate_temperature_scaler(hwmgr) != 0)
2068 PP_ASSERT_WITH_CODE(false,
2069 "Attempt to populate LPMLTemperatureScaler Failed !",
2070 return -EINVAL);
2071
2072 /* DW13-DW14 */
2073 if (tonga_populate_fuzzy_fan(hwmgr))
2074 PP_ASSERT_WITH_CODE(false,
2075 "Attempt to populate Fuzzy Fan "
2076 "Control parameters Failed !",
2077 return -EINVAL);
2078
2079 /* DW15-DW18 */
2080 if (tonga_populate_gnb_lpml(hwmgr))
2081 PP_ASSERT_WITH_CODE(false,
2082 "Attempt to populate GnbLPML Failed !",
2083 return -EINVAL);
2084
2085 /* DW20 */
2086 if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
2087 PP_ASSERT_WITH_CODE(
2088 false,
2089 "Attempt to populate BapmVddCBaseLeakage "
2090 "Hi and Lo Sidd Failed !",
2091 return -EINVAL);
2092
2093 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
2094 (uint8_t *)&smu_data->power_tune_table,
2095 sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
2096 PP_ASSERT_WITH_CODE(false,
2097 "Attempt to download PmFuseTable Failed !",
2098 return -EINVAL);
2099 }
2100 return 0;
2101}
2102
2103static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
2104 SMU72_Discrete_MCRegisters *mc_reg_table)
2105{
2106 const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
2107
2108 uint32_t i, j;
2109
2110 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
2111 if (smu_data->mc_reg_table.validflag & 1<<j) {
2112 PP_ASSERT_WITH_CODE(
2113 i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
2114 "Index of mc_reg_table->address[] array "
2115 "out of boundary",
2116 return -EINVAL);
2117 mc_reg_table->address[i].s0 =
2118 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
2119 mc_reg_table->address[i].s1 =
2120 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
2121 i++;
2122 }
2123 }
2124
2125 mc_reg_table->last = (uint8_t)i;
2126
2127 return 0;
2128}
2129
2130/* Convert register values from the driver format to the SMC format */
2131static void tonga_convert_mc_registers(
2132 const struct tonga_mc_reg_entry *entry,
2133 SMU72_Discrete_MCRegisterSet *data,
2134 uint32_t num_entries, uint32_t valid_flag)
2135{
2136 uint32_t i, j;
2137
2138 for (i = 0, j = 0; j < num_entries; j++) {
2139 if (valid_flag & 1<<j) {
2140 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
2141 i++;
2142 }
2143 }
2144}
2145
2146static int tonga_convert_mc_reg_table_entry_to_smc(
2147 struct pp_hwmgr *hwmgr,
2148 const uint32_t memory_clock,
2149 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
2150 )
2151{
2152 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2153 uint32_t i = 0;
2154
2155 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
2156 if (memory_clock <=
2157 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2158 break;
2159 }
2160 }
2161
2162 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
2163 --i;
2164
2165 tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
2166 mc_reg_table_data, smu_data->mc_reg_table.last,
2167 smu_data->mc_reg_table.validflag);
2168
2169 return 0;
2170}
2171
2172static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2173 SMU72_Discrete_MCRegisters *mc_regs)
2174{
2175 int result = 0;
2176 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2177 int res;
2178 uint32_t i;
2179
2180 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2181 res = tonga_convert_mc_reg_table_entry_to_smc(
2182 hwmgr,
2183 data->dpm_table.mclk_table.dpm_levels[i].value,
2184 &mc_regs->data[i]
2185 );
2186
2187 if (0 != res)
2188 result = res;
2189 }
2190
2191 return result;
2192}
2193
2194static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
2195{
2196 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2197 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2198 uint32_t address;
2199 int32_t result;
2200
2201 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
2202 return 0;
2203
2204
2205 memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
2206
2207 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
2208
2209 if (result != 0)
2210 return result;
2211
2212
2213 address = smu_data->smu7_data.mc_reg_table_start +
2214 (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
2215
2216 return smu7_copy_bytes_to_smc(
2217 hwmgr, address,
2218 (uint8_t *)&smu_data->mc_regs.data[0],
2219 sizeof(SMU72_Discrete_MCRegisterSet) *
2220 data->dpm_table.mclk_table.count,
2221 SMC_RAM_END);
2222}
2223
2224static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
2225{
2226 int result;
2227 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2228
2229 memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
2230 result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
2231 PP_ASSERT_WITH_CODE(!result,
2232 "Failed to initialize MCRegTable for the MC register addresses !",
2233 return result;);
2234
2235 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
2236 PP_ASSERT_WITH_CODE(!result,
2237 "Failed to initialize MCRegTable for driver state !",
2238 return result;);
2239
2240 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
2241 (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
2242}
2243
2244static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
2245{
2246 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2247 struct phm_ppt_v1_information *table_info =
2248 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2249
2250 if (table_info &&
2251 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
2252 table_info->cac_dtp_table->usPowerTuneDataSetID)
2253 smu_data->power_tune_defaults =
2254 &tonga_power_tune_data_set_array
2255 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
2256 else
2257 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
2258}
2259
2260static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
2261{
2262 struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2263 struct SMU72_Discrete_GraphicsLevel *levels =
2264 data->smc_state_table.GraphicsLevel;
2265 unsigned min_level = 1;
2266
2267 hwmgr->default_gfx_power_profile.activity_threshold =
2268 be16_to_cpu(levels[0].ActivityLevel);
2269 hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
2270 hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
2271 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2272
2273 hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
2274 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2275
2276 /* Workaround compute SDMA instability: disable lowest SCLK
2277 * DPM level. Optimize compute power profile: Use only highest
2278 * 2 power levels (if more than 2 are available), Hysteresis:
2279 * 0ms up, 5ms down
2280 */
2281 if (data->smc_state_table.GraphicsDpmLevelCount > 2)
2282 min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
2283 else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
2284 min_level = 1;
2285 else
2286 min_level = 0;
2287 hwmgr->default_compute_power_profile.min_sclk =
2288 be32_to_cpu(levels[min_level].SclkFrequency);
2289 hwmgr->default_compute_power_profile.up_hyst = 0;
2290 hwmgr->default_compute_power_profile.down_hyst = 5;
2291
2292 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2293 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2294}
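/*
 * Editor's illustration -- not driver code. The min_level selection above
 * for the default compute profile: with a hypothetical 8-level SCLK table,
 * min_sclk ends up at index 6, leaving only the top two levels for compute.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t level_count = 8;	/* hypothetical GraphicsDpmLevelCount */
	unsigned int min_level;

	if (level_count > 2)
		min_level = level_count - 2;
	else if (level_count == 2)
		min_level = 1;
	else
		min_level = 0;

	assert(min_level == 6);
	return 0;
}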
2295
2296static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2297{
2298 int result;
2299 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2300 struct tonga_smumgr *smu_data =
2301 (struct tonga_smumgr *)(hwmgr->smu_backend);
2302 SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2303 struct phm_ppt_v1_information *table_info =
2304 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2305
2306 uint8_t i;
2307 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2308
2309
2310 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
2311
2312 tonga_initialize_power_tune_defaults(hwmgr);
2313
2314 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
2315 tonga_populate_smc_voltage_tables(hwmgr, table);
2316
2317 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2318 PHM_PlatformCaps_AutomaticDCTransition))
2319 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2320
2321
2322 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2323 PHM_PlatformCaps_StepVddc))
2324 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2325
2326 if (data->is_memory_gddr5)
2327 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2328
2329 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
2330
2331 if (i == 1 || i == 0)
2332 table->SystemFlags |= 0x40;
2333
2334 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
2335 result = tonga_populate_ulv_state(hwmgr, table);
2336 PP_ASSERT_WITH_CODE(!result,
2337 "Failed to initialize ULV state !",
2338 return result;);
2339
2340 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2341 ixCG_ULV_PARAMETER, 0x40035);
2342 }
2343
2344 result = tonga_populate_smc_link_level(hwmgr, table);
2345 PP_ASSERT_WITH_CODE(!result,
2346 "Failed to initialize Link Level !", return result);
2347
2348 result = tonga_populate_all_graphic_levels(hwmgr);
2349 PP_ASSERT_WITH_CODE(!result,
2350 "Failed to initialize Graphics Level !", return result);
2351
2352 result = tonga_populate_all_memory_levels(hwmgr);
2353 PP_ASSERT_WITH_CODE(!result,
2354 "Failed to initialize Memory Level !", return result);
2355
2356 result = tonga_populate_smc_acpi_level(hwmgr, table);
2357 PP_ASSERT_WITH_CODE(!result,
2358 "Failed to initialize ACPI Level !", return result);
2359
2360 result = tonga_populate_smc_vce_level(hwmgr, table);
2361 PP_ASSERT_WITH_CODE(!result,
2362 "Failed to initialize VCE Level !", return result);
2363
2364 result = tonga_populate_smc_acp_level(hwmgr, table);
2365 PP_ASSERT_WITH_CODE(!result,
2366 "Failed to initialize ACP Level !", return result);
2367
2368 result = tonga_populate_smc_samu_level(hwmgr, table);
2369 PP_ASSERT_WITH_CODE(!result,
2370 "Failed to initialize SAMU Level !", return result);
2371
2372 /* Since only the initial state is completely set up at this
2373 * point (the other states are just copies of the boot state) we only
2374 * need to populate the ARB settings for the initial state.
2375 */
2376 result = tonga_program_memory_timing_parameters(hwmgr);
2377 PP_ASSERT_WITH_CODE(!result,
2378 "Failed to Write ARB settings for the initial state.",
2379 return result;);
2380
2381 result = tonga_populate_smc_uvd_level(hwmgr, table);
2382 PP_ASSERT_WITH_CODE(!result,
2383 "Failed to initialize UVD Level !", return result);
2384
2385 result = tonga_populate_smc_boot_level(hwmgr, table);
2386 PP_ASSERT_WITH_CODE(!result,
2387 "Failed to initialize Boot Level !", return result);
2388
2389	result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2390 PP_ASSERT_WITH_CODE(!result,
2391 "Failed to populate BAPM Parameters !", return result);
2392
2393 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2394 PHM_PlatformCaps_ClockStretcher)) {
2395 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2396 PP_ASSERT_WITH_CODE(!result,
2397 "Failed to populate Clock Stretcher Data Table !",
2398 return result;);
2399 }
2400 table->GraphicsVoltageChangeEnable = 1;
2401 table->GraphicsThermThrottleEnable = 1;
2402 table->GraphicsInterval = 1;
2403 table->VoltageInterval = 1;
2404 table->ThermalInterval = 1;
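	/* Thermal limits are handed to the SMC in Q8.8 fixed point
	 * (degrees C times SMU7_Q88_FORMAT_CONVERSION_UNIT); the low limit
	 * sits one degree below the target operating temperature.
	 */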
2405 table->TemperatureLimitHigh =
2406 table_info->cac_dtp_table->usTargetOperatingTemp *
2407 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2408 table->TemperatureLimitLow =
2409 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2410 SMU7_Q88_FORMAT_CONVERSION_UNIT;
2411 table->MemoryVoltageChangeEnable = 1;
2412 table->MemoryInterval = 1;
2413 table->VoltageResponseTime = 0;
2414 table->PhaseResponseTime = 0;
2415 table->MemoryThermThrottleEnable = 1;
2416
2417	/*
2418	 * Cail reads the current link status and reports it as the cap (we
2419	 * cannot change this because of some previous issues we had).
2420	 * SMC drops the link status to the lowest level once PowerPlay
2421	 * enables DPM. After PnP or toggling CrossFire the driver is reloaded,
2422	 * but this time Cail reads the link status that the SMC set to low
2423	 * and reports it as the cap to PowerPlay.
2424	 * To avoid this, set PCIeBootLinkLevel to the highest DPM level.
2425	 */
2426 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2427 "There must be 1 or more PCIE levels defined in PPTable.",
2428 return -EINVAL);
2429
2430 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
2431
2432 table->PCIeGenInterval = 1;
2433
2434 result = tonga_populate_vr_config(hwmgr, table);
2435 PP_ASSERT_WITH_CODE(!result,
2436 "Failed to populate VRConfig setting !", return result);
2437
2438 table->ThermGpio = 17;
2439 table->SclkStepSize = 0x4000;
2440
2441 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
2442 &gpio_pin_assignment)) {
2443 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2444 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2445 PHM_PlatformCaps_RegulatorHot);
2446 } else {
2447 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2448 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2449 PHM_PlatformCaps_RegulatorHot);
2450 }
2451
2452 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
2453 &gpio_pin_assignment)) {
2454 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2455 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2456 PHM_PlatformCaps_AutomaticDCTransition);
2457 } else {
2458 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2459 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2460 PHM_PlatformCaps_AutomaticDCTransition);
2461 }
2462
2463 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2464 PHM_PlatformCaps_Falcon_QuickTransition);
2465
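	/* The block below is compiled out (if (0)), so Falcon quick
	 * transition stays disabled and AC/DC auto transition keeps whatever
	 * state the GPIO probe above selected.
	 */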
2466 if (0) {
2467 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2468 PHM_PlatformCaps_AutomaticDCTransition);
2469 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2470 PHM_PlatformCaps_Falcon_QuickTransition);
2471 }
2472
2473 if (atomctrl_get_pp_assign_pin(hwmgr,
2474 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
2475 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2476 PHM_PlatformCaps_ThermalOutGPIO);
2477
2478 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2479
2480 table->ThermOutPolarity =
2481 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
2482 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
2483
2484 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
2485
2486		/* If required, combine VRHot/PCC with the thermal out GPIO. */
2487 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2488 PHM_PlatformCaps_RegulatorHot) &&
2489 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2490 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
2491 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
2492 }
2493 } else {
2494 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2495 PHM_PlatformCaps_ThermalOutGPIO);
2496
2497 table->ThermOutGpio = 17;
2498 table->ThermOutPolarity = 1;
2499 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
2500 }
2501
2502 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
2503 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2504
2505 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2506 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2507 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2508 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2509 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2510 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2511 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2512 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2513 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2514
2515	/* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.). */
2516 result = smu7_copy_bytes_to_smc(
2517 hwmgr,
2518 smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
2519 (uint8_t *)&(table->SystemFlags),
2520 sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
2521 SMC_RAM_END);
2522
2523 PP_ASSERT_WITH_CODE(!result,
2524 "Failed to upload dpm data to SMC memory !", return result;);
2525
2526 result = tonga_init_arb_table_index(hwmgr);
2527 PP_ASSERT_WITH_CODE(!result,
2528 "Failed to upload arb data to SMC memory !", return result);
2529
2530	result = tonga_populate_pm_fuses(hwmgr);
2531 PP_ASSERT_WITH_CODE((!result),
2532 "Failed to populate initialize pm fuses !", return result);
2533
2534 result = tonga_populate_initial_mc_reg_table(hwmgr);
2535 PP_ASSERT_WITH_CODE((!result),
2536 "Failed to populate initialize MC Reg table !", return result);
2537
2538 tonga_save_default_power_profile(hwmgr);
2539
2540 return 0;
2541}
2542
2543static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2544{
2545 struct tonga_smumgr *smu_data =
2546 (struct tonga_smumgr *)(hwmgr->smu_backend);
2547 SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2548 uint32_t duty100;
2549 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2550 uint16_t fdo_min, slope1, slope2;
2551 uint32_t reference_clock;
2552 int res;
2553 uint64_t tmp64;
2554
2555 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2556 PHM_PlatformCaps_MicrocodeFanControl))
2557 return 0;
2558
2559 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2560 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2561 PHM_PlatformCaps_MicrocodeFanControl);
2562 return 0;
2563 }
2564
2565 if (0 == smu_data->smu7_data.fan_table_start) {
2566 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2567 PHM_PlatformCaps_MicrocodeFanControl);
2568 return 0;
2569 }
2570
2571 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
2572 CGS_IND_REG__SMC,
2573 CG_FDO_CTRL1, FMAX_DUTY100);
2574
2575 if (0 == duty100) {
2576 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2577 PHM_PlatformCaps_MicrocodeFanControl);
2578 return 0;
2579 }
2580
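	/* usPWMMin is assumed to be a percentage scaled by 100, hence the
	 * division by 10000 to turn it into FDO duty units (0..duty100).
	 */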
2581 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2582 do_div(tmp64, 10000);
2583 fdo_min = (uint16_t)tmp64;
2584
2585 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2586 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2587 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2588 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2589
2590 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2591 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2592 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2593 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2594
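	/* Fan-curve slopes: PWM delta per degree over each segment, scaled by
	 * 16; the "+ 50" before the "/ 100" rounds to the nearest integer.
	 */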
2595 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2596 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2597
2598 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2599 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2600 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2601
2602 fan_table.Slope1 = cpu_to_be16(slope1);
2603 fan_table.Slope2 = cpu_to_be16(slope2);
2604
2605 fan_table.FdoMin = cpu_to_be16(fdo_min);
2606
2607 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2608
2609 fan_table.HystUp = cpu_to_be16(1);
2610
2611 fan_table.HystSlope = cpu_to_be16(1);
2612
2613 fan_table.TempRespLim = cpu_to_be16(5);
2614
2615 reference_clock = smu7_get_xclk(hwmgr);
2616
2617 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2618
2619 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2620
2621 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2622
2623 fan_table.FanControl_GL_Flag = 1;
2624
2625 res = smu7_copy_bytes_to_smc(hwmgr,
2626 smu_data->smu7_data.fan_table_start,
2627 (uint8_t *)&fan_table,
2628 (uint32_t)sizeof(fan_table),
2629 SMC_RAM_END);
2630
2631	return res;
2632}
2633
2634
2635static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2636{
2637 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2638
2639 if (data->need_update_smu7_dpm_table &
2640 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2641 return tonga_program_memory_timing_parameters(hwmgr);
2642
2643 return 0;
2644}
2645
2646static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2647{
2648 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2649 struct tonga_smumgr *smu_data =
2650 (struct tonga_smumgr *)(hwmgr->smu_backend);
2651
2652 int result = 0;
2653 uint32_t low_sclk_interrupt_threshold = 0;
2654
2655 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2656 PHM_PlatformCaps_SclkThrottleLowNotification)
2657 && (hwmgr->gfx_arbiter.sclk_threshold !=
2658 data->low_sclk_interrupt_threshold)) {
2659 data->low_sclk_interrupt_threshold =
2660 hwmgr->gfx_arbiter.sclk_threshold;
2661 low_sclk_interrupt_threshold =
2662 data->low_sclk_interrupt_threshold;
2663
2664 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2665
2666 result = smu7_copy_bytes_to_smc(
2667 hwmgr,
2668 smu_data->smu7_data.dpm_table_start +
2669 offsetof(SMU72_Discrete_DpmTable,
2670 LowSclkInterruptThreshold),
2671 (uint8_t *)&low_sclk_interrupt_threshold,
2672 sizeof(uint32_t),
2673 SMC_RAM_END);
2674 }
2675
2676 result = tonga_update_and_upload_mc_reg_table(hwmgr);
2677
2678 PP_ASSERT_WITH_CODE((!result),
2679 "Failed to upload MC reg table !",
2680 return result);
2681
2682 result = tonga_program_mem_timing_parameters(hwmgr);
2683 PP_ASSERT_WITH_CODE((result == 0),
2684 "Failed to program memory timing parameters !",
2685 );
2686
2687 return result;
2688}
2689
2690static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
2691{
2692 switch (type) {
2693 case SMU_SoftRegisters:
2694 switch (member) {
2695 case HandshakeDisables:
2696 return offsetof(SMU72_SoftRegisters, HandshakeDisables);
2697 case VoltageChangeTimeout:
2698 return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
2699 case AverageGraphicsActivity:
2700 return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
2701 case PreVBlankGap:
2702 return offsetof(SMU72_SoftRegisters, PreVBlankGap);
2703 case VBlankTimeout:
2704 return offsetof(SMU72_SoftRegisters, VBlankTimeout);
2705 case UcodeLoadStatus:
2706 return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
2707 case DRAM_LOG_ADDR_H:
2708 return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
2709 case DRAM_LOG_ADDR_L:
2710 return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
2711 case DRAM_LOG_PHY_ADDR_H:
2712 return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2713 case DRAM_LOG_PHY_ADDR_L:
2714 return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2715 case DRAM_LOG_BUFF_SIZE:
2716 return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2717		}
		break;
2718	case SMU_Discrete_DpmTable:
2719 switch (member) {
2720 case UvdBootLevel:
2721 return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2722 case VceBootLevel:
2723 return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2724 case SamuBootLevel:
2725 return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2726 case LowSclkInterruptThreshold:
2727 return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
2728 }
2729 }
2730 pr_warn("can't get the offset of type %x member %x\n", type, member);
2731 return 0;
2732}
2733
2734static uint32_t tonga_get_mac_definition(uint32_t value)
2735{
2736 switch (value) {
2737 case SMU_MAX_LEVELS_GRAPHICS:
2738 return SMU72_MAX_LEVELS_GRAPHICS;
2739 case SMU_MAX_LEVELS_MEMORY:
2740 return SMU72_MAX_LEVELS_MEMORY;
2741 case SMU_MAX_LEVELS_LINK:
2742 return SMU72_MAX_LEVELS_LINK;
2743 case SMU_MAX_ENTRIES_SMIO:
2744 return SMU72_MAX_ENTRIES_SMIO;
2745 case SMU_MAX_LEVELS_VDDC:
2746 return SMU72_MAX_LEVELS_VDDC;
2747 case SMU_MAX_LEVELS_VDDGFX:
2748 return SMU72_MAX_LEVELS_VDDGFX;
2749 case SMU_MAX_LEVELS_VDDCI:
2750 return SMU72_MAX_LEVELS_VDDCI;
2751 case SMU_MAX_LEVELS_MVDD:
2752 return SMU72_MAX_LEVELS_MVDD;
2753 }
2754 pr_warn("can't get the mac value %x\n", value);
2755
2756 return 0;
2757}
2758
2759static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2760{
2761 struct tonga_smumgr *smu_data =
2762 (struct tonga_smumgr *)(hwmgr->smu_backend);
2763 uint32_t mm_boot_level_offset, mm_boot_level_value;
2764 struct phm_ppt_v1_information *table_info =
2765 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2766
2767 smu_data->smc_state_table.UvdBootLevel = 0;
2768 if (table_info->mm_dep_table->count > 0)
2769 smu_data->smc_state_table.UvdBootLevel =
2770 (uint8_t) (table_info->mm_dep_table->count - 1);
2771 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2772 offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
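	/* Align the byte offset down to a dword boundary: the indirect SMC
	 * accessors work on whole dwords, so the boot-level byte is updated
	 * via a read-modify-write of its containing dword.
	 */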
2773 mm_boot_level_offset /= 4;
2774 mm_boot_level_offset *= 4;
2775 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2776 CGS_IND_REG__SMC, mm_boot_level_offset);
2777 mm_boot_level_value &= 0x00FFFFFF;
2778 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2779 cgs_write_ind_register(hwmgr->device,
2780 CGS_IND_REG__SMC,
2781 mm_boot_level_offset, mm_boot_level_value);
2782
2783 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2784 PHM_PlatformCaps_UVDDPM) ||
2785 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2786 PHM_PlatformCaps_StablePState))
2787 smum_send_msg_to_smc_with_parameter(hwmgr,
2788 PPSMC_MSG_UVDDPM_SetEnabledMask,
2789 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2790 return 0;
2791}
2792
2793static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2794{
2795 struct tonga_smumgr *smu_data =
2796 (struct tonga_smumgr *)(hwmgr->smu_backend);
2797 uint32_t mm_boot_level_offset, mm_boot_level_value;
2798 struct phm_ppt_v1_information *table_info =
2799 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2800
2801
2802 smu_data->smc_state_table.VceBootLevel =
2803 (uint8_t) (table_info->mm_dep_table->count - 1);
2804
2805 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2806 offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2807 mm_boot_level_offset /= 4;
2808 mm_boot_level_offset *= 4;
2809 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2810 CGS_IND_REG__SMC, mm_boot_level_offset);
2811 mm_boot_level_value &= 0xFF00FFFF;
2812 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2813 cgs_write_ind_register(hwmgr->device,
2814 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2815
2816 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2817 PHM_PlatformCaps_StablePState))
2818 smum_send_msg_to_smc_with_parameter(hwmgr,
2819 PPSMC_MSG_VCEDPM_SetEnabledMask,
2820 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2821 return 0;
2822}
2823
2824static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
2825{
2826 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2827 uint32_t mm_boot_level_offset, mm_boot_level_value;
2828
2829 smu_data->smc_state_table.SamuBootLevel = 0;
2830 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2831 offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
2832
2833 mm_boot_level_offset /= 4;
2834 mm_boot_level_offset *= 4;
2835 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2836 CGS_IND_REG__SMC, mm_boot_level_offset);
2837 mm_boot_level_value &= 0xFFFFFF00;
2838 mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
2839 cgs_write_ind_register(hwmgr->device,
2840 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2841
2842 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2843 PHM_PlatformCaps_StablePState))
2844 smum_send_msg_to_smc_with_parameter(hwmgr,
2845 PPSMC_MSG_SAMUDPM_SetEnabledMask,
2846 (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
2847 return 0;
2848}
2849
2850static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2851{
2852 switch (type) {
2853 case SMU_UVD_TABLE:
2854 tonga_update_uvd_smc_table(hwmgr);
2855 break;
2856 case SMU_VCE_TABLE:
2857 tonga_update_vce_smc_table(hwmgr);
2858 break;
2859 case SMU_SAMU_TABLE:
2860 tonga_update_samu_smc_table(hwmgr);
2861 break;
2862 default:
2863 break;
2864 }
2865 return 0;
2866}
2867
2868static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
2869{
2870 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2871 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2872
2873 uint32_t tmp;
2874 int result;
2875 bool error = false;
2876
2877 result = smu7_read_smc_sram_dword(hwmgr,
2878 SMU72_FIRMWARE_HEADER_LOCATION +
2879 offsetof(SMU72_Firmware_Header, DpmTable),
2880 &tmp, SMC_RAM_END);
2881
2882 if (!result)
2883 smu_data->smu7_data.dpm_table_start = tmp;
2884
2885 error |= (result != 0);
2886
2887 result = smu7_read_smc_sram_dword(hwmgr,
2888 SMU72_FIRMWARE_HEADER_LOCATION +
2889 offsetof(SMU72_Firmware_Header, SoftRegisters),
2890 &tmp, SMC_RAM_END);
2891
2892 if (!result) {
2893 data->soft_regs_start = tmp;
2894 smu_data->smu7_data.soft_regs_start = tmp;
2895 }
2896
2897 error |= (result != 0);
2898
2899
2900 result = smu7_read_smc_sram_dword(hwmgr,
2901 SMU72_FIRMWARE_HEADER_LOCATION +
2902 offsetof(SMU72_Firmware_Header, mcRegisterTable),
2903 &tmp, SMC_RAM_END);
2904
2905 if (!result)
2906 smu_data->smu7_data.mc_reg_table_start = tmp;
2907
2908 result = smu7_read_smc_sram_dword(hwmgr,
2909 SMU72_FIRMWARE_HEADER_LOCATION +
2910 offsetof(SMU72_Firmware_Header, FanTable),
2911 &tmp, SMC_RAM_END);
2912
2913 if (!result)
2914 smu_data->smu7_data.fan_table_start = tmp;
2915
2916 error |= (result != 0);
2917
2918 result = smu7_read_smc_sram_dword(hwmgr,
2919 SMU72_FIRMWARE_HEADER_LOCATION +
2920 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
2921 &tmp, SMC_RAM_END);
2922
2923 if (!result)
2924 smu_data->smu7_data.arb_table_start = tmp;
2925
2926 error |= (result != 0);
2927
2928 result = smu7_read_smc_sram_dword(hwmgr,
2929 SMU72_FIRMWARE_HEADER_LOCATION +
2930 offsetof(SMU72_Firmware_Header, Version),
2931 &tmp, SMC_RAM_END);
2932
2933 if (!result)
2934 hwmgr->microcode_version_info.SMC = tmp;
2935
2936 error |= (result != 0);
2937
2938 return error ? 1 : 0;
2939}
2940
2941/*---------------------------MC----------------------------*/
2942
2943static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2944{
2945 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2946}
2947
2948static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2949{
2950 bool result = true;
2951
2952 switch (in_reg) {
2953 case mmMC_SEQ_RAS_TIMING:
2954 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2955 break;
2956
2957 case mmMC_SEQ_DLL_STBY:
2958 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2959 break;
2960
2961 case mmMC_SEQ_G5PDX_CMD0:
2962 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2963 break;
2964
2965 case mmMC_SEQ_G5PDX_CMD1:
2966 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2967 break;
2968
2969 case mmMC_SEQ_G5PDX_CTRL:
2970 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2971 break;
2972
2973 case mmMC_SEQ_CAS_TIMING:
2974 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2975 break;
2976
2977 case mmMC_SEQ_MISC_TIMING:
2978 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2979 break;
2980
2981 case mmMC_SEQ_MISC_TIMING2:
2982 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2983 break;
2984
2985 case mmMC_SEQ_PMG_DVS_CMD:
2986 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2987 break;
2988
2989 case mmMC_SEQ_PMG_DVS_CTL:
2990 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2991 break;
2992
2993 case mmMC_SEQ_RD_CTL_D0:
2994 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2995 break;
2996
2997 case mmMC_SEQ_RD_CTL_D1:
2998 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2999 break;
3000
3001 case mmMC_SEQ_WR_CTL_D0:
3002 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
3003 break;
3004
3005 case mmMC_SEQ_WR_CTL_D1:
3006 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
3007 break;
3008
3009 case mmMC_PMG_CMD_EMRS:
3010 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
3011 break;
3012
3013 case mmMC_PMG_CMD_MRS:
3014 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
3015 break;
3016
3017 case mmMC_PMG_CMD_MRS1:
3018 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
3019 break;
3020
3021 case mmMC_SEQ_PMG_TIMING:
3022 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
3023 break;
3024
3025 case mmMC_PMG_CMD_MRS2:
3026 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
3027 break;
3028
3029 case mmMC_SEQ_WR_CTL_2:
3030 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
3031 break;
3032
3033 default:
3034 result = false;
3035 break;
3036 }
3037
3038 return result;
3039}
3040
3041static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
3042{
3043 uint32_t i;
3044 uint16_t address;
3045
3046 for (i = 0; i < table->last; i++) {
3047 table->mc_reg_address[i].s0 =
3048 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
3049 &address) ?
3050 address :
3051 table->mc_reg_address[i].s1;
3052 }
3053 return 0;
3054}
3055
3056static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
3057 struct tonga_mc_reg_table *ni_table)
3058{
3059 uint8_t i, j;
3060
3061 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3062 "Invalid VramInfo table.", return -EINVAL);
3063 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
3064 "Invalid VramInfo table.", return -EINVAL);
3065
3066 for (i = 0; i < table->last; i++)
3067 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3068
3069 ni_table->last = table->last;
3070
3071 for (i = 0; i < table->num_entries; i++) {
3072 ni_table->mc_reg_table_entry[i].mclk_max =
3073 table->mc_reg_table_entry[i].mclk_max;
3074 for (j = 0; j < table->last; j++) {
3075 ni_table->mc_reg_table_entry[i].mc_data[j] =
3076 table->mc_reg_table_entry[i].mc_data[j];
3077 }
3078 }
3079
3080 ni_table->num_entries = table->num_entries;
3081
3082 return 0;
3083}
3084
3085static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
3086 struct tonga_mc_reg_table *table)
3087{
3088 uint8_t i, j, k;
3089 uint32_t temp_reg;
3090 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3091
3092 for (i = 0, j = table->last; i < table->last; i++) {
3093 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3094 "Invalid VramInfo table.", return -EINVAL);
3095
3096 switch (table->mc_reg_address[i].s1) {
3097
3098 case mmMC_SEQ_MISC1:
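			/* MC_SEQ_MISC1 packs EMRS/MRS data: append derived
			 * entries for MC_PMG_CMD_EMRS and MC_PMG_CMD_MRS
			 * (plus MC_PMG_AUTO_CMD on non-GDDR5 boards), built
			 * from the high/low halves of the MISC1 value and,
			 * for EMRS/MRS, the currently programmed register
			 * contents.
			 */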
3099 temp_reg = cgs_read_register(hwmgr->device,
3100 mmMC_PMG_CMD_EMRS);
3101 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
3102 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
3103 for (k = 0; k < table->num_entries; k++) {
3104 table->mc_reg_table_entry[k].mc_data[j] =
3105 ((temp_reg & 0xffff0000)) |
3106 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3107 }
3108 j++;
3109 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3110 "Invalid VramInfo table.", return -EINVAL);
3111
3112 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
3113 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
3114 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
3115 for (k = 0; k < table->num_entries; k++) {
3116 table->mc_reg_table_entry[k].mc_data[j] =
3117 (temp_reg & 0xffff0000) |
3118 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3119
3120 if (!data->is_memory_gddr5)
3121 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3122 }
3123 j++;
3124 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3125 "Invalid VramInfo table.", return -EINVAL);
3126
3127 if (!data->is_memory_gddr5) {
3128 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
3129 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
3130 for (k = 0; k < table->num_entries; k++)
3131 table->mc_reg_table_entry[k].mc_data[j] =
3132 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3133 j++;
3134 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3135 "Invalid VramInfo table.", return -EINVAL);
3136 }
3137
3138 break;
3139
3140 case mmMC_SEQ_RESERVE_M:
3141 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
3142 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
3143 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
3144 for (k = 0; k < table->num_entries; k++) {
3145 table->mc_reg_table_entry[k].mc_data[j] =
3146 (temp_reg & 0xffff0000) |
3147 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3148 }
3149 j++;
3150 PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
3151 "Invalid VramInfo table.", return -EINVAL);
3152 break;
3153
3154 default:
3155 break;
3156 }
3157
3158 }
3159
3160 table->last = j;
3161
3162 return 0;
3163}
3164
3165static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
3166{
3167 uint8_t i, j;
3168
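	/* A register column is marked valid (and therefore uploaded per DPM
	 * level) only if its value differs between at least two adjacent
	 * table entries; constant columns are skipped.
	 */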
3169 for (i = 0; i < table->last; i++) {
3170 for (j = 1; j < table->num_entries; j++) {
3171 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3172 table->mc_reg_table_entry[j].mc_data[i]) {
3173 table->validflag |= (1<<i);
3174 break;
3175 }
3176 }
3177 }
3178
3179 return 0;
3180}
3181
3182static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3183{
3184 int result;
3185 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
3186 pp_atomctrl_mc_reg_table *table;
3187 struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
3188 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3189
3190 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
3191
3192 if (table == NULL)
3193 return -ENOMEM;
3194
3195 /* Program additional LP registers that are no longer programmed by VBIOS */
3196 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
3197 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
3198 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
3199 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
3200 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
3201 cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
3202 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
3203 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
3204 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
3205 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
3206 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
3207 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
3208 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
3209 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
3210 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
3211 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
3212 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
3213 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
3214 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
3215 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
3216 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
3217 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
3218 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
3219 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
3220 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
3221 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
3222 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
3223 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
3224 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
3225 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
3226 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
3227 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
3228 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
3229 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
3230 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
3231 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
3232 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
3233 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
3234 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
3235 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
3236
3237 memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
3238
3239 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
3240
3241 if (!result)
3242 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
3243
3244 if (!result) {
3245 tonga_set_s0_mc_reg_index(ni_table);
3246 result = tonga_set_mc_special_registers(hwmgr, ni_table);
3247 }
3248
3249 if (!result)
3250 tonga_set_valid_flag(ni_table);
3251
3252 kfree(table);
3253
3254 return result;
3255}
3256
3257static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
3258{
3259 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
3260 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
3261 ? true : false;
3262}
3263
3264static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
3265 struct amd_pp_profile *request)
3266{
3267 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
3268 (hwmgr->smu_backend);
3269 struct SMU72_Discrete_GraphicsLevel *levels =
3270 smu_data->smc_state_table.GraphicsLevel;
3271 uint32_t array = smu_data->smu7_data.dpm_table_start +
3272 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
3273 uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
3274 SMU72_MAX_LEVELS_GRAPHICS;
3275 uint32_t i;
3276
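	/* Apply the requested profile's activity threshold and up/down
	 * hysteresis to every populated graphics DPM level, then re-upload
	 * the whole GraphicsLevel array to SMC RAM.
	 */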
3277 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
3278 levels[i].ActivityLevel =
3279 cpu_to_be16(request->activity_threshold);
3280 levels[i].EnabledForActivity = 1;
3281 levels[i].UpHyst = request->up_hyst;
3282 levels[i].DownHyst = request->down_hyst;
3283 }
3284
3285 return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
3286 array_size, SMC_RAM_END);
3287}
3288
190const struct pp_smumgr_func tonga_smu_funcs = { 3289const struct pp_smumgr_func tonga_smu_funcs = {
191 .smu_init = &tonga_smu_init, 3290 .smu_init = &tonga_smu_init,
192 .smu_fini = &smu7_smu_fini, 3291 .smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 8c4f761d5bc8..5d70a00348e2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -25,8 +25,26 @@
25#define _TONGA_SMUMGR_H_ 25#define _TONGA_SMUMGR_H_
26 26
27#include "smu72_discrete.h" 27#include "smu72_discrete.h"
28
29#include "smu7_smumgr.h" 28#include "smu7_smumgr.h"
29#include "smu72.h"
30
31
32#define ASICID_IS_TONGA_P(wDID, bRID) \
33 (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
34 || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
35
36struct tonga_pt_defaults {
37 uint8_t svi_load_line_en;
38 uint8_t svi_load_line_vddC;
39 uint8_t tdc_vddc_throttle_release_limit_perc;
40 uint8_t tdc_mawt;
41 uint8_t tdc_waterfall_ctl;
42 uint8_t dte_ambient_temp_base;
43 uint32_t display_cac;
44 uint32_t bapm_temp_gradient;
45 uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
46 uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
47};
30 48
31struct tonga_mc_reg_entry { 49struct tonga_mc_reg_entry {
32 uint32_t mclk_max; 50 uint32_t mclk_max;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 08e1332d814a..e4d3b4ec4e92 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
133 entity->rq = rq; 133 entity->rq = rq;
134 entity->sched = sched; 134 entity->sched = sched;
135 135
136 spin_lock_init(&entity->rq_lock);
136 spin_lock_init(&entity->queue_lock); 137 spin_lock_init(&entity->queue_lock);
137 r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL); 138 r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
138 if (r) 139 if (r)
@@ -204,7 +205,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
204void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 205void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
205 struct amd_sched_entity *entity) 206 struct amd_sched_entity *entity)
206{ 207{
207 struct amd_sched_rq *rq = entity->rq;
208 int r; 208 int r;
209 209
210 if (!amd_sched_entity_is_initialized(sched, entity)) 210 if (!amd_sched_entity_is_initialized(sched, entity))
@@ -218,7 +218,7 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
218 else 218 else
219 r = wait_event_killable(sched->job_scheduled, 219 r = wait_event_killable(sched->job_scheduled,
220 amd_sched_entity_is_idle(entity)); 220 amd_sched_entity_is_idle(entity));
221 amd_sched_rq_remove_entity(rq, entity); 221 amd_sched_entity_set_rq(entity, NULL);
222 if (r) { 222 if (r) {
223 struct amd_sched_job *job; 223 struct amd_sched_job *job;
224 224
@@ -257,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
257 dma_fence_put(f); 257 dma_fence_put(f);
258} 258}
259 259
260void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
261 struct amd_sched_rq *rq)
262{
263 if (entity->rq == rq)
264 return;
265
266 spin_lock(&entity->rq_lock);
267
268 if (entity->rq)
269 amd_sched_rq_remove_entity(entity->rq, entity);
270
271 entity->rq = rq;
272 if (rq)
273 amd_sched_rq_add_entity(rq, entity);
274
275 spin_unlock(&entity->rq_lock);
276}
277
260bool amd_sched_dependency_optimized(struct dma_fence* fence, 278bool amd_sched_dependency_optimized(struct dma_fence* fence,
261 struct amd_sched_entity *entity) 279 struct amd_sched_entity *entity)
262{ 280{
@@ -354,7 +372,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
354 /* first job wakes up scheduler */ 372 /* first job wakes up scheduler */
355 if (first) { 373 if (first) {
356 /* Add the entity to the run queue */ 374 /* Add the entity to the run queue */
375 spin_lock(&entity->rq_lock);
357 amd_sched_rq_add_entity(entity->rq, entity); 376 amd_sched_rq_add_entity(entity->rq, entity);
377 spin_unlock(&entity->rq_lock);
358 amd_sched_wakeup(sched); 378 amd_sched_wakeup(sched);
359 } 379 }
360 return added; 380 return added;
@@ -386,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
386 schedule_delayed_work(&next->work_tdr, sched->timeout); 406 schedule_delayed_work(&next->work_tdr, sched->timeout);
387 } 407 }
388 spin_unlock(&sched->job_list_lock); 408 spin_unlock(&sched->job_list_lock);
409 dma_fence_put(&s_job->s_fence->finished);
389 sched->ops->free_job(s_job); 410 sched->ops->free_job(s_job);
390} 411}
391 412
@@ -566,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
566 container_of(cb, struct amd_sched_fence, cb); 587 container_of(cb, struct amd_sched_fence, cb);
567 struct amd_gpu_scheduler *sched = s_fence->sched; 588 struct amd_gpu_scheduler *sched = s_fence->sched;
568 589
590 dma_fence_get(&s_fence->finished);
569 atomic_dec(&sched->hw_rq_count); 591 atomic_dec(&sched->hw_rq_count);
570 amd_sched_fence_finished(s_fence); 592 amd_sched_fence_finished(s_fence);
571 593
@@ -618,9 +640,6 @@ static int amd_sched_main(void *param)
618 fence = sched->ops->run_job(sched_job); 640 fence = sched->ops->run_job(sched_job);
619 amd_sched_fence_scheduled(s_fence); 641 amd_sched_fence_scheduled(s_fence);
620 642
621 /* amd_sched_process_job drops the job's reference of the fence. */
622 sched_job->s_fence = NULL;
623
624 if (fence) { 643 if (fence) {
625 s_fence->parent = dma_fence_get(fence); 644 s_fence->parent = dma_fence_get(fence);
626 r = dma_fence_add_callback(fence, &s_fence->cb, 645 r = dma_fence_add_callback(fence, &s_fence->cb,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f9d8f28efd16..52c8e5447624 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -39,6 +39,7 @@ struct amd_sched_rq;
39struct amd_sched_entity { 39struct amd_sched_entity {
40 struct list_head list; 40 struct list_head list;
41 struct amd_sched_rq *rq; 41 struct amd_sched_rq *rq;
42 spinlock_t rq_lock;
42 struct amd_gpu_scheduler *sched; 43 struct amd_gpu_scheduler *sched;
43 44
44 spinlock_t queue_lock; 45 spinlock_t queue_lock;
@@ -115,9 +116,14 @@ struct amd_sched_backend_ops {
115 116
116enum amd_sched_priority { 117enum amd_sched_priority {
117 AMD_SCHED_PRIORITY_MIN, 118 AMD_SCHED_PRIORITY_MIN,
118 AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN, 119 AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
120 AMD_SCHED_PRIORITY_NORMAL,
121 AMD_SCHED_PRIORITY_HIGH_SW,
122 AMD_SCHED_PRIORITY_HIGH_HW,
119 AMD_SCHED_PRIORITY_KERNEL, 123 AMD_SCHED_PRIORITY_KERNEL,
120 AMD_SCHED_PRIORITY_MAX 124 AMD_SCHED_PRIORITY_MAX,
125 AMD_SCHED_PRIORITY_INVALID = -1,
126 AMD_SCHED_PRIORITY_UNSET = -2
121}; 127};
122 128
123/** 129/**
@@ -150,6 +156,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
150void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 156void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
151 struct amd_sched_entity *entity); 157 struct amd_sched_entity *entity);
152void amd_sched_entity_push_job(struct amd_sched_job *sched_job); 158void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
159void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
160 struct amd_sched_rq *rq);
153 161
154int amd_sched_fence_slab_init(void); 162int amd_sched_fence_slab_init(void);
155void amd_sched_fence_slab_fini(void); 163void amd_sched_fence_slab_fini(void);
@@ -167,4 +175,11 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
167bool amd_sched_dependency_optimized(struct dma_fence* fence, 175bool amd_sched_dependency_optimized(struct dma_fence* fence,
168 struct amd_sched_entity *entity); 176 struct amd_sched_entity *entity);
169void amd_sched_job_kickout(struct amd_sched_job *s_job); 177void amd_sched_job_kickout(struct amd_sched_job *s_job);
178
179static inline enum amd_sched_priority
180amd_sched_get_job_priority(struct amd_sched_job *job)
181{
182 return (job->s_entity->rq - job->sched->sched_rq);
183}
184
170#endif 185#endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index e11fd76e06f4..4d688c8d7853 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
95 unsigned small; 95 unsigned small;
96}; 96};
97 97
98#define NUM_POOLS 4 98#define NUM_POOLS 6
99 99
100/** 100/**
101 * struct ttm_pool_manager - Holds memory pools for fst allocation 101 * struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
122 struct ttm_page_pool uc_pool; 122 struct ttm_page_pool uc_pool;
123 struct ttm_page_pool wc_pool_dma32; 123 struct ttm_page_pool wc_pool_dma32;
124 struct ttm_page_pool uc_pool_dma32; 124 struct ttm_page_pool uc_pool_dma32;
125 struct ttm_page_pool wc_pool_huge;
126 struct ttm_page_pool uc_pool_huge;
125 } ; 127 } ;
126 }; 128 };
127}; 129};
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
256 258
257/** 259/**
258 * Select the right pool or requested caching state and ttm flags. */ 260 * Select the right pool or requested caching state and ttm flags. */
259static struct ttm_page_pool *ttm_get_pool(int flags, 261static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
260 enum ttm_caching_state cstate) 262 enum ttm_caching_state cstate)
261{ 263{
262 int pool_index; 264 int pool_index;
263 265
@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
269 else 271 else
270 pool_index = 0x1; 272 pool_index = 0x1;
271 273
272 if (flags & TTM_PAGE_FLAG_DMA32) 274 if (flags & TTM_PAGE_FLAG_DMA32) {
275 if (huge)
276 return NULL;
273 pool_index |= 0x2; 277 pool_index |= 0x2;
274 278
279 } else if (huge) {
280 pool_index |= 0x4;
281 }
282
275 return &_manager->pools[pool_index]; 283 return &_manager->pools[pool_index];
276} 284}
277 285
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
494 * pages returned in pages array. 502 * pages returned in pages array.
495 */ 503 */
496static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, 504static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
497 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 505 int ttm_flags, enum ttm_caching_state cstate,
506 unsigned count, unsigned order)
498{ 507{
499 struct page **caching_array; 508 struct page **caching_array;
500 struct page *p; 509 struct page *p;
501 int r = 0; 510 int r = 0;
502 unsigned i, cpages; 511 unsigned i, j, cpages;
512 unsigned npages = 1 << order;
503 unsigned max_cpages = min(count, 513 unsigned max_cpages = min(count,
504 (unsigned)(PAGE_SIZE/sizeof(struct page *))); 514 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
505 515
@@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
512 } 522 }
513 523
514 for (i = 0, cpages = 0; i < count; ++i) { 524 for (i = 0, cpages = 0; i < count; ++i) {
515 p = alloc_page(gfp_flags); 525 p = alloc_pages(gfp_flags, order);
516 526
517 if (!p) { 527 if (!p) {
518 pr_err("Unable to get page %u\n", i); 528 pr_err("Unable to get page %u\n", i);
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
531 goto out; 541 goto out;
532 } 542 }
533 543
544 list_add(&p->lru, pages);
545
534#ifdef CONFIG_HIGHMEM 546#ifdef CONFIG_HIGHMEM
535 /* gfp flags of highmem page should never be dma32 so we 547 /* gfp flags of highmem page should never be dma32 so we
536 * we should be fine in such case 548 * we should be fine in such case
537 */ 549 */
538 if (!PageHighMem(p)) 550 if (PageHighMem(p))
551 continue;
552
539#endif 553#endif
540 { 554 for (j = 0; j < npages; ++j) {
541 caching_array[cpages++] = p; 555 caching_array[cpages++] = p++;
542 if (cpages == max_cpages) { 556 if (cpages == max_cpages) {
543 557
544 r = ttm_set_pages_caching(caching_array, 558 r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
552 cpages = 0; 566 cpages = 0;
553 } 567 }
554 } 568 }
555
556 list_add(&p->lru, pages);
557 } 569 }
558 570
559 if (cpages) { 571 if (cpages) {
@@ -573,9 +585,9 @@ out:
573 * Fill the given pool if there aren't enough pages and the requested number of 585 * Fill the given pool if there aren't enough pages and the requested number of
574 * pages is small. 586 * pages is small.
575 */ 587 */
576static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, 588static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
577 int ttm_flags, enum ttm_caching_state cstate, unsigned count, 589 enum ttm_caching_state cstate,
578 unsigned long *irq_flags) 590 unsigned count, unsigned long *irq_flags)
579{ 591{
580 struct page *p; 592 struct page *p;
581 int r; 593 int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
605 617
606 INIT_LIST_HEAD(&new_pages); 618 INIT_LIST_HEAD(&new_pages);
607 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, 619 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
608 cstate, alloc_size); 620 cstate, alloc_size, 0);
609 spin_lock_irqsave(&pool->lock, *irq_flags); 621 spin_lock_irqsave(&pool->lock, *irq_flags);
610 622
611 if (!r) { 623 if (!r) {
@@ -627,22 +639,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
627} 639}
628 640
629/** 641/**
630 * Cut 'count' number of pages from the pool and put them on the return list. 642 * Allocate pages from the pool and put them on the return list.
631 * 643 *
632 * @return count of pages still required to fulfill the request. 644 * @return zero for success or negative error code.
633 */ 645 */
634static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, 646static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
635 struct list_head *pages, 647 struct list_head *pages,
636 int ttm_flags, 648 int ttm_flags,
637 enum ttm_caching_state cstate, 649 enum ttm_caching_state cstate,
638 unsigned count) 650 unsigned count, unsigned order)
639{ 651{
640 unsigned long irq_flags; 652 unsigned long irq_flags;
641 struct list_head *p; 653 struct list_head *p;
642 unsigned i; 654 unsigned i;
655 int r = 0;
643 656
644 spin_lock_irqsave(&pool->lock, irq_flags); 657 spin_lock_irqsave(&pool->lock, irq_flags);
645 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); 658 if (!order)
659 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
660 &irq_flags);
646 661
647 if (count >= pool->npages) { 662 if (count >= pool->npages) {
648 /* take all pages from the pool */ 663 /* take all pages from the pool */
@@ -672,32 +687,126 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
672 count = 0; 687 count = 0;
673out: 688out:
674 spin_unlock_irqrestore(&pool->lock, irq_flags); 689 spin_unlock_irqrestore(&pool->lock, irq_flags);
675 return count; 690
691 /* clear the pages coming from the pool if requested */
692 if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
693 struct page *page;
694
695 list_for_each_entry(page, pages, lru) {
696 if (PageHighMem(page))
697 clear_highpage(page);
698 else
699 clear_page(page_address(page));
700 }
701 }
702
703 /* If pool didn't have enough pages allocate new one. */
704 if (count) {
705 gfp_t gfp_flags = pool->gfp_flags;
706
707 /* set zero flag for page allocation if required */
708 if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
709 gfp_flags |= __GFP_ZERO;
710
711 /* ttm_alloc_new_pages doesn't reference pool so we can run
712 * multiple requests in parallel.
713 **/
714 r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
715 count, order);
716 }
717
718 return r;
676} 719}
677 720
678/* Put all pages in pages list to correct pool to wait for reuse */ 721/* Put all pages in pages list to correct pool to wait for reuse */
679static void ttm_put_pages(struct page **pages, unsigned npages, int flags, 722static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
680 enum ttm_caching_state cstate) 723 enum ttm_caching_state cstate)
681{ 724{
725 struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
726#ifdef CONFIG_TRANSPARENT_HUGEPAGE
727 struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
728#endif
682 unsigned long irq_flags; 729 unsigned long irq_flags;
683 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
684 unsigned i; 730 unsigned i;
685 731
686 if (pool == NULL) { 732 if (pool == NULL) {
687 /* No pool for this memory type so free the pages */ 733 /* No pool for this memory type so free the pages */
688 for (i = 0; i < npages; i++) { 734 i = 0;
689 if (pages[i]) { 735 while (i < npages) {
690 if (page_count(pages[i]) != 1) 736#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691 pr_err("Erroneous page count. Leaking pages.\n"); 737 struct page *p = pages[i];
692 __free_page(pages[i]); 738#endif
693 pages[i] = NULL; 739 unsigned order = 0, j;
740
741 if (!pages[i]) {
742 ++i;
743 continue;
744 }
745
746#ifdef CONFIG_TRANSPARENT_HUGEPAGE
747 for (j = 0; j < HPAGE_PMD_NR; ++j)
748 if (p++ != pages[i + j])
749 break;
750
751 if (j == HPAGE_PMD_NR)
752 order = HPAGE_PMD_ORDER;
753#endif
754
755 if (page_count(pages[i]) != 1)
756 pr_err("Erroneous page count. Leaking pages.\n");
757 __free_pages(pages[i], order);
758
759 j = 1 << order;
760 while (j) {
761 pages[i++] = NULL;
762 --j;
694 } 763 }
695 } 764 }
696 return; 765 return;
697 } 766 }
698 767
768 i = 0;
769#ifdef CONFIG_TRANSPARENT_HUGEPAGE
770 if (huge) {
771 unsigned max_size, n2free;
772
773 spin_lock_irqsave(&huge->lock, irq_flags);
774 while (i < npages) {
775 struct page *p = pages[i];
776 unsigned j;
777
778 if (!p)
779 break;
780
781 for (j = 0; j < HPAGE_PMD_NR; ++j)
782 if (p++ != pages[i + j])
783 break;
784
785 if (j != HPAGE_PMD_NR)
786 break;
787
788 list_add_tail(&pages[i]->lru, &huge->list);
789
790 for (j = 0; j < HPAGE_PMD_NR; ++j)
791 pages[i++] = NULL;
792 huge->npages++;
793 }
794
795 /* Check that we don't go over the pool limit */
796 max_size = _manager->options.max_size;
797 max_size /= HPAGE_PMD_NR;
798 if (huge->npages > max_size)
799 n2free = huge->npages - max_size;
800 else
801 n2free = 0;
802 spin_unlock_irqrestore(&huge->lock, irq_flags);
803 if (n2free)
804 ttm_page_pool_free(huge, n2free, false);
805 }
806#endif
807
699 spin_lock_irqsave(&pool->lock, irq_flags); 808 spin_lock_irqsave(&pool->lock, irq_flags);
700 for (i = 0; i < npages; i++) { 809 while (i < npages) {
701 if (pages[i]) { 810 if (pages[i]) {
702 if (page_count(pages[i]) != 1) 811 if (page_count(pages[i]) != 1)
703 pr_err("Erroneous page count. Leaking pages.\n"); 812 pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +814,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
705 pages[i] = NULL; 814 pages[i] = NULL;
706 pool->npages++; 815 pool->npages++;
707 } 816 }
817 ++i;
708 } 818 }
709 /* Check that we don't go over the pool limit */ 819 /* Check that we don't go over the pool limit */
710 npages = 0; 820 npages = 0;
@@ -727,25 +837,52 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
727static int ttm_get_pages(struct page **pages, unsigned npages, int flags, 837static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
728 enum ttm_caching_state cstate) 838 enum ttm_caching_state cstate)
729{ 839{
730 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 840 struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
841#ifdef CONFIG_TRANSPARENT_HUGEPAGE
842 struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
843#endif
731 struct list_head plist; 844 struct list_head plist;
732 struct page *p = NULL; 845 struct page *p = NULL;
733 gfp_t gfp_flags = GFP_USER;
734 unsigned count; 846 unsigned count;
735 int r; 847 int r;
736 848
737 /* set zero flag for page allocation if required */
738 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
739 gfp_flags |= __GFP_ZERO;
740
741 /* No pool for cached pages */ 849 /* No pool for cached pages */
742 if (pool == NULL) { 850 if (pool == NULL) {
851 gfp_t gfp_flags = GFP_USER;
852 unsigned i;
853#ifdef CONFIG_TRANSPARENT_HUGEPAGE
854 unsigned j;
855#endif
856
857 /* set zero flag for page allocation if required */
858 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
859 gfp_flags |= __GFP_ZERO;
860
743 if (flags & TTM_PAGE_FLAG_DMA32) 861 if (flags & TTM_PAGE_FLAG_DMA32)
744 gfp_flags |= GFP_DMA32; 862 gfp_flags |= GFP_DMA32;
745 else 863 else
746 gfp_flags |= GFP_HIGHUSER; 864 gfp_flags |= GFP_HIGHUSER;
747 865
748 for (r = 0; r < npages; ++r) { 866 i = 0;
867#ifdef CONFIG_TRANSPARENT_HUGEPAGE
868 while (npages >= HPAGE_PMD_NR) {
869 gfp_t huge_flags = gfp_flags;
870
871 huge_flags |= GFP_TRANSHUGE;
872 huge_flags &= ~__GFP_MOVABLE;
873 huge_flags &= ~__GFP_COMP;
874 p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
875 if (!p)
876 break;
877
878 for (j = 0; j < HPAGE_PMD_NR; ++j)
879 pages[i++] = p++;
880
881 npages -= HPAGE_PMD_NR;
882 }
883#endif
884
885 while (npages) {
749 p = alloc_page(gfp_flags); 886 p = alloc_page(gfp_flags);
750 if (!p) { 887 if (!p) {
751 888
@@ -753,49 +890,44 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
753 return -ENOMEM; 890 return -ENOMEM;
754 } 891 }
755 892
756 pages[r] = p; 893 pages[i++] = p;
894 --npages;
757 } 895 }
758 return 0; 896 return 0;
759 } 897 }
760 898
761 /* combine zero flag to pool flags */
762 gfp_flags |= pool->gfp_flags;
763
764 /* First we take pages from the pool */
765 INIT_LIST_HEAD(&plist);
766 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
767 count = 0; 899 count = 0;
768 list_for_each_entry(p, &plist, lru) {
769 pages[count++] = p;
770 }
771 900
772 /* clear the pages coming from the pool if requested */ 901#ifdef CONFIG_TRANSPARENT_HUGEPAGE
773 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { 902 if (huge && npages >= HPAGE_PMD_NR) {
903 INIT_LIST_HEAD(&plist);
904 ttm_page_pool_get_pages(huge, &plist, flags, cstate,
905 npages / HPAGE_PMD_NR,
906 HPAGE_PMD_ORDER);
907
774 list_for_each_entry(p, &plist, lru) { 908 list_for_each_entry(p, &plist, lru) {
775 if (PageHighMem(p)) 909 unsigned j;
776 clear_highpage(p); 910
777 else 911 for (j = 0; j < HPAGE_PMD_NR; ++j)
778 clear_page(page_address(p)); 912 pages[count++] = &p[j];
779 } 913 }
780 } 914 }
915#endif
781 916
782 /* If pool didn't have enough pages allocate new one. */ 917 INIT_LIST_HEAD(&plist);
783 if (npages > 0) { 918 r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
784 /* ttm_alloc_new_pages doesn't reference pool so we can run 919 npages - count, 0);
785 * multiple requests in parallel. 920
786 **/ 921 list_for_each_entry(p, &plist, lru)
787 INIT_LIST_HEAD(&plist); 922 pages[count++] = p;
788 r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages); 923
789 list_for_each_entry(p, &plist, lru) { 924 if (r) {
790 pages[count++] = p; 925 /* If there is any pages in the list put them back to
791 } 926 * the pool.
792 if (r) { 927 */
793 /* If there is any pages in the list put them back to 928 pr_err("Failed to allocate extra pages for large request\n");
794 * the pool. */ 929 ttm_put_pages(pages, count, flags, cstate);
795 pr_err("Failed to allocate extra pages for large request\n"); 930 return r;
796 ttm_put_pages(pages, count, flags, cstate);
797 return r;
798 }
799 } 931 }
800 932
801 return 0; 933 return 0;
@@ -832,6 +964,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
832 ttm_page_pool_init_locked(&_manager->uc_pool_dma32, 964 ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
833 GFP_USER | GFP_DMA32, "uc dma"); 965 GFP_USER | GFP_DMA32, "uc dma");
834 966
967 ttm_page_pool_init_locked(&_manager->wc_pool_huge,
968 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
969 "wc huge");
970
971 ttm_page_pool_init_locked(&_manager->uc_pool_huge,
972 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
973 , "uc huge");
974
835 _manager->options.max_size = max_pages; 975 _manager->options.max_size = max_pages;
836 _manager->options.small = SMALL_ALLOCATION; 976 _manager->options.small = SMALL_ALLOCATION;
837 _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; 977 _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
@@ -873,15 +1013,14 @@ int ttm_pool_populate(struct ttm_tt *ttm)
873 if (ttm->state != tt_unpopulated) 1013 if (ttm->state != tt_unpopulated)
874 return 0; 1014 return 0;
875 1015
876 for (i = 0; i < ttm->num_pages; ++i) { 1016 ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
877 ret = ttm_get_pages(&ttm->pages[i], 1, 1017 ttm->caching_state);
878 ttm->page_flags, 1018 if (unlikely(ret != 0)) {
879 ttm->caching_state); 1019 ttm_pool_unpopulate(ttm);
880 if (ret != 0) { 1020 return ret;
881 ttm_pool_unpopulate(ttm); 1021 }
882 return -ENOMEM;
883 }
884 1022
1023 for (i = 0; i < ttm->num_pages; ++i) {
885 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], 1024 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
886 PAGE_SIZE); 1025 PAGE_SIZE);
887 if (unlikely(ret != 0)) { 1026 if (unlikely(ret != 0)) {
@@ -908,14 +1047,14 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
908 unsigned i; 1047 unsigned i;
909 1048
910 for (i = 0; i < ttm->num_pages; ++i) { 1049 for (i = 0; i < ttm->num_pages; ++i) {
911 if (ttm->pages[i]) { 1050 if (!ttm->pages[i])
912 ttm_mem_global_free_page(ttm->glob->mem_glob, 1051 continue;
913 ttm->pages[i], PAGE_SIZE); 1052
914 ttm_put_pages(&ttm->pages[i], 1, 1053 ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
915 ttm->page_flags, 1054 PAGE_SIZE);
916 ttm->caching_state);
917 }
918 } 1055 }
1056 ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1057 ttm->caching_state);
919 ttm->state = tt_unpopulated; 1058 ttm->state = tt_unpopulated;
920} 1059}
921EXPORT_SYMBOL(ttm_pool_unpopulate); 1060EXPORT_SYMBOL(ttm_pool_unpopulate);
@@ -923,16 +1062,26 @@ EXPORT_SYMBOL(ttm_pool_unpopulate);
923#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) 1062#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
924int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) 1063int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
925{ 1064{
926 unsigned i; 1065 unsigned i, j;
927 int r; 1066 int r;
928 1067
929 r = ttm_pool_populate(&tt->ttm); 1068 r = ttm_pool_populate(&tt->ttm);
930 if (r) 1069 if (r)
931 return r; 1070 return r;
932 1071
933 for (i = 0; i < tt->ttm.num_pages; i++) { 1072 for (i = 0; i < tt->ttm.num_pages; ++i) {
1073 struct page *p = tt->ttm.pages[i];
1074 size_t num_pages = 1;
1075
1076 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1077 if (++p != tt->ttm.pages[j])
1078 break;
1079
1080 ++num_pages;
1081 }
1082
934 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 1083 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
935 0, PAGE_SIZE, 1084 0, num_pages * PAGE_SIZE,
936 DMA_BIDIRECTIONAL); 1085 DMA_BIDIRECTIONAL);
937 if (dma_mapping_error(dev, tt->dma_address[i])) { 1086 if (dma_mapping_error(dev, tt->dma_address[i])) {
938 while (i--) { 1087 while (i--) {
@@ -943,6 +1092,11 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
943 ttm_pool_unpopulate(&tt->ttm); 1092 ttm_pool_unpopulate(&tt->ttm);
944 return -EFAULT; 1093 return -EFAULT;
945 } 1094 }
1095
1096 for (j = 1; j < num_pages; ++j) {
1097 tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
1098 ++i;
1099 }
946 } 1100 }
947 return 0; 1101 return 0;
948} 1102}
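
The two hunks above teach ttm_populate_and_map_pages() to look ahead for struct page pointers that are consecutive, map each such run with a single dma_map_page() call of num_pages * PAGE_SIZE, and then fill the remaining per-page dma_address slots by stepping PAGE_SIZE at a time. A self-contained userspace sketch of the same loop structure; pfn[] and map_range() are illustrative stand-ins, not kernel API:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u

    /* Stand-in for dma_map_page(): report the run and return a fake bus address. */
    static uint64_t map_range(unsigned long pfn, size_t bytes)
    {
        printf("map %zu bytes starting at pfn %lu\n", bytes, pfn);
        return (uint64_t)pfn * PAGE_SIZE;
    }

    static void map_coalesced(const unsigned long *pfn, uint64_t *dma_address,
                              size_t n)
    {
        size_t i, j;

        for (i = 0; i < n; ++i) {
            unsigned long expect = pfn[i];
            size_t num_pages = 1;

            /* Count how many following entries are consecutive. */
            for (j = i + 1; j < n; ++j) {
                if (++expect != pfn[j])
                    break;
                ++num_pages;
            }

            /* One mapping call for the whole run... */
            dma_address[i] = map_range(pfn[i], num_pages * PAGE_SIZE);

            /* ...then derive the remaining per-page addresses. */
            for (j = 1; j < num_pages; ++j) {
                dma_address[i + 1] = dma_address[i] + PAGE_SIZE;
                ++i;
            }
        }
    }

    int main(void)
    {
        unsigned long pfn[] = { 100, 101, 102, 200, 300, 301 };
        uint64_t dma_address[6] = { 0 };
        size_t i;

        map_coalesced(pfn, dma_address, 6);
        for (i = 0; i < 6; ++i)
            printf("page %zu -> 0x%llx\n", i,
                   (unsigned long long)dma_address[i]);
        return 0;
    }

With the sample input this maps three runs (4, 1 and 2 pages) instead of six single pages; the unmap path in the next hunk relies on the same run detection.
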
@@ -950,13 +1104,28 @@ EXPORT_SYMBOL(ttm_populate_and_map_pages);
950 1104
951void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) 1105void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
952{ 1106{
953 unsigned i; 1107 unsigned i, j;
954 1108
955 for (i = 0; i < tt->ttm.num_pages; i++) { 1109 for (i = 0; i < tt->ttm.num_pages;) {
956 if (tt->dma_address[i]) { 1110 struct page *p = tt->ttm.pages[i];
957 dma_unmap_page(dev, tt->dma_address[i], 1111 size_t num_pages = 1;
958 PAGE_SIZE, DMA_BIDIRECTIONAL); 1112
1113 if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
1114 ++i;
1115 continue;
959 } 1116 }
1117
1118 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1119 if (++p != tt->ttm.pages[j])
1120 break;
1121
1122 ++num_pages;
1123 }
1124
1125 dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
1126 DMA_BIDIRECTIONAL);
1127
1128 i += num_pages;
960 } 1129 }
961 ttm_pool_unpopulate(&tt->ttm); 1130 ttm_pool_unpopulate(&tt->ttm);
962} 1131}
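
The unmap side walks the table the same way, but first skips slots that were never mapped (zero dma_address or missing page) and then advances i by the run length in one step. A small illustrative sketch; plain integers stand in for bus addresses, and the run is detected from the addresses themselves rather than from struct page pointers as the kernel code does:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Walk an address table, skipping unmapped (zero) slots and handling each
     * contiguous run with a single "unmap". 4096 stands in for PAGE_SIZE. */
    static void unmap_coalesced(const uint64_t *dma_address, size_t n)
    {
        size_t i, j;

        for (i = 0; i < n;) {
            uint64_t expect = dma_address[i];
            size_t num_pages = 1;

            if (!dma_address[i]) {
                ++i;
                continue;
            }

            for (j = i + 1; j < n; ++j) {
                expect += 4096;
                if (dma_address[j] != expect)
                    break;
                ++num_pages;
            }

            printf("unmap %zu page(s) at 0x%llx\n", num_pages,
                   (unsigned long long)dma_address[i]);
            i += num_pages;
        }
    }

    int main(void)
    {
        uint64_t dma_address[] = { 0x1000, 0x2000, 0, 0x9000, 0xa000, 0xb000 };

        unmap_coalesced(dma_address, 6);
        return 0;
    }
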
@@ -972,12 +1141,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
972 seq_printf(m, "No pool allocator running.\n"); 1141 seq_printf(m, "No pool allocator running.\n");
973 return 0; 1142 return 0;
974 } 1143 }
975 seq_printf(m, "%6s %12s %13s %8s\n", 1144 seq_printf(m, "%7s %12s %13s %8s\n",
976 h[0], h[1], h[2], h[3]); 1145 h[0], h[1], h[2], h[3]);
977 for (i = 0; i < NUM_POOLS; ++i) { 1146 for (i = 0; i < NUM_POOLS; ++i) {
978 p = &_manager->pools[i]; 1147 p = &_manager->pools[i];
979 1148
980 seq_printf(m, "%6s %12ld %13ld %8d\n", 1149 seq_printf(m, "%7s %12ld %13ld %8d\n",
981 p->name, p->nrefills, 1150 p->name, p->nrefills,
982 p->nfrees, p->npages); 1151 p->nfrees, p->npages);
983 } 1152 }
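
The name column widens from %6s to %7s because the new seven-character pool names ("wc huge", "uc huge") would otherwise push the following columns out of alignment, since a printf field width is only a minimum. A trivial demonstration:

    #include <stdio.h>

    int main(void)
    {
        const char *names[] = { "wc", "uc dma", "wc huge", "uc huge" };
        int i;

        /* "%6s" does not truncate; it simply misaligns 7-char names,
         * while "%7s" keeps the columns straight. */
        for (i = 0; i < 4; ++i)
            printf("%6s | %7s |\n", names[i], names[i]);
        return 0;
    }
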
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index e5ef10d34748..96ad12906621 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -913,6 +913,7 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
913 if (huge) { 913 if (huge) {
914 gfp_flags |= GFP_TRANSHUGE; 914 gfp_flags |= GFP_TRANSHUGE;
915 gfp_flags &= ~__GFP_MOVABLE; 915 gfp_flags &= ~__GFP_MOVABLE;
916 gfp_flags &= ~__GFP_COMP;
916 } 917 }
917 918
918 return gfp_flags; 919 return gfp_flags;
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4c6e8c482ee4..ff0181829f3d 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -53,6 +53,7 @@ extern "C" {
53#define DRM_AMDGPU_WAIT_FENCES 0x12 53#define DRM_AMDGPU_WAIT_FENCES 0x12
54#define DRM_AMDGPU_VM 0x13 54#define DRM_AMDGPU_VM 0x13
55#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14 55#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
56#define DRM_AMDGPU_SCHED 0x15
56 57
57#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) 58#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
58#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) 59#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -69,6 +70,7 @@ extern "C" {
69#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences) 70#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
70#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm) 71#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
71#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle) 72#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
73#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
72 74
73#define AMDGPU_GEM_DOMAIN_CPU 0x1 75#define AMDGPU_GEM_DOMAIN_CPU 0x1
74#define AMDGPU_GEM_DOMAIN_GTT 0x2 76#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -91,6 +93,8 @@ extern "C" {
91#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) 93#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
92/* Flag that BO is always valid in this VM */ 94/* Flag that BO is always valid in this VM */
93#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6) 95#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
96/* Flag that BO sharing will be explicitly synchronized */
97#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
94 98
95struct drm_amdgpu_gem_create_in { 99struct drm_amdgpu_gem_create_in {
96 /** the requested memory size */ 100 /** the requested memory size */
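
With the new flag, userspace can mark a buffer as explicitly synchronized at creation time. A hedged sketch of such a call, assuming a kernel and amdgpu_drm.h that carry this patch; the render-node path is an assumption and error handling is minimal:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/amdgpu_drm.h>

    int main(void)
    {
        union drm_amdgpu_gem_create args;
        int fd = open("/dev/dri/renderD128", O_RDWR); /* node path is an assumption */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&args, 0, sizeof(args));
        args.in.bo_size = 1 << 20;                 /* 1 MiB */
        args.in.alignment = 4096;
        args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
        args.in.domain_flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
            perror("DRM_IOCTL_AMDGPU_GEM_CREATE");
        else
            printf("GEM handle %u\n", args.out.handle);

        close(fd);
        return 0;
    }
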
@@ -166,13 +170,22 @@ union drm_amdgpu_bo_list {
166/* unknown cause */ 170/* unknown cause */
167#define AMDGPU_CTX_UNKNOWN_RESET 3 171#define AMDGPU_CTX_UNKNOWN_RESET 3
168 172
173/* Context priority level */
174#define AMDGPU_CTX_PRIORITY_UNSET -2048
175#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
176#define AMDGPU_CTX_PRIORITY_LOW -512
177#define AMDGPU_CTX_PRIORITY_NORMAL 0
178/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
179#define AMDGPU_CTX_PRIORITY_HIGH 512
180#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
181
169struct drm_amdgpu_ctx_in { 182struct drm_amdgpu_ctx_in {
170 /** AMDGPU_CTX_OP_* */ 183 /** AMDGPU_CTX_OP_* */
171 __u32 op; 184 __u32 op;
172 /** For future use, no flags defined so far */ 185 /** For future use, no flags defined so far */
173 __u32 flags; 186 __u32 flags;
174 __u32 ctx_id; 187 __u32 ctx_id;
175 __u32 _pad; 188 __s32 priority;
176}; 189};
177 190
178union drm_amdgpu_ctx_out { 191union drm_amdgpu_ctx_out {
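
Contexts can now be allocated at a non-default priority by filling the new priority field. A hedged sketch, again assuming headers with this patch; priorities above NORMAL are expected to fail without CAP_SYS_NICE or DRM master, as the comment in the hunk notes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/amdgpu_drm.h>

    int main(void)
    {
        union drm_amdgpu_ctx args;
        int fd = open("/dev/dri/renderD128", O_RDWR); /* node path is an assumption */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
        args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args))
            perror("DRM_IOCTL_AMDGPU_CTX"); /* likely EACCES without privileges */
        else
            printf("ctx id %u\n", args.out.alloc.ctx_id);

        close(fd);
        return 0;
    }
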
@@ -216,6 +229,21 @@ union drm_amdgpu_vm {
216 struct drm_amdgpu_vm_out out; 229 struct drm_amdgpu_vm_out out;
217}; 230};
218 231
232/* sched ioctl */
233#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
234
235struct drm_amdgpu_sched_in {
236 /* AMDGPU_SCHED_OP_* */
237 __u32 op;
238 __u32 fd;
239 __s32 priority;
240 __u32 flags;
241};
242
243union drm_amdgpu_sched {
244 struct drm_amdgpu_sched_in in;
245};
246
219/* 247/*
220 * This is not a reliable API and you should expect it to fail for any 248 * This is not a reliable API and you should expect it to fail for any
221 * number of reasons and have fallback path that do not use userptr to 249 * number of reasons and have fallback path that do not use userptr to
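
The new ioctl lets a privileged process override the scheduler priority of another client, identified by a file descriptor for that client's DRM file. A hedged sketch of the call itself; how the client's fd reaches the privileged process (for example over a Unix socket with SCM_RIGHTS) is assumed and not shown:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* master_fd: our own (privileged) amdgpu fd; client_fd: the client's fd. */
    static int override_priority(int master_fd, int client_fd, int prio)
    {
        union drm_amdgpu_sched args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
        args.in.fd = client_fd;
        args.in.priority = prio;

        return ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
    }

    int main(void)
    {
        /* Placeholders only; real fds would come from open()/SCM_RIGHTS. */
        int master_fd = -1, client_fd = -1;

        if (override_priority(master_fd, client_fd, AMDGPU_CTX_PRIORITY_HIGH))
            perror("DRM_IOCTL_AMDGPU_SCHED");
        return 0;
    }

Note that the ioctl is declared DRM_IOW and the union only has an "in" half, so nothing is returned beyond the error code.
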
@@ -629,6 +657,7 @@ struct drm_amdgpu_cs_chunk_data {
629 #define AMDGPU_INFO_SENSOR_VDDGFX 0x7 657 #define AMDGPU_INFO_SENSOR_VDDGFX 0x7
630/* Number of VRAM page faults on CPU access. */ 658/* Number of VRAM page faults on CPU access. */
631#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E 659#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
660#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
632 661
633#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 662#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
634#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff 663#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
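
The new query exposes a counter that userspace can poll to detect VRAM loss. A hedged sketch of reading it through DRM_IOCTL_AMDGPU_INFO, assuming the counter is returned as a 32-bit value through the caller-supplied buffer:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/amdgpu_drm.h>

    int main(void)
    {
        struct drm_amdgpu_info request;
        uint32_t counter = 0;
        int fd = open("/dev/dri/renderD128", O_RDWR); /* node path is an assumption */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)&counter;
        request.return_size = sizeof(counter);
        request.query = AMDGPU_INFO_VRAM_LOST_COUNTER;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
            perror("DRM_IOCTL_AMDGPU_INFO");
        else
            printf("VRAM lost counter: %u\n", counter);

        close(fd);
        return 0;
    }
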