-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 13
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c | 1364
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 4
-rw-r--r--  sound/pci/hda/hda_intel.c | 4
40 files changed, 432 insertions, 1640 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f44a83ab2bf4..c8b605f3dc05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -890,6 +890,7 @@ struct amdgpu_gfx_funcs {
890 void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); 890 void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
891 void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); 891 void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
892 void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); 892 void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
893 void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
893}; 894};
894 895
895struct amdgpu_ngg_buf { 896struct amdgpu_ngg_buf {
@@ -1378,6 +1379,7 @@ enum amd_hw_ip_block_type {
1378 ATHUB_HWIP, 1379 ATHUB_HWIP,
1379 NBIO_HWIP, 1380 NBIO_HWIP,
1380 MP0_HWIP, 1381 MP0_HWIP,
1382 MP1_HWIP,
1381 UVD_HWIP, 1383 UVD_HWIP,
1382 VCN_HWIP = UVD_HWIP, 1384 VCN_HWIP = UVD_HWIP,
1383 VCE_HWIP, 1385 VCE_HWIP,
@@ -1387,6 +1389,7 @@ enum amd_hw_ip_block_type {
1387 SMUIO_HWIP, 1389 SMUIO_HWIP,
1388 PWR_HWIP, 1390 PWR_HWIP,
1389 NBIF_HWIP, 1391 NBIF_HWIP,
1392 THM_HWIP,
1390 MAX_HWIP 1393 MAX_HWIP
1391}; 1394};
1392 1395
@@ -1812,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
1812#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) 1815#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
1813#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) 1816#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
1814#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) 1817#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
1818#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
1815 1819
1816/* Common functions */ 1820/* Common functions */
1817int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 1821int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 369beb5041a2..448d69fe3756 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -64,16 +64,21 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
64 64
65#if defined(CONFIG_DEBUG_FS) 65#if defined(CONFIG_DEBUG_FS)
66 66
67static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, 67
68 size_t size, loff_t *pos) 68static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
69 char __user *buf, size_t size, loff_t *pos)
69{ 70{
70 struct amdgpu_device *adev = file_inode(f)->i_private; 71 struct amdgpu_device *adev = file_inode(f)->i_private;
71 ssize_t result = 0; 72 ssize_t result = 0;
72 int r; 73 int r;
73 bool pm_pg_lock, use_bank; 74 bool pm_pg_lock, use_bank, use_ring;
74 unsigned instance_bank, sh_bank, se_bank; 75 unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
75 76
76 if (size & 0x3 || *pos & 0x3) 77 pm_pg_lock = use_bank = use_ring = false;
78 instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
79
80 if (size & 0x3 || *pos & 0x3 ||
81 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
77 return -EINVAL; 82 return -EINVAL;
78 83
79 /* are we reading registers for which a PG lock is necessary? */ 84 /* are we reading registers for which a PG lock is necessary? */
@@ -91,8 +96,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
91 if (instance_bank == 0x3FF) 96 if (instance_bank == 0x3FF)
92 instance_bank = 0xFFFFFFFF; 97 instance_bank = 0xFFFFFFFF;
93 use_bank = 1; 98 use_bank = 1;
99 } else if (*pos & (1ULL << 61)) {
100
101 me = (*pos & GENMASK_ULL(33, 24)) >> 24;
102 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
103 queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
104
105 use_ring = 1;
94 } else { 106 } else {
95 use_bank = 0; 107 use_bank = use_ring = 0;
96 } 108 }
97 109
98 *pos &= (1UL << 22) - 1; 110 *pos &= (1UL << 22) - 1;
@@ -104,6 +116,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
104 mutex_lock(&adev->grbm_idx_mutex); 116 mutex_lock(&adev->grbm_idx_mutex);
105 amdgpu_gfx_select_se_sh(adev, se_bank, 117 amdgpu_gfx_select_se_sh(adev, se_bank,
106 sh_bank, instance_bank); 118 sh_bank, instance_bank);
119 } else if (use_ring) {
120 mutex_lock(&adev->srbm_mutex);
121 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
107 } 122 }
108 123
109 if (pm_pg_lock) 124 if (pm_pg_lock)
@@ -115,8 +130,14 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
115 if (*pos > adev->rmmio_size) 130 if (*pos > adev->rmmio_size)
116 goto end; 131 goto end;
117 132
118 value = RREG32(*pos >> 2); 133 if (read) {
119 r = put_user(value, (uint32_t *)buf); 134 value = RREG32(*pos >> 2);
135 r = put_user(value, (uint32_t *)buf);
136 } else {
137 r = get_user(value, (uint32_t *)buf);
138 if (!r)
139 WREG32(*pos >> 2, value);
140 }
120 if (r) { 141 if (r) {
121 result = r; 142 result = r;
122 goto end; 143 goto end;
@@ -132,6 +153,9 @@ end:
132 if (use_bank) { 153 if (use_bank) {
133 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 154 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
134 mutex_unlock(&adev->grbm_idx_mutex); 155 mutex_unlock(&adev->grbm_idx_mutex);
156 } else if (use_ring) {
157 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
158 mutex_unlock(&adev->srbm_mutex);
135 } 159 }
136 160
137 if (pm_pg_lock) 161 if (pm_pg_lock)
@@ -140,78 +164,17 @@ end:
140 return result; 164 return result;
141} 165}
142 166
167
168static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
169 size_t size, loff_t *pos)
170{
171 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
172}
173
143static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, 174static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
144 size_t size, loff_t *pos) 175 size_t size, loff_t *pos)
145{ 176{
146 struct amdgpu_device *adev = file_inode(f)->i_private; 177 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
147 ssize_t result = 0;
148 int r;
149 bool pm_pg_lock, use_bank;
150 unsigned instance_bank, sh_bank, se_bank;
151
152 if (size & 0x3 || *pos & 0x3)
153 return -EINVAL;
154
155 /* are we reading registers for which a PG lock is necessary? */
156 pm_pg_lock = (*pos >> 23) & 1;
157
158 if (*pos & (1ULL << 62)) {
159 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
160 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
161 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
162
163 if (se_bank == 0x3FF)
164 se_bank = 0xFFFFFFFF;
165 if (sh_bank == 0x3FF)
166 sh_bank = 0xFFFFFFFF;
167 if (instance_bank == 0x3FF)
168 instance_bank = 0xFFFFFFFF;
169 use_bank = 1;
170 } else {
171 use_bank = 0;
172 }
173
174 *pos &= (1UL << 22) - 1;
175
176 if (use_bank) {
177 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
178 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
179 return -EINVAL;
180 mutex_lock(&adev->grbm_idx_mutex);
181 amdgpu_gfx_select_se_sh(adev, se_bank,
182 sh_bank, instance_bank);
183 }
184
185 if (pm_pg_lock)
186 mutex_lock(&adev->pm.mutex);
187
188 while (size) {
189 uint32_t value;
190
191 if (*pos > adev->rmmio_size)
192 return result;
193
194 r = get_user(value, (uint32_t *)buf);
195 if (r)
196 return r;
197
198 WREG32(*pos >> 2, value);
199
200 result += 4;
201 buf += 4;
202 *pos += 4;
203 size -= 4;
204 }
205
206 if (use_bank) {
207 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
208 mutex_unlock(&adev->grbm_idx_mutex);
209 }
210
211 if (pm_pg_lock)
212 mutex_unlock(&adev->pm.mutex);
213
214 return result;
215} 178}
216 179
217static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, 180static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
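The new bit-61 path above exposes per-ring (ME/pipe/queue) register access through the offset passed to the amdgpu register debugfs file. A minimal user-space sketch of driving it follows, assuming only the bit layout visible in the hunk (bit 61 selects the ring, me/pipe/queue in bits 24-33/34-43/44-53, register byte offset in the low 22 bits); the debugfs path and the example register offset are illustrative, not taken from the patch.

/*
 * Illustrative only: read one register for ME1/pipe0/queue0 through the
 * amdgpu register debugfs file, using the offset encoding added above.
 * Requires root and a mounted debugfs; path and register offset are
 * example values.
 */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t me = 1, pipe = 0, queue = 0;
	uint64_t reg_byte_off = 0x3f00;		/* example register, byte offset */
	uint64_t pos = (reg_byte_off & ((1ULL << 22) - 1)) |
		       (me << 24) | (pipe << 34) | (queue << 44) |
		       (1ULL << 61);		/* bit 61: select me/pipe/queue */
	uint32_t val;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), (off_t)pos) != sizeof(val)) {
		perror("amdgpu_regs");
		return 1;
	}
	printf("reg 0x%llx = 0x%08x\n", (unsigned long long)reg_byte_off, val);
	close(fd);
	return 0;
}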
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7379aa5a6849..0b19482b36b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -922,6 +922,11 @@ static int __init amdgpu_init(void)
922{ 922{
923 int r; 923 int r;
924 924
925 if (vgacon_text_force()) {
926 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
927 return -EINVAL;
928 }
929
925 r = amdgpu_sync_init(); 930 r = amdgpu_sync_init();
926 if (r) 931 if (r)
927 goto error_sync; 932 goto error_sync;
@@ -930,10 +935,6 @@ static int __init amdgpu_init(void)
930 if (r) 935 if (r)
931 goto error_fence; 936 goto error_fence;
932 937
933 if (vgacon_text_force()) {
934 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
935 return -EINVAL;
936 }
937 DRM_INFO("amdgpu kernel modesetting enabled.\n"); 938 DRM_INFO("amdgpu kernel modesetting enabled.\n");
938 driver = &kms_driver; 939 driver = &kms_driver;
939 pdriver = &amdgpu_kms_pci_driver; 940 pdriver = &amdgpu_kms_pci_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 455a81e4c246..97449e06a242 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,6 +410,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
410int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, 410int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
411 unsigned num_hw_submission) 411 unsigned num_hw_submission)
412{ 412{
413 long timeout;
413 int r; 414 int r;
414 415
415 /* Check that num_hw_submission is a power of two */ 416 /* Check that num_hw_submission is a power of two */
@@ -433,11 +434,16 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
433 434
434 /* No need to setup the GPU scheduler for KIQ ring */ 435 /* No need to setup the GPU scheduler for KIQ ring */
435 if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) { 436 if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
 437		/* no timeout is enforced on compute rings in the non-SR-IOV case */
438 if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
439 && !amdgpu_sriov_vf(ring->adev))
440 timeout = MAX_SCHEDULE_TIMEOUT;
441 else
442 timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
443
436 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, 444 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
437 num_hw_submission, amdgpu_job_hang_limit, 445 num_hw_submission, amdgpu_job_hang_limit,
438 (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ? 446 timeout, ring->name);
439 MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
440 ring->name);
441 if (r) { 447 if (r) {
442 DRM_ERROR("Failed to create scheduler on ring %s.\n", 448 DRM_ERROR("Failed to create scheduler on ring %s.\n",
443 ring->name); 449 ring->name);
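The timeout selection added above can be read as the small helper sketched below (the helper name is illustrative; the patch keeps the logic inline in amdgpu_fence_driver_init_ring()): compute rings on bare metal get no enforced scheduler timeout, while compute rings under SR-IOV and all other ring types use the lockup-timeout module parameter.

/*
 * Sketch of the scheduler-timeout policy introduced above; not part of
 * the patch, shown only to make the two cases explicit.
 */
static long amdgpu_ring_sched_timeout(struct amdgpu_ring *ring)
{
	/* non-SR-IOV compute rings: no timeout is enforced */
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	    !amdgpu_sriov_vf(ring->adev))
		return MAX_SCHEDULE_TIMEOUT;

	/* everything else uses the amdgpu lockup_timeout module parameter */
	return msecs_to_jiffies(amdgpu_lockup_timeout);
}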
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 28c2706e48d7..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
56 alignment = PAGE_SIZE; 56 alignment = PAGE_SIZE;
57 } 57 }
58 58
59retry:
59 r = amdgpu_bo_create(adev, size, alignment, initial_domain, 60 r = amdgpu_bo_create(adev, size, alignment, initial_domain,
60 flags, type, resv, &bo); 61 flags, type, resv, &bo);
61 if (r) { 62 if (r) {
62 DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", 63 if (r != -ERESTARTSYS) {
63 size, initial_domain, alignment, r); 64 if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
65 flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
66 goto retry;
67 }
68
69 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
70 initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
71 goto retry;
72 }
73 DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
74 size, initial_domain, alignment, r);
75 }
64 return r; 76 return r;
65 } 77 }
66 *obj = &bo->gem_base; 78 *obj = &bo->gem_base;
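The retry loop added above follows a fixed fallback order; the stand-alone helper below sketches it (helper name and structure are illustrative, not part of the patch): -ERESTARTSYS is never retried, the CPU-access requirement is dropped first, and a VRAM-only request is widened to VRAM|GTT second.

/*
 * Illustrative restatement of the fallback order used by the retry
 * loop above: returns true while another attempt is worth making and
 * adjusts the request flags/domain accordingly.
 */
static bool amdgpu_gem_should_retry_alloc(int r, u64 *flags, u32 *initial_domain)
{
	if (!r || r == -ERESTARTSYS)
		return false;				/* success, or interrupted */

	if (*flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
		*flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		return true;				/* 1st: drop CPU-access requirement */
	}

	if (*initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
		*initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
		return true;				/* 2nd: also allow GTT */
	}

	return false;					/* nothing left to relax */
}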
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index fac4b6067efd..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
356 struct amdgpu_bo *bo; 356 struct amdgpu_bo *bo;
357 unsigned long page_align; 357 unsigned long page_align;
358 size_t acc_size; 358 size_t acc_size;
359 u32 domains;
360 int r; 359 int r;
361 360
362 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 361 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -418,23 +417,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
418#endif 417#endif
419 418
420 bo->tbo.bdev = &adev->mman.bdev; 419 bo->tbo.bdev = &adev->mman.bdev;
421 domains = bo->preferred_domains; 420 amdgpu_ttm_placement_from_domain(bo, domain);
422retry: 421
423 amdgpu_ttm_placement_from_domain(bo, domains);
424 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, 422 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
425 &bo->placement, page_align, &ctx, acc_size, 423 &bo->placement, page_align, &ctx, acc_size,
426 NULL, resv, &amdgpu_ttm_bo_destroy); 424 NULL, resv, &amdgpu_ttm_bo_destroy);
427 425 if (unlikely(r != 0))
428 if (unlikely(r && r != -ERESTARTSYS)) {
429 if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
430 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
431 goto retry;
432 } else if (domains != bo->preferred_domains) {
433 domains = bo->allowed_domains;
434 goto retry;
435 }
436 }
437 if (unlikely(r))
438 return r; 426 return r;
439 427
440 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && 428 if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 19e71f4a8ac2..c7d43e064fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -505,6 +505,9 @@ failed:
505 505
506int psp_gpu_reset(struct amdgpu_device *adev) 506int psp_gpu_reset(struct amdgpu_device *adev)
507{ 507{
508 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
509 return 0;
510
508 return psp_mode1_reset(&adev->psp); 511 return psp_mode1_reset(&adev->psp);
509} 512}
510 513
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f48ea0dad875..a7576255cc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -859,7 +859,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
859 amdgpu_ring_write(ring, addr & 0xfffffffc); 859 amdgpu_ring_write(ring, addr & 0xfffffffc);
860 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 860 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
861 amdgpu_ring_write(ring, seq); /* reference */ 861 amdgpu_ring_write(ring, seq); /* reference */
862 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 862 amdgpu_ring_write(ring, 0xffffffff); /* mask */
863 amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */ 863 amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
864} 864}
865 865
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 0fff5b8cd318..cd6bf291a853 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3061,11 +3061,18 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
3061 start + SQIND_WAVE_SGPRS_OFFSET, size, dst); 3061 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
3062} 3062}
3063 3063
3064static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
3065 u32 me, u32 pipe, u32 q)
3066{
3067 DRM_INFO("Not implemented\n");
3068}
3069
3064static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { 3070static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
3065 .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, 3071 .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
3066 .select_se_sh = &gfx_v6_0_select_se_sh, 3072 .select_se_sh = &gfx_v6_0_select_se_sh,
3067 .read_wave_data = &gfx_v6_0_read_wave_data, 3073 .read_wave_data = &gfx_v6_0_read_wave_data,
3068 .read_wave_sgprs = &gfx_v6_0_read_wave_sgprs, 3074 .read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
3075 .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
3069}; 3076};
3070 3077
3071static int gfx_v6_0_early_init(void *handle) 3078static int gfx_v6_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e13d9d83767b..42b6144c1fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4270,11 +4270,18 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4270 start + SQIND_WAVE_SGPRS_OFFSET, size, dst); 4270 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4271} 4271}
4272 4272
4273static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4274 u32 me, u32 pipe, u32 q)
4275{
4276 cik_srbm_select(adev, me, pipe, q, 0);
4277}
4278
4273static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { 4279static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4274 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 4280 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4275 .select_se_sh = &gfx_v7_0_select_se_sh, 4281 .select_se_sh = &gfx_v7_0_select_se_sh,
4276 .read_wave_data = &gfx_v7_0_read_wave_data, 4282 .read_wave_data = &gfx_v7_0_read_wave_data,
4277 .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs, 4283 .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4284 .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4278}; 4285};
4279 4286
4280static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { 4287static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 27943e57681c..b0e591eaa71a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3475,6 +3475,12 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3475 WREG32(mmGRBM_GFX_INDEX, data); 3475 WREG32(mmGRBM_GFX_INDEX, data);
3476} 3476}
3477 3477
3478static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
3479 u32 me, u32 pipe, u32 q)
3480{
3481 vi_srbm_select(adev, me, pipe, q, 0);
3482}
3483
3478static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) 3484static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3479{ 3485{
3480 u32 data, mask; 3486 u32 data, mask;
@@ -5442,6 +5448,7 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5442 .select_se_sh = &gfx_v8_0_select_se_sh, 5448 .select_se_sh = &gfx_v8_0_select_se_sh,
5443 .read_wave_data = &gfx_v8_0_read_wave_data, 5449 .read_wave_data = &gfx_v8_0_read_wave_data,
5444 .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs, 5450 .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5451 .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5445}; 5452};
5446 5453
5447static int gfx_v8_0_early_init(void *handle) 5454static int gfx_v8_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1ae3de1094f9..9d39fd5b1822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -998,12 +998,19 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
998 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 998 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
999} 999}
1000 1000
1001static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1002 u32 me, u32 pipe, u32 q)
1003{
1004 soc15_grbm_select(adev, me, pipe, q, 0);
1005}
1006
1001static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { 1007static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1002 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, 1008 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1003 .select_se_sh = &gfx_v9_0_select_se_sh, 1009 .select_se_sh = &gfx_v9_0_select_se_sh,
1004 .read_wave_data = &gfx_v9_0_read_wave_data, 1010 .read_wave_data = &gfx_v9_0_read_wave_data,
1005 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, 1011 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1006 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, 1012 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1013 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
1007}; 1014};
1008 1015
1009static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) 1016static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2757,6 +2764,45 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2757 return 0; 2764 return 0;
2758} 2765}
2759 2766
2767static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
2768{
2769 struct amdgpu_device *adev = ring->adev;
2770 int j;
2771
2772 /* disable the queue if it's active */
2773 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2774
2775 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2776
2777 for (j = 0; j < adev->usec_timeout; j++) {
2778 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2779 break;
2780 udelay(1);
2781 }
2782
 2783		if (j == adev->usec_timeout) {
2784 DRM_DEBUG("KIQ dequeue request failed.\n");
2785
2786 /* Manual disable if dequeue request times out */
2787 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
2788 }
2789
2790 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2791 0);
2792 }
2793
2794 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
2795 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
2796 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
2797 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2798 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
2799 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
2800 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
2801 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
2802
2803 return 0;
2804}
2805
2760static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) 2806static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2761{ 2807{
2762 struct amdgpu_device *adev = ring->adev; 2808 struct amdgpu_device *adev = ring->adev;
@@ -3010,7 +3056,6 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
3010 return r; 3056 return r;
3011} 3057}
3012 3058
3013
3014static int gfx_v9_0_hw_fini(void *handle) 3059static int gfx_v9_0_hw_fini(void *handle)
3015{ 3060{
3016 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3061 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3033,6 +3078,20 @@ static int gfx_v9_0_hw_fini(void *handle)
3033 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3078 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3034 return 0; 3079 return 0;
3035 } 3080 }
3081
 3082	/* Use the deinitialize sequence from CAIL when unbinding the device
 3083	 * from the driver, otherwise KIQ hangs when the driver is bound back.
3084 */
3085 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3086 mutex_lock(&adev->srbm_mutex);
3087 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3088 adev->gfx.kiq.ring.pipe,
3089 adev->gfx.kiq.ring.queue, 0);
3090 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3091 soc15_grbm_select(adev, 0, 0, 0, 0);
3092 mutex_unlock(&adev->srbm_mutex);
3093 }
3094
3036 gfx_v9_0_cp_enable(adev, false); 3095 gfx_v9_0_cp_enable(adev, false);
3037 gfx_v9_0_rlc_stop(adev); 3096 gfx_v9_0_rlc_stop(adev);
3038 3097
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6452101c7aab..c7190c39c4f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -837,7 +837,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
837 amdgpu_ring_write(ring, addr & 0xfffffffc); 837 amdgpu_ring_write(ring, addr & 0xfffffffc);
838 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 838 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
839 amdgpu_ring_write(ring, seq); /* reference */ 839 amdgpu_ring_write(ring, seq); /* reference */
840 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 840 amdgpu_ring_write(ring, 0xffffffff); /* mask */
841 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 841 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
842 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 842 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
843} 843}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ecaef084dab1..be20a387d961 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1105,7 +1105,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1105 amdgpu_ring_write(ring, addr & 0xfffffffc); 1105 amdgpu_ring_write(ring, addr & 0xfffffffc);
1106 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 1106 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1107 amdgpu_ring_write(ring, seq); /* reference */ 1107 amdgpu_ring_write(ring, seq); /* reference */
1108 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 1108 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1109 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1109 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1110 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 1110 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1111} 1111}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2a8184082cd1..399f876f9cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,7 +1121,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1121 amdgpu_ring_write(ring, addr & 0xfffffffc); 1121 amdgpu_ring_write(ring, addr & 0xfffffffc);
1122 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 1122 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1123 amdgpu_ring_write(ring, seq); /* reference */ 1123 amdgpu_ring_write(ring, seq); /* reference */
1124 amdgpu_ring_write(ring, 0xfffffff); /* mask */ 1124 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1125 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1125 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1126 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 1126 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1127} 1127}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index b154667a8fd9..a675ec6d2811 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,71 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
1252 } 1252 }
1253} 1253}
1254 1254
1255static int si_get_pcie_lanes(struct amdgpu_device *adev)
1256{
1257 u32 link_width_cntl;
1258
1259 if (adev->flags & AMD_IS_APU)
1260 return 0;
1261
1262 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1263
1264 switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
1265 case LC_LINK_WIDTH_X1:
1266 return 1;
1267 case LC_LINK_WIDTH_X2:
1268 return 2;
1269 case LC_LINK_WIDTH_X4:
1270 return 4;
1271 case LC_LINK_WIDTH_X8:
1272 return 8;
1273 case LC_LINK_WIDTH_X0:
1274 case LC_LINK_WIDTH_X16:
1275 default:
1276 return 16;
1277 }
1278}
1279
1280static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
1281{
1282 u32 link_width_cntl, mask;
1283
1284 if (adev->flags & AMD_IS_APU)
1285 return;
1286
1287 switch (lanes) {
1288 case 0:
1289 mask = LC_LINK_WIDTH_X0;
1290 break;
1291 case 1:
1292 mask = LC_LINK_WIDTH_X1;
1293 break;
1294 case 2:
1295 mask = LC_LINK_WIDTH_X2;
1296 break;
1297 case 4:
1298 mask = LC_LINK_WIDTH_X4;
1299 break;
1300 case 8:
1301 mask = LC_LINK_WIDTH_X8;
1302 break;
1303 case 16:
1304 mask = LC_LINK_WIDTH_X16;
1305 break;
1306 default:
1307 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
1308 return;
1309 }
1310
1311 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1312 link_width_cntl &= ~LC_LINK_WIDTH_MASK;
1313 link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
1314 link_width_cntl |= (LC_RECONFIG_NOW |
1315 LC_RECONFIG_ARC_MISSING_ESCAPE);
1316
1317 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1318}
1319
1255static const struct amdgpu_asic_funcs si_asic_funcs = 1320static const struct amdgpu_asic_funcs si_asic_funcs =
1256{ 1321{
1257 .read_disabled_bios = &si_read_disabled_bios, 1322 .read_disabled_bios = &si_read_disabled_bios,
@@ -1262,6 +1327,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
1262 .get_xclk = &si_get_xclk, 1327 .get_xclk = &si_get_xclk,
1263 .set_uvd_clocks = &si_set_uvd_clocks, 1328 .set_uvd_clocks = &si_set_uvd_clocks,
1264 .set_vce_clocks = NULL, 1329 .set_vce_clocks = NULL,
1330 .get_pcie_lanes = &si_get_pcie_lanes,
1331 .set_pcie_lanes = &si_set_pcie_lanes,
1265 .get_config_memsize = &si_get_config_memsize, 1332 .get_config_memsize = &si_get_config_memsize,
1266 .flush_hdp = &si_flush_hdp, 1333 .flush_hdp = &si_flush_hdp,
1267 .invalidate_hdp = &si_invalidate_hdp, 1334 .invalidate_hdp = &si_invalidate_hdp,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 672eaffac0a5..797d505bf9ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6372,9 +6372,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
6372{ 6372{
6373 u32 lane_width; 6373 u32 lane_width;
6374 u32 new_lane_width = 6374 u32 new_lane_width =
6375 (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 6375 ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6376 u32 current_lane_width = 6376 u32 current_lane_width =
6377 (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 6377 ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6378 6378
6379 if (new_lane_width != current_lane_width) { 6379 if (new_lane_width != current_lane_width) {
6380 amdgpu_set_pcie_lanes(adev, new_lane_width); 6380 amdgpu_set_pcie_lanes(adev, new_lane_width);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4c45db7f1157..45aafca7f315 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
38 adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); 38 adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
39 adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); 39 adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
40 adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); 40 adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
41 adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
41 adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i])); 42 adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
42 adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i])); 43 adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
43 adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); 44 adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
49 adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); 50 adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
50 adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i])); 51 adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
51 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i])); 52 adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
52 53 adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
53 } 54 }
54 return 0; 55 return 0;
55} 56}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e42a28e3adc5..4e2f379ce217 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
1403 return ret; 1403 return ret;
1404} 1404}
1405 1405
1406
1407static void register_backlight_device(struct amdgpu_display_manager *dm,
1408 struct dc_link *link)
1409{
1410#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1411 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1412
1413 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
1414 link->type != dc_connection_none) {
 1415		/* Even if registration failed, we should continue with
 1416		 * DM initialization because not having a backlight control
 1417		 * is better than a black screen.
1418 */
1419 amdgpu_dm_register_backlight_device(dm);
1420
1421 if (dm->backlight_dev)
1422 dm->backlight_link = link;
1423 }
1424#endif
1425}
1426
1427
1406/* In this architecture, the association 1428/* In this architecture, the association
1407 * connector -> encoder -> crtc 1429 * connector -> encoder -> crtc
1408 * id not really requried. The crtc and connector will hold the 1430 * id not really requried. The crtc and connector will hold the
@@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1456 1478
1457 /* loops over all connectors on the board */ 1479 /* loops over all connectors on the board */
1458 for (i = 0; i < link_cnt; i++) { 1480 for (i = 0; i < link_cnt; i++) {
1481 struct dc_link *link = NULL;
1459 1482
1460 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 1483 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1461 DRM_ERROR( 1484 DRM_ERROR(
@@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1482 goto fail; 1505 goto fail;
1483 } 1506 }
1484 1507
1485 if (dc_link_detect(dc_get_link_at_index(dm->dc, i), 1508 link = dc_get_link_at_index(dm->dc, i);
1486 DETECT_REASON_BOOT)) 1509
1510 if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1487 amdgpu_dm_update_connector_after_detect(aconnector); 1511 amdgpu_dm_update_connector_after_detect(aconnector);
1512 register_backlight_device(dm, link);
1513 }
1514
1515
1488 } 1516 }
1489 1517
1490 /* Software is initialized. Now we can register interrupt handlers. */ 1518 /* Software is initialized. Now we can register interrupt handlers. */
@@ -2685,7 +2713,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2685#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 2713#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2686 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 2714 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2687 2715
2688 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { 2716 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2717 link->type != dc_connection_none) {
2689 amdgpu_dm_register_backlight_device(dm); 2718 amdgpu_dm_register_backlight_device(dm);
2690 2719
2691 if (dm->backlight_dev) { 2720 if (dm->backlight_dev) {
@@ -3561,6 +3590,7 @@ create_i2c(struct ddc_service *ddc_service,
3561 return i2c; 3590 return i2c;
3562} 3591}
3563 3592
3593
3564/* Note: this function assumes that dc_link_detect() was called for the 3594/* Note: this function assumes that dc_link_detect() was called for the
3565 * dc_link which will be represented by this aconnector. 3595 * dc_link which will be represented by this aconnector.
3566 */ 3596 */
@@ -3630,28 +3660,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3630 || connector_type == DRM_MODE_CONNECTOR_eDP) 3660 || connector_type == DRM_MODE_CONNECTOR_eDP)
3631 amdgpu_dm_initialize_dp_connector(dm, aconnector); 3661 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3632 3662
3633#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3634 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3635
3636 /* NOTE: this currently will create backlight device even if a panel
3637 * is not connected to the eDP/LVDS connector.
3638 *
3639 * This is less than ideal but we don't have sink information at this
3640 * stage since detection happens after. We can't do detection earlier
3641 * since MST detection needs connectors to be created first.
3642 */
3643 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3644 /* Event if registration failed, we should continue with
3645 * DM initialization because not having a backlight control
3646 * is better then a black screen.
3647 */
3648 amdgpu_dm_register_backlight_device(dm);
3649
3650 if (dm->backlight_dev)
3651 dm->backlight_link = link;
3652 }
3653#endif
3654
3655out_free: 3663out_free:
3656 if (res) { 3664 if (res) {
3657 kfree(i2c); 3665 kfree(i2c);
@@ -4840,33 +4848,6 @@ static int dm_update_planes_state(struct dc *dc,
4840 return ret; 4848 return ret;
4841} 4849}
4842 4850
4843static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4844 struct drm_crtc *crtc)
4845{
4846 struct drm_plane *plane;
4847 struct drm_crtc_state *crtc_state;
4848
4849 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
4850
4851 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
4852 struct drm_plane_state *plane_state =
4853 drm_atomic_get_plane_state(state, plane);
4854
4855 if (IS_ERR(plane_state))
4856 return -EDEADLK;
4857
4858 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4859 if (IS_ERR(crtc_state))
4860 return PTR_ERR(crtc_state);
4861
4862 if (crtc->primary == plane && crtc_state->active) {
4863 if (!plane_state->fb)
4864 return -EINVAL;
4865 }
4866 }
4867 return 0;
4868}
4869
4870static int amdgpu_dm_atomic_check(struct drm_device *dev, 4851static int amdgpu_dm_atomic_check(struct drm_device *dev,
4871 struct drm_atomic_state *state) 4852 struct drm_atomic_state *state)
4872{ 4853{
@@ -4890,10 +4871,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
4890 goto fail; 4871 goto fail;
4891 4872
4892 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4873 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4893 ret = dm_atomic_check_plane_state_fb(state, crtc);
4894 if (ret)
4895 goto fail;
4896
4897 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 4874 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4898 !new_crtc_state->color_mgmt_changed) 4875 !new_crtc_state->color_mgmt_changed)
4899 continue; 4876 continue;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index eeb04471b2f5..6d1c4981a185 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1997,6 +1997,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1997 return true; 1997 return true;
1998} 1998}
1999 1999
2000bool dc_link_set_abm_disable(const struct dc_link *link)
2001{
2002 struct dc *core_dc = link->ctx->dc;
2003 struct abm *abm = core_dc->res_pool->abm;
2004
 2005	if ((abm == NULL) || (abm->funcs->set_abm_immediate_disable == NULL))
2006 return false;
2007
2008 abm->funcs->set_abm_immediate_disable(abm);
2009
2010 return true;
2011}
2012
2000bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) 2013bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
2001{ 2014{
2002 struct dc *core_dc = link->ctx->dc; 2015 struct dc *core_dc = link->ctx->dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index fb4d9eafdc6e..dc34515ef01f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -132,6 +132,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
132bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, 132bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
133 uint32_t frame_ramp, const struct dc_stream_state *stream); 133 uint32_t frame_ramp, const struct dc_stream_state *stream);
134 134
135bool dc_link_set_abm_disable(const struct dc_link *dc_link);
136
135bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); 137bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
136 138
137bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); 139bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 444558ca6533..162f6a6c4208 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -735,6 +735,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
735 if (info_frame->avi.valid) { 735 if (info_frame->avi.valid) {
736 const uint32_t *content = 736 const uint32_t *content =
737 (const uint32_t *) &info_frame->avi.sb[0]; 737 (const uint32_t *) &info_frame->avi.sb[0];
738 /*we need turn on clock before programming AFMT block*/
739 REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
738 740
739 REG_WRITE(AFMT_AVI_INFO0, content[0]); 741 REG_WRITE(AFMT_AVI_INFO0, content[0]);
740 742
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 775d3bf0bd39..9150d2694450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
102 return 256 * ((pixels + 255) / 256); 102 return 256 * ((pixels + 255) / 256);
103} 103}
104 104
105static void reset_lb_on_vblank(struct dc_context *ctx)
106{
107 uint32_t value, frame_count;
108 uint32_t retry = 0;
109 uint32_t status_pos =
110 dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
111
112
 113	/* Wait for one frame only if the CRTC is enabled and the counter is moving. */
114 if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
115 /* Resetting LB on VBlank */
116 value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
117 set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
118 set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
119 dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
120
121 frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
122
123
124 for (retry = 100; retry > 0; retry--) {
125 if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
126 break;
127 msleep(1);
128 }
129 if (!retry)
130 dm_error("Frame count did not increase for 100ms.\n");
131
132 /* Resetting LB on VBlank */
133 value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
134 set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
135 set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
136 dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
137
138 }
139
140}
141
105static void wait_for_fbc_state_changed( 142static void wait_for_fbc_state_changed(
106 struct dce110_compressor *cp110, 143 struct dce110_compressor *cp110,
107 bool enabled) 144 bool enabled)
@@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
232{ 269{
233 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor); 270 struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
234 271
235 if (compressor->options.bits.FBC_SUPPORT && 272 if (compressor->options.bits.FBC_SUPPORT) {
236 dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) { 273 if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
237 uint32_t reg_data; 274 uint32_t reg_data;
238 /* Turn off compression */ 275 /* Turn off compression */
239 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); 276 reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
240 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN); 277 set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
241 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data); 278 dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
242 279
243 /* Reset enum controller_id to undefined */ 280 /* Reset enum controller_id to undefined */
244 compressor->attached_inst = 0; 281 compressor->attached_inst = 0;
245 compressor->is_enabled = false; 282 compressor->is_enabled = false;
246 283
247 wait_for_fbc_state_changed(cp110, false); 284 wait_for_fbc_state_changed(cp110, false);
285 }
286
287 /* Sync line buffer - dce100/110 only*/
288 reset_lb_on_vblank(compressor->ctx);
248 } 289 }
249} 290}
250 291
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 30dd62f0f5fa..d0575999f172 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -453,10 +453,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
453 453
454 } else { 454 } else {
455 /* 10 segments 455 /* 10 segments
456 * segment is from 2^-10 to 2^0 456 * segment is from 2^-10 to 2^1
457 * We include an extra segment for range [2^0, 2^1). This is to
458 * ensure that colors with normalized values of 1 don't miss the
459 * LUT.
457 */ 460 */
458 region_start = -10; 461 region_start = -10;
459 region_end = 0; 462 region_end = 1;
460 463
461 seg_distr[0] = 4; 464 seg_distr[0] = 4;
462 seg_distr[1] = 4; 465 seg_distr[1] = 4;
@@ -468,7 +471,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
468 seg_distr[7] = 4; 471 seg_distr[7] = 4;
469 seg_distr[8] = 4; 472 seg_distr[8] = 4;
470 seg_distr[9] = 4; 473 seg_distr[9] = 4;
471 seg_distr[10] = -1; 474 seg_distr[10] = 0;
472 seg_distr[11] = -1; 475 seg_distr[11] = -1;
473 seg_distr[12] = -1; 476 seg_distr[12] = -1;
474 seg_distr[13] = -1; 477 seg_distr[13] = -1;
@@ -1016,8 +1019,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
1016 struct dc_stream_state *stream = pipe_ctx->stream; 1019 struct dc_stream_state *stream = pipe_ctx->stream;
1017 struct dc_link *link = stream->sink->link; 1020 struct dc_link *link = stream->sink->link;
1018 1021
1019 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) 1022 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1020 link->dc->hwss.edp_backlight_control(link, false); 1023 link->dc->hwss.edp_backlight_control(link, false);
1024 dc_link_set_abm_disable(link);
1025 }
1021 1026
1022 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1027 if (dc_is_dp_signal(pipe_ctx->stream->signal))
1023 pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); 1028 pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 3ae3da4e7c14..0f5ad54d3fd3 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1264,9 +1264,9 @@ struct atom_smc_dpm_info_v4_1
1264 uint8_t ledpin2; 1264 uint8_t ledpin2;
1265 uint8_t padding8_4; 1265 uint8_t padding8_4;
1266 1266
1267 uint8_t gfxclkspreadenabled; 1267 uint8_t pllgfxclkspreadenabled;
1268 uint8_t gfxclkspreadpercent; 1268 uint8_t pllgfxclkspreadpercent;
1269 uint16_t gfxclkspreadfreq; 1269 uint16_t pllgfxclkspreadfreq;
1270 1270
1271 uint8_t uclkspreadenabled; 1271 uint8_t uclkspreadenabled;
1272 uint8_t uclkspreadpercent; 1272 uint8_t uclkspreadpercent;
@@ -1276,7 +1276,11 @@ struct atom_smc_dpm_info_v4_1
1276 uint8_t socclkspreadpercent; 1276 uint8_t socclkspreadpercent;
1277 uint16_t socclkspreadfreq; 1277 uint16_t socclkspreadfreq;
1278 1278
1279 uint32_t boardreserved[3]; 1279 uint8_t acggfxclkspreadenabled;
1280 uint8_t acggfxclkspreadpercent;
1281 uint16_t acggfxclkspreadfreq;
1282
1283 uint32_t boardreserved[10];
1280}; 1284};
1281 1285
1282 1286
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index faf9c880e4f7..210fb3ecd213 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -32,7 +32,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ 32 vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
33 vega10_thermal.o smu10_hwmgr.o pp_psm.o\ 33 vega10_thermal.o smu10_hwmgr.o pp_psm.o\
34 vega12_processpptables.o vega12_hwmgr.o \ 34 vega12_processpptables.o vega12_hwmgr.o \
35 vega12_powertune.o vega12_thermal.o \ 35 vega12_thermal.o \
36 pp_overdriver.o smu_helper.o 36 pp_overdriver.o smu_helper.o
37 37
38AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) 38AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 55f9b30513ff..ad42caac033e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -616,9 +616,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
616 param->ledpin1 = info->ledpin1; 616 param->ledpin1 = info->ledpin1;
617 param->ledpin2 = info->ledpin2; 617 param->ledpin2 = info->ledpin2;
618 618
619 param->gfxclkspreadenabled = info->gfxclkspreadenabled; 619 param->pllgfxclkspreadenabled = info->pllgfxclkspreadenabled;
620 param->gfxclkspreadpercent = info->gfxclkspreadpercent; 620 param->pllgfxclkspreadpercent = info->pllgfxclkspreadpercent;
621 param->gfxclkspreadfreq = info->gfxclkspreadfreq; 621 param->pllgfxclkspreadfreq = info->pllgfxclkspreadfreq;
622 622
623 param->uclkspreadenabled = info->uclkspreadenabled; 623 param->uclkspreadenabled = info->uclkspreadenabled;
624 param->uclkspreadpercent = info->uclkspreadpercent; 624 param->uclkspreadpercent = info->uclkspreadpercent;
@@ -628,5 +628,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
628 param->socclkspreadpercent = info->socclkspreadpercent; 628 param->socclkspreadpercent = info->socclkspreadpercent;
629 param->socclkspreadfreq = info->socclkspreadfreq; 629 param->socclkspreadfreq = info->socclkspreadfreq;
630 630
631 param->acggfxclkspreadenabled = info->acggfxclkspreadenabled;
632 param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
633 param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
634
631 return 0; 635 return 0;
632} 636}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index a957d8f08029..8df1e84f27c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -192,9 +192,9 @@ struct pp_atomfwctrl_smc_dpm_parameters
192 uint8_t ledpin1; 192 uint8_t ledpin1;
193 uint8_t ledpin2; 193 uint8_t ledpin2;
194 194
195 uint8_t gfxclkspreadenabled; 195 uint8_t pllgfxclkspreadenabled;
196 uint8_t gfxclkspreadpercent; 196 uint8_t pllgfxclkspreadpercent;
197 uint16_t gfxclkspreadfreq; 197 uint16_t pllgfxclkspreadfreq;
198 198
199 uint8_t uclkspreadenabled; 199 uint8_t uclkspreadenabled;
200 uint8_t uclkspreadpercent; 200 uint8_t uclkspreadpercent;
@@ -203,6 +203,10 @@ struct pp_atomfwctrl_smc_dpm_parameters
203 uint8_t socclkspreadenabled; 203 uint8_t socclkspreadenabled;
204 uint8_t socclkspreadpercent; 204 uint8_t socclkspreadpercent;
205 uint16_t socclkspreadfreq; 205 uint16_t socclkspreadfreq;
206
207 uint8_t acggfxclkspreadenabled;
208 uint8_t acggfxclkspreadpercent;
209 uint16_t acggfxclkspreadfreq;
206}; 210};
207 211
208int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, 212int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 2b0c366d6149..add90675fd2a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3374,7 +3374,8 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
3374 "Failed to start pm status log!", 3374 "Failed to start pm status log!",
3375 return -1); 3375 return -1);
3376 3376
3377 msleep_interruptible(20); 3377 /* Sampling period from 50ms to 4sec */
3378 msleep_interruptible(200);
3378 3379
3379 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 3380 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
3380 PPSMC_MSG_PmStatusLogSample), 3381 PPSMC_MSG_PmStatusLogSample),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 75a465f771f0..7b26607c646a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -319,13 +319,13 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
319 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo), 319 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
320 &size, &frev, &crev); 320 &size, &frev, &crev);
321 321
322 if (crev != 9) { 322 if (info == NULL) {
323 pr_err("Unsupported IGP table: %d %d\n", frev, crev); 323 pr_err("Could not retrieve the Integrated System Info Table!\n");
324 return -EINVAL; 324 return -EINVAL;
325 } 325 }
326 326
327 if (info == NULL) { 327 if (crev != 9) {
328 pr_err("Could not retrieve the Integrated System Info Table!\n"); 328 pr_err("Unsupported IGP table: %d %d\n", frev, crev);
329 return -EINVAL; 329 return -EINVAL;
330 } 330 }
331 331
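The reorder above matters because the table query can fail and return NULL; in that case frev/crev are not meaningful, so the pointer has to be validated before the revision. A hedged sketch of that ordering follows (illustrative, not patch content); get_integrated_system_info_table() is a hypothetical stand-in for the atom-table lookup made just before this hunk.

#include <linux/printk.h>	/* pr_err() */
#include "hwmgr.h"		/* struct pp_hwmgr */

/* Hypothetical helper name; the real lookup uses the master-table index
 * shown in the context lines above. */
void *get_integrated_system_info_table(struct pp_hwmgr *hwmgr,
				       uint16_t *size, uint8_t *frev,
				       uint8_t *crev);

static int smu8_check_igp_table_sketch(struct pp_hwmgr *hwmgr)
{
	uint8_t frev = 0, crev = 0;
	uint16_t size = 0;
	void *info = get_integrated_system_info_table(hwmgr, &size,
						      &frev, &crev);

	if (info == NULL) {	/* pointer first: nothing else is trustworthy */
		pr_err("Could not retrieve the Integrated System Info Table!\n");
		return -EINVAL;
	}

	if (crev != 9) {	/* only now are frev/crev worth reporting */
		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}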
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 15ce1e825021..200de46bd06b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -33,7 +33,6 @@
33#include "ppatomfwctrl.h" 33#include "ppatomfwctrl.h"
34#include "atomfirmware.h" 34#include "atomfirmware.h"
35#include "cgs_common.h" 35#include "cgs_common.h"
36#include "vega12_powertune.h"
37#include "vega12_inc.h" 36#include "vega12_inc.h"
38#include "pp_soc15.h" 37#include "pp_soc15.h"
39#include "pppcielanes.h" 38#include "pppcielanes.h"
@@ -893,6 +892,28 @@ static int vega12_odn_initialize_default_settings(
893 return 0; 892 return 0;
894} 893}
895 894
895static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
896 uint32_t adjust_percent)
897{
898 return smum_send_msg_to_smc_with_parameter(hwmgr,
899 PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
900}
901
902static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
903{
904 int adjust_percent, result = 0;
905
906 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
907 adjust_percent =
908 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
909 hwmgr->platform_descriptor.TDPAdjustment :
910 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
911 result = vega12_set_overdrive_target_percentage(hwmgr,
912 (uint32_t)adjust_percent);
913 }
914 return result;
915}
916
896static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 917static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
897{ 918{
898 int tmp_result, result = 0; 919 int tmp_result, result = 0;
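The new vega12_power_control_set_level() helper above converts the platform TDP adjustment into a signed overdrive percentage: the polarity flag selects raise versus cut, and the result is handed to the SMU via PPSMC_MSG_OverDriveSetPercentage. Below is a worked sketch of the sign handling (illustrative, not patch content), under the assumption, not confirmed by this diff, that the firmware reads the 32-bit parameter as a signed value.

/* Hedged sketch of the polarity/sign logic in the hunk above. */
static uint32_t tdp_adjust_to_smc_param_sketch(uint32_t tdp_adjustment,
					       uint8_t polarity)
{
	/* polarity != 0: raise the power limit; polarity == 0: lower it. */
	int adjust_percent = polarity ? (int)tdp_adjustment
				      : -(int)tdp_adjustment;

	/*
	 * The (uint32_t) cast is two's-complement: with tdp_adjustment = 10
	 * and polarity = 0, adjust_percent = -10 and the parameter sent is
	 * 0xFFFFFFF6. That the SMU decodes it back to -10 is assumed here.
	 */
	return (uint32_t)adjust_percent;
}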
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
deleted file mode 100644
index 76e60c0181ac..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
+++ /dev/null
@@ -1,1364 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "hwmgr.h"
25#include "vega12_hwmgr.h"
26#include "vega12_powertune.h"
27#include "vega12_smumgr.h"
28#include "vega12_ppsmc.h"
29#include "vega12_inc.h"
30#include "pp_debug.h"
31#include "pp_soc15.h"
32
33static const struct vega12_didt_config_reg SEDiDtTuningCtrlConfig_Vega12[] =
34{
35/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
36 * Offset Mask Shift Value
37 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
38 */
39 /* DIDT_SQ */
40 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
41 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
42
43 /* DIDT_TD */
44 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
45 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
46
47 /* DIDT_TCP */
48 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
49 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
50
51 /* DIDT_DB */
52 { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
53 { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
54
55 { 0xFFFFFFFF } /* End of list */
56};
57
58static const struct vega12_didt_config_reg SEDiDtCtrl3Config_vega12[] =
59{
60/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
61 * Offset Mask Shift Value
62 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
63 */
64 /*DIDT_SQ_CTRL3 */
65 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
66 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
67 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
68 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
69 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
70 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
71 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
72 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
73 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
74 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
75 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
76 { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
77
78 /*DIDT_TCP_CTRL3 */
79 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
80 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
81 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
82 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
83 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
84 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
85 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
86 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
87 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
88 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
89 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
90 { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
91
92 /*DIDT_TD_CTRL3 */
93 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
94 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
95 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
96 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
97 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
98 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
99 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
100 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
101 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
102 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
103 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
104 { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
105
106 /*DIDT_DB_CTRL3 */
107 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
108 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
109 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
110 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
111 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
112 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
113 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
114 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
115 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
116 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
117 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
118 { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
119
120 { 0xFFFFFFFF } /* End of list */
121};
122
123static const struct vega12_didt_config_reg SEDiDtCtrl2Config_Vega12[] =
124{
125/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
126 * Offset Mask Shift Value
127 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
128 */
129 /* DIDT_SQ */
130 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
131 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
132 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
133
134 /* DIDT_TD */
135 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
136 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
137 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
138
139 /* DIDT_TCP */
140 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
141 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
142 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
143
144 /* DIDT_DB */
145 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
146 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
147 { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
148
149 { 0xFFFFFFFF } /* End of list */
150};
151
152static const struct vega12_didt_config_reg SEDiDtCtrl1Config_Vega12[] =
153{
154/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
155 * Offset Mask Shift Value
156 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
157 */
158 /* DIDT_SQ */
159 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
160 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
161 /* DIDT_TD */
162 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
163 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
164 /* DIDT_TCP */
165 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
166 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
167 /* DIDT_DB */
168 { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
169 { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
170
171 { 0xFFFFFFFF } /* End of list */
172};
173
174
175static const struct vega12_didt_config_reg SEDiDtWeightConfig_Vega12[] =
176{
177/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
178 * Offset Mask Shift Value
179 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
180 */
181 /* DIDT_SQ */
182 { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
183 { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
184 { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
185
186 /* DIDT_TD */
187 { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
188 { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
189 { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
190
191 /* DIDT_TCP */
192 { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
193 { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
194 { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
195
196 /* DIDT_DB */
197 { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
198 { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
199 { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
200
201 { 0xFFFFFFFF } /* End of list */
202};
203
204static const struct vega12_didt_config_reg SEDiDtCtrl0Config_Vega12[] =
205{
206/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
207 * Offset Mask Shift Value
208 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
209 */
210 /* DIDT_SQ */
211 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
212 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
213 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
214 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
215 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
216 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
217 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
218 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
219 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
220 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
221 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
222 /* DIDT_TD */
223 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
224 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
225 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
226 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
227 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
228 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
229 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
230 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
231 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
232 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
233 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
234 /* DIDT_TCP */
235 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
236 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
237 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
238 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
239 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
240 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
241 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
242 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
243 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
244 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
245 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
246 /* DIDT_DB */
247 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
248 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
249 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
250 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
251 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
252 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
253 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
254 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
255 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
256 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
257 { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
258
259 { 0xFFFFFFFF } /* End of list */
260};
261
262
263static const struct vega12_didt_config_reg SEDiDtStallCtrlConfig_vega12[] =
264{
265/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
266 * Offset Mask Shift Value
267 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
268 */
269 /* DIDT_SQ */
270 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
271 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
272 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
273 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
274
275 /* DIDT_TD */
276 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
277 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
278 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
279 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
280
281 /* DIDT_TCP */
282 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
283 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
284 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
285 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
286
287 /* DIDT_DB */
288 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
289 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
290 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
291 { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
292
293 { 0xFFFFFFFF } /* End of list */
294};
295
296static const struct vega12_didt_config_reg SEDiDtStallPatternConfig_vega12[] =
297{
298/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
299 * Offset Mask Shift Value
300 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
301 */
302 /* DIDT_SQ_STALL_PATTERN_1_2 */
303 { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
304 { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
305
306 /* DIDT_SQ_STALL_PATTERN_3_4 */
307 { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
308 { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
309
310 /* DIDT_SQ_STALL_PATTERN_5_6 */
311 { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
312 { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
313
314 /* DIDT_SQ_STALL_PATTERN_7 */
315 { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
316
317 /* DIDT_TCP_STALL_PATTERN_1_2 */
318 { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
319 { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
320
321 /* DIDT_TCP_STALL_PATTERN_3_4 */
322 { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
323 { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
324
325 /* DIDT_TCP_STALL_PATTERN_5_6 */
326 { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
327 { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
328
329 /* DIDT_TCP_STALL_PATTERN_7 */
330 { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
331
332 /* DIDT_TD_STALL_PATTERN_1_2 */
333 { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
334 { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
335
336 /* DIDT_TD_STALL_PATTERN_3_4 */
337 { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
338 { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
339
340 /* DIDT_TD_STALL_PATTERN_5_6 */
341 { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
342 { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
343
344 /* DIDT_TD_STALL_PATTERN_7 */
345 { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
346
347 /* DIDT_DB_STALL_PATTERN_1_2 */
348 { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
349 { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
350
351 /* DIDT_DB_STALL_PATTERN_3_4 */
352 { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
353 { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
354
355 /* DIDT_DB_STALL_PATTERN_5_6 */
356 { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
357 { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
358
359 /* DIDT_DB_STALL_PATTERN_7 */
360 { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
361
362 { 0xFFFFFFFF } /* End of list */
363};
364
365static const struct vega12_didt_config_reg SELCacConfig_Vega12[] =
366{
367/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
368 * Offset Mask Shift Value
369 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
370 */
371 /* SQ */
372 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
373 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
374 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
375 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
376 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
377 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
378 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
379 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
380 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
381 /* TD */
382 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
383 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
384 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
385 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
386 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
387 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
388 /* TCP */
389 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
390 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
391 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
392 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
393 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
394 /* DB */
395 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
396 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
397 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
398 { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
399
400 { 0xFFFFFFFF } /* End of list */
401};
402
403
404static const struct vega12_didt_config_reg SEEDCStallPatternConfig_Vega12[] =
405{
406/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
407 * Offset Mask Shift Value
408 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
409 */
410 /* SQ */
411 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
412 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
413 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
414 { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
415 /* TD */
416 { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
417 { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
418 { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
419 { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
420 /* TCP */
421 { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
422 { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
423 { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
424 { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
425 /* DB */
426 { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
427 { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
428 { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
429 { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
430
431 { 0xFFFFFFFF } /* End of list */
432};
433
434static const struct vega12_didt_config_reg SEEDCForceStallPatternConfig_Vega12[] =
435{
436/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
437 * Offset Mask Shift Value
438 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
439 */
440 /* SQ */
441 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
442 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
443 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
444 { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
445 /* TD */
446 { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
447 { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
448 { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
449 { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
450
451 { 0xFFFFFFFF } /* End of list */
452};
453
454static const struct vega12_didt_config_reg SEEDCStallDelayConfig_Vega12[] =
455{
456/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
457 * Offset Mask Shift Value
458 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
459 */
460 /* SQ */
461 { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
462 { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
463 /* TD */
464 { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
465 { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
466 /* TCP */
467 { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
468 { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
469 /* DB */
470 { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
471
472 { 0xFFFFFFFF } /* End of list */
473};
474
475static const struct vega12_didt_config_reg SEEDCThresholdConfig_Vega12[] =
476{
477/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
478 * Offset Mask Shift Value
479 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
480 */
481 { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
482 { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
483 { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
484 { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
485
486 { 0xFFFFFFFF } /* End of list */
487};
488
489static const struct vega12_didt_config_reg SEEDCCtrlResetConfig_Vega12[] =
490{
491/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
492 * Offset Mask Shift Value
493 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
494 */
495 /* SQ */
496 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
497 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
498 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
499 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
500 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
501 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
502 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
503 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
504 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
505 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
506 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
507
508 { 0xFFFFFFFF } /* End of list */
509};
510
511static const struct vega12_didt_config_reg SEEDCCtrlConfig_Vega12[] =
512{
513/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
514 * Offset Mask Shift Value
515 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
516 */
517 /* SQ */
518 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
519 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
520 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
521 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
522 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
523 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
524 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
525 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
526 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
527 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
528 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
529
530 { 0xFFFFFFFF } /* End of list */
531};
532
533static const struct vega12_didt_config_reg SEEDCCtrlForceStallConfig_Vega12[] =
534{
535/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
536 * Offset Mask Shift Value
537 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
538 */
539 /* SQ */
540 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
541 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
542 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
543 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
544 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
545 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
546 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
547 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
548 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
549 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
550 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
551
552 /* TD */
553 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
554 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
555 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
556 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
557 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
558 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
559 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
560 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
561 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
562 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
563 { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
564
565 { 0xFFFFFFFF } /* End of list */
566};
567
568static const struct vega12_didt_config_reg GCDiDtDroopCtrlConfig_vega12[] =
569{
570/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
571 * Offset Mask Shift Value
572 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
573 */
574 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
575 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
576 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
577 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
578 { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
579
580 { 0xFFFFFFFF } /* End of list */
581};
582
583static const struct vega12_didt_config_reg GCDiDtCtrl0Config_vega12[] =
584{
585/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
586 * Offset Mask Shift Value
587 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
588 */
589 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
590 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
591 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
592 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
593 { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
594 { 0xFFFFFFFF } /* End of list */
595};
596
597
598static const struct vega12_didt_config_reg PSMSEEDCStallPatternConfig_Vega12[] =
599{
600/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
601 * Offset Mask Shift Value
602 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
603 */
604 /* SQ EDC STALL PATTERNs */
605 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
606 { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
607 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
608 { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
609
610 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
611 { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
612
613 { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
614
615 { 0xFFFFFFFF } /* End of list */
616};
617
618static const struct vega12_didt_config_reg PSMSEEDCStallDelayConfig_Vega12[] =
619{
620/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
621 * Offset Mask Shift Value
622 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
623 */
624 /* SQ EDC STALL DELAYs */
625 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
626 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
627 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
628 { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
629
630 { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
631
632 { 0xFFFFFFFF } /* End of list */
633};
634
635static const struct vega12_didt_config_reg PSMSEEDCThresholdConfig_Vega12[] =
636{
637/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
638 * Offset Mask Shift Value
639 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
640 */
641 /* SQ EDC THRESHOLD */
642 { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
643
644 { 0xFFFFFFFF } /* End of list */
645};
646
647static const struct vega12_didt_config_reg PSMSEEDCCtrlResetConfig_Vega12[] =
648{
649/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
650 * Offset Mask Shift Value
651 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
652 */
653 /* SQ EDC CTRL */
654 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
655 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
656 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
657 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
658 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
659 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
660 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
661 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
662 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
663 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
664 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
665
666 { 0xFFFFFFFF } /* End of list */
667};
668
669static const struct vega12_didt_config_reg PSMSEEDCCtrlConfig_Vega12[] =
670{
671/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
672 * Offset Mask Shift Value
673 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
674 */
675 /* SQ EDC CTRL */
676 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
677 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
678 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
679 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
680 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
681 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
682 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
683 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
684 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
685 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
686 { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
687
688 { 0xFFFFFFFF } /* End of list */
689};
690
691static const struct vega12_didt_config_reg PSMGCEDCThresholdConfig_vega12[] =
692{
693/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
694 * Offset Mask Shift Value
695 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
696 */
697 { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
698
699 { 0xFFFFFFFF } /* End of list */
700};
701
702static const struct vega12_didt_config_reg PSMGCEDCDroopCtrlConfig_vega12[] =
703{
704/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
705 * Offset Mask Shift Value
706 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
707 */
708 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
709 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
710 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
711 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
712 { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
713
714 { 0xFFFFFFFF } /* End of list */
715};
716
717static const struct vega12_didt_config_reg PSMGCEDCCtrlResetConfig_vega12[] =
718{
719/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
720 * Offset Mask Shift Value
721 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
722 */
723 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
724 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
725 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
726 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
727 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
728 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
729
730 { 0xFFFFFFFF } /* End of list */
731};
732
733static const struct vega12_didt_config_reg PSMGCEDCCtrlConfig_vega12[] =
734{
735/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
736 * Offset Mask Shift Value
737 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
738 */
739 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
740 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
741 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
742 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
743 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
744 { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
745
746 { 0xFFFFFFFF } /* End of list */
747};
748
749 static const struct vega12_didt_config_reg AvfsPSMResetConfig_vega12[] =
750{
751/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
752 * Offset Mask Shift Value
753 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
754 */
755 { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
756 { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
757 { 0x16A06, 0x00000001, 0x0, 0x02000000 },
758 { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
759
760 { 0xFFFFFFFF } /* End of list */
761};
762
763static const struct vega12_didt_config_reg AvfsPSMInitConfig_vega12[] =
764{
765/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
766 * Offset Mask Shift Value
767 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
768 */
769 { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
770 { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
771 { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
772 { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
773 { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
774 { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
775 { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
776
777 { 0xFFFFFFFF } /* End of list */
778};
779
780static int vega12_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs, enum vega12_didt_config_reg_type reg_type)
781{
782 uint32_t data;
783
784 PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega12_program_didt_config_registers] Invalid config register table!", return -EINVAL);
785
786 while (config_regs->offset != 0xFFFFFFFF) {
787 switch (reg_type) {
788 case VEGA12_CONFIGREG_DIDT:
789 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
790 data &= ~config_regs->mask;
791 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
792 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
793 break;
794 case VEGA12_CONFIGREG_GCCAC:
795 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
796 data &= ~config_regs->mask;
797 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
798 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
799 break;
800 case VEGA12_CONFIGREG_SECAC:
801 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
802 data &= ~config_regs->mask;
803 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
804 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
805 break;
806 default:
807 return -EINVAL;
808 }
809
810 config_regs++;
811 }
812
813 return 0;
814}
815
816static int vega12_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs)
817{
818 uint32_t data;
819
820 while (config_regs->offset != 0xFFFFFFFF) {
821 data = cgs_read_register(hwmgr->device, config_regs->offset);
822 data &= ~config_regs->mask;
823 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
824 cgs_write_register(hwmgr->device, config_regs->offset, data);
825 config_regs++;
826 }
827
828 return 0;
829}
830
831static void vega12_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
832{
833 uint32_t data;
834 int result;
835 uint32_t en = (enable ? 1 : 0);
836 uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
837
838 if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
839 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
840 DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
841 didt_block_info &= ~SQ_Enable_MASK;
842 didt_block_info |= en << SQ_Enable_SHIFT;
843 }
844
845 if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
846 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
847 DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
848 didt_block_info &= ~DB_Enable_MASK;
849 didt_block_info |= en << DB_Enable_SHIFT;
850 }
851
852 if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
853 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
854 DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
855 didt_block_info &= ~TD_Enable_MASK;
856 didt_block_info |= en << TD_Enable_SHIFT;
857 }
858
859 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
860 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
861 DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
862 didt_block_info &= ~TCP_Enable_MASK;
863 didt_block_info |= en << TCP_Enable_SHIFT;
864 }
865
866#if 0
867 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
868 CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
869 DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
870 }
871#endif
872
873 if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
874 if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
875 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
876 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
877 data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
878 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
879 }
880
881 if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
882 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
883 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
884 data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
885 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
886 }
887
888 if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
889 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
890 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
891 data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
892 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
893 }
894
895 if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
896 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
897 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
898 data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
899 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
900 }
901
902#if 0
903 if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
904 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
905 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
906 data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
907 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
908 }
909#endif
910 }
911
912 if (enable) {
913 /* For Vega12, SMC does not support any mask yet. */
914 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
915 PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
916 }
917}
918
919static int vega12_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
920{
921 int result;
922 uint32_t num_se = 0, count, data;
923 struct amdgpu_device *adev = hwmgr->adev;
924 uint32_t reg;
925
926 num_se = adev->gfx.config.max_shader_engines;
927
928 cgs_enter_safe_mode(hwmgr->device, true);
929
930 cgs_lock_grbm_idx(hwmgr->device, true);
931 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
932 for (count = 0; count < num_se; count++) {
933 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
934 cgs_write_register(hwmgr->device, reg, data);
935
936 result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
937 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
938 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
939 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega12, VEGA12_CONFIGREG_DIDT);
940 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega12, VEGA12_CONFIGREG_DIDT);
941 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
942 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
943 result |= vega12_program_didt_config_registers(hwmgr, SELCacConfig_Vega12, VEGA12_CONFIGREG_SECAC);
944 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
945
946 if (0 != result)
947 break;
948 }
949 cgs_write_register(hwmgr->device, reg, 0xE0000000);
950 cgs_lock_grbm_idx(hwmgr->device, false);
951
952 vega12_didt_set_mask(hwmgr, true);
953
954 cgs_enter_safe_mode(hwmgr->device, false);
955
956 return 0;
957}
958
959static int vega12_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
960{
961 cgs_enter_safe_mode(hwmgr->device, true);
962
963 vega12_didt_set_mask(hwmgr, false);
964
965 cgs_enter_safe_mode(hwmgr->device, false);
966
967 return 0;
968}
969
970static int vega12_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
971{
972 int result;
973 uint32_t num_se = 0, count, data;
974 struct amdgpu_device *adev = hwmgr->adev;
975 uint32_t reg;
976
977 num_se = adev->gfx.config.max_shader_engines;
978
979 cgs_enter_safe_mode(hwmgr->device, true);
980
981 cgs_lock_grbm_idx(hwmgr->device, true);
982 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
983 for (count = 0; count < num_se; count++) {
984 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
985 cgs_write_register(hwmgr->device, reg, data);
986
987 result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
988 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
989 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
990 result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
991 if (0 != result)
992 break;
993 }
994 cgs_write_register(hwmgr->device, reg, 0xE0000000);
995 cgs_lock_grbm_idx(hwmgr->device, false);
996
997 vega12_didt_set_mask(hwmgr, true);
998
999 cgs_enter_safe_mode(hwmgr->device, false);
1000
1001 vega12_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega12);
1002 if (PP_CAP(PHM_PlatformCaps_GCEDC))
1003 vega12_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega12);
1004
1005 if (PP_CAP(PHM_PlatformCaps_PSM))
1006 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
1007
1008 return 0;
1009}
1010
1011static int vega12_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
1012{
1013 uint32_t data;
1014
1015 cgs_enter_safe_mode(hwmgr->device, true);
1016
1017 vega12_didt_set_mask(hwmgr, false);
1018
1019 cgs_enter_safe_mode(hwmgr->device, false);
1020
1021 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1022 data = 0x00000000;
1023 cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
1024 }
1025
1026 if (PP_CAP(PHM_PlatformCaps_PSM))
1027 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1028
1029 return 0;
1030}
1031
1032static int vega12_enable_se_edc_config(struct pp_hwmgr *hwmgr)
1033{
1034 int result;
1035 uint32_t num_se = 0, count, data;
1036 struct amdgpu_device *adev = hwmgr->adev;
1037 uint32_t reg;
1038
1039 num_se = adev->gfx.config.max_shader_engines;
1040
1041 cgs_enter_safe_mode(hwmgr->device, true);
1042
1043 cgs_lock_grbm_idx(hwmgr->device, true);
1044 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1045 for (count = 0; count < num_se; count++) {
1046 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1047 cgs_write_register(hwmgr->device, reg, data);
1048 result = vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1049 result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1050 result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1051 result |= vega12_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1052 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1053 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1054
1055 if (0 != result)
1056 break;
1057 }
1058 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1059 cgs_lock_grbm_idx(hwmgr->device, false);
1060
1061 vega12_didt_set_mask(hwmgr, true);
1062
1063 cgs_enter_safe_mode(hwmgr->device, false);
1064
1065 return 0;
1066}
1067
1068static int vega12_disable_se_edc_config(struct pp_hwmgr *hwmgr)
1069{
1070 cgs_enter_safe_mode(hwmgr->device, true);
1071
1072 vega12_didt_set_mask(hwmgr, false);
1073
1074 cgs_enter_safe_mode(hwmgr->device, false);
1075
1076 return 0;
1077}
1078
1079static int vega12_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1080{
1081 int result;
1082 uint32_t num_se = 0;
1083 uint32_t count, data;
1084 struct amdgpu_device *adev = hwmgr->adev;
1085 uint32_t reg;
1086
1087 num_se = adev->gfx.config.max_shader_engines;
1088
1089 cgs_enter_safe_mode(hwmgr->device, true);
1090
1091 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1092
1093 cgs_lock_grbm_idx(hwmgr->device, true);
1094 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1095 for (count = 0; count < num_se; count++) {
1096 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1097 cgs_write_register(hwmgr->device, reg, data);
1098 		result = vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1099 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1100 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1101 result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1102
1103 if (0 != result)
1104 break;
1105 }
1106 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1107 cgs_lock_grbm_idx(hwmgr->device, false);
1108
1109 vega12_didt_set_mask(hwmgr, true);
1110
1111 cgs_enter_safe_mode(hwmgr->device, false);
1112
1113 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega12);
1114
1115 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1116 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega12);
1117 vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega12);
1118 }
1119
1120 if (PP_CAP(PHM_PlatformCaps_PSM))
1121 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
1122
1123 return 0;
1124}
1125
1126static int vega12_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1127{
1128 uint32_t data;
1129
1130 cgs_enter_safe_mode(hwmgr->device, true);
1131
1132 vega12_didt_set_mask(hwmgr, false);
1133
1134 cgs_enter_safe_mode(hwmgr->device, false);
1135
1136 if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
1137 data = 0x00000000;
1138 cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
1139 }
1140
1141 if (PP_CAP(PHM_PlatformCaps_PSM))
1142 vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
1143
1144 return 0;
1145}
1146
1147static int vega12_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1148{
1149 uint32_t reg;
1150 int result;
1151
1152 cgs_enter_safe_mode(hwmgr->device, true);
1153
1154 cgs_lock_grbm_idx(hwmgr->device, true);
1155 reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
1156 cgs_write_register(hwmgr->device, reg, 0xE0000000);
1157 cgs_lock_grbm_idx(hwmgr->device, false);
1158
1159 result = vega12_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1160 result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega12, VEGA12_CONFIGREG_DIDT);
1161 if (0 != result)
1162 return result;
1163
1164 vega12_didt_set_mask(hwmgr, false);
1165
1166 cgs_enter_safe_mode(hwmgr->device, false);
1167
1168 return 0;
1169}
1170
1171static int vega12_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
1172{
1173 int result;
1174
1175 result = vega12_disable_se_edc_config(hwmgr);
1176 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
1177
1178 return 0;
1179}
1180
1181int vega12_enable_didt_config(struct pp_hwmgr *hwmgr)
1182{
1183 int result = 0;
1184 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1185
1186 if (data->smu_features[GNLD_DIDT].supported) {
1187 if (data->smu_features[GNLD_DIDT].enabled)
1188 PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
1189
1190 switch (data->registry_data.didt_mode) {
1191 case 0:
1192 result = vega12_enable_cac_driving_se_didt_config(hwmgr);
1193 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
1194 break;
1195 case 2:
1196 result = vega12_enable_psm_gc_didt_config(hwmgr);
1197 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
1198 break;
1199 case 3:
1200 result = vega12_enable_se_edc_config(hwmgr);
1201 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
1202 break;
1203 case 1:
1204 case 4:
1205 case 5:
1206 result = vega12_enable_psm_gc_edc_config(hwmgr);
1207 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result);
1208 break;
1209 case 6:
1210 result = vega12_enable_se_edc_force_stall_config(hwmgr);
1211 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
1212 break;
1213 default:
1214 result = -EINVAL;
1215 break;
1216 }
1217
1218#if 0
1219 if (0 == result) {
1220 result = vega12_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
1221 PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
1222 data->smu_features[GNLD_DIDT].enabled = true;
1223 }
1224#endif
1225 }
1226
1227 return result;
1228}
1229
1230int vega12_disable_didt_config(struct pp_hwmgr *hwmgr)
1231{
1232 int result = 0;
1233 struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1234
1235 if (data->smu_features[GNLD_DIDT].supported) {
1236 if (!data->smu_features[GNLD_DIDT].enabled)
1237 PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
1238
1239 switch (data->registry_data.didt_mode) {
1240 case 0:
1241 result = vega12_disable_cac_driving_se_didt_config(hwmgr);
1242 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
1243 break;
1244 case 2:
1245 result = vega12_disable_psm_gc_didt_config(hwmgr);
1246 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
1247 break;
1248 case 3:
1249 result = vega12_disable_se_edc_config(hwmgr);
1250 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
1251 break;
1252 case 1:
1253 case 4:
1254 case 5:
1255 result = vega12_disable_psm_gc_edc_config(hwmgr);
1256 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result);
1257 break;
1258 case 6:
1259 result = vega12_disable_se_edc_force_stall_config(hwmgr);
1260 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
1261 break;
1262 default:
1263 result = -EINVAL;
1264 break;
1265 }
1266
1267 if (0 == result) {
1268 result = vega12_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
1269 PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
1270 data->smu_features[GNLD_DIDT].enabled = false;
1271 }
1272 }
1273
1274 return result;
1275}
1276
1277int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
1278{
1279 struct vega12_hwmgr *data =
1280 (struct vega12_hwmgr *)(hwmgr->backend);
1281
1282 if (data->smu_features[GNLD_PPT].enabled)
1283 return smum_send_msg_to_smc_with_parameter(hwmgr,
1284 PPSMC_MSG_SetPptLimit, n);
1285
1286 return 0;
1287}
1288
1289int vega12_enable_power_containment(struct pp_hwmgr *hwmgr)
1290{
1291 struct vega12_hwmgr *data =
1292 (struct vega12_hwmgr *)(hwmgr->backend);
1293 struct phm_ppt_v2_information *table_info =
1294 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1295 struct phm_tdp_table *tdp_table = table_info->tdp_table;
1296 uint32_t default_pwr_limit =
1297 (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
1298 int result = 0;
1299
1300 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1301 if (data->smu_features[GNLD_PPT].supported)
1302 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1303 true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
1304 "Attempt to enable PPT feature Failed!",
1305 data->smu_features[GNLD_PPT].supported = false);
1306
1307 if (data->smu_features[GNLD_TDC].supported)
1308 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1309 true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
1310 "Attempt to enable PPT feature Failed!",
1311 data->smu_features[GNLD_TDC].supported = false);
1312
1313 result = vega12_set_power_limit(hwmgr, default_pwr_limit);
1314 PP_ASSERT_WITH_CODE(!result,
1315 "Failed to set Default Power Limit in SMC!",
1316 return result);
1317 }
1318
1319 return result;
1320}
1321
1322int vega12_disable_power_containment(struct pp_hwmgr *hwmgr)
1323{
1324 struct vega12_hwmgr *data =
1325 (struct vega12_hwmgr *)(hwmgr->backend);
1326
1327 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1328 if (data->smu_features[GNLD_PPT].supported)
1329 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1330 false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
1331 "Attempt to disable PPT feature Failed!",
1332 data->smu_features[GNLD_PPT].supported = false);
1333
1334 if (data->smu_features[GNLD_TDC].supported)
1335 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1336 false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
1337 "Attempt to disable PPT feature Failed!",
1338 data->smu_features[GNLD_TDC].supported = false);
1339 }
1340
1341 return 0;
1342}
1343
1344static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
1345 uint32_t adjust_percent)
1346{
1347 return smum_send_msg_to_smc_with_parameter(hwmgr,
1348 PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
1349}
1350
1351int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
1352{
1353 int adjust_percent, result = 0;
1354
1355 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
1356 adjust_percent =
1357 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
1358 hwmgr->platform_descriptor.TDPAdjustment :
1359 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
1360 result = vega12_set_overdrive_target_percentage(hwmgr,
1361 (uint32_t)adjust_percent);
1362 }
1363 return result;
1364}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
deleted file mode 100644
index 78d31a6747dd..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#ifndef _VEGA12_POWERTUNE_H_
24#define _VEGA12_POWERTUNE_H_
25
26enum vega12_didt_config_reg_type {
27 VEGA12_CONFIGREG_DIDT = 0,
28 VEGA12_CONFIGREG_GCCAC,
29 VEGA12_CONFIGREG_SECAC
30};
31
32/* PowerContainment Features */
33#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
34#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
35#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
36
37struct vega12_didt_config_reg {
38 uint32_t offset;
39 uint32_t mask;
40 uint32_t shift;
41 uint32_t value;
42};
43
44int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
45int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
46int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
47int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
48
49int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
50int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
51
52#endif /* _VEGA12_POWERTUNE_H_ */
53
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index e7d794980b84..b34113f45904 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -208,9 +208,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
208 ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1; 208 ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
209 ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2; 209 ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
210 210
211 ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled; 211 ppsmc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table.pllgfxclkspreadenabled;
212 ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent; 212 ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table.pllgfxclkspreadpercent;
213 ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq; 213 ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table.pllgfxclkspreadfreq;
214 214
215 ppsmc_pptable->UclkSpreadEnabled = 0; 215 ppsmc_pptable->UclkSpreadEnabled = 0;
216 ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent; 216 ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
@@ -220,6 +220,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
220 ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent; 220 ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
221 ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq; 221 ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
222 222
223 ppsmc_pptable->AcgGfxclkSpreadEnabled = smc_dpm_table.acggfxclkspreadenabled;
224 ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
225 ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
226
227
223 return 0; 228 return 0;
224} 229}
225 230
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index cd2e503a87da..fb696e3d06cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -127,7 +127,7 @@
127#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) 127#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
128#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) 128#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
129#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) 129#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
130#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT ) 130#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT)
131#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) 131#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
132#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) 132#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
133#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) 133#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
@@ -481,9 +481,9 @@ typedef struct {
481 uint8_t padding8_4; 481 uint8_t padding8_4;
482 482
483 483
484 uint8_t GfxclkSpreadEnabled; 484 uint8_t PllGfxclkSpreadEnabled;
485 uint8_t GfxclkSpreadPercent; 485 uint8_t PllGfxclkSpreadPercent;
486 uint16_t GfxclkSpreadFreq; 486 uint16_t PllGfxclkSpreadFreq;
487 487
488 uint8_t UclkSpreadEnabled; 488 uint8_t UclkSpreadEnabled;
489 uint8_t UclkSpreadPercent; 489 uint8_t UclkSpreadPercent;
@@ -493,7 +493,11 @@ typedef struct {
493 uint8_t SocclkSpreadPercent; 493 uint8_t SocclkSpreadPercent;
494 uint16_t SocclkSpreadFreq; 494 uint16_t SocclkSpreadFreq;
495 495
496 uint32_t BoardReserved[3]; 496 uint8_t AcgGfxclkSpreadEnabled;
497 uint8_t AcgGfxclkSpreadPercent;
498 uint16_t AcgGfxclkSpreadFreq;
499
500 uint32_t BoardReserved[10];
497 501
498 502
499 uint32_t MmHubPadding[7]; 503 uint32_t MmHubPadding[7];
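The hunk above renames the gfxclk spread-spectrum fields to PllGfxclk*, appends AcgGfxclk* fields, and resizes BoardReserved, so the driver-side PPTable_t layout changes along with the SMU firmware's view of it. The sketch below is illustrative only: a compile-time size check one could use to catch accidental layout drift in such a shared structure; the struct and the 48-byte figure are hypothetical, not taken from the interface header.

#include <stdint.h>
#include <assert.h>

struct spread_spectrum_regs {
	uint8_t  PllGfxclkSpreadEnabled;
	uint8_t  PllGfxclkSpreadPercent;
	uint16_t PllGfxclkSpreadFreq;

	uint8_t  AcgGfxclkSpreadEnabled;
	uint8_t  AcgGfxclkSpreadPercent;
	uint16_t AcgGfxclkSpreadFreq;

	uint32_t BoardReserved[10];
};

/* Fails to compile if a field change alters the agreed-upon layout. */
static_assert(sizeof(struct spread_spectrum_regs) == 48,
	      "driver/firmware table layout drifted");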
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 55cd204c1789..651a3f28734b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -30,8 +30,7 @@
30 30
31#include "ppatomctrl.h" 31#include "ppatomctrl.h"
32#include "pp_debug.h" 32#include "pp_debug.h"
33#include "smu_ucode_xfer_vi.h" 33
34#include "smu7_smumgr.h"
35 34
36/* MP Apertures */ 35/* MP Apertures */
37#define MP0_Public 0x03800000 36#define MP0_Public 0x03800000
@@ -392,8 +391,7 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
392 struct cgs_firmware_info info = {0}; 391 struct cgs_firmware_info info = {0};
393 int ret; 392 int ret;
394 393
395 ret = cgs_get_firmware_info(hwmgr->device, 394 ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU,
396 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
397 &info); 395 &info);
398 if (ret || !info.kptr) 396 if (ret || !info.kptr)
399 return -EINVAL; 397 return -EINVAL;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index fb1c27f69e3a..3d662e6805eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -142,7 +142,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
142} 142}
143 143
144struct dpi_clk_calc_ctx { 144struct dpi_clk_calc_ctx {
145 struct dss_pll *pll; 145 struct dpi_data *dpi;
146 unsigned int clkout_idx; 146 unsigned int clkout_idx;
147 147
148 /* inputs */ 148 /* inputs */
@@ -191,7 +191,7 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
191 ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc; 191 ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
192 ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc; 192 ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
193 193
194 return dispc_div_calc(ctx->pll->dss->dispc, dispc, 194 return dispc_div_calc(ctx->dpi->dss->dispc, dispc,
195 ctx->pck_min, ctx->pck_max, 195 ctx->pck_min, ctx->pck_max,
196 dpi_calc_dispc_cb, ctx); 196 dpi_calc_dispc_cb, ctx);
197} 197}
@@ -208,8 +208,8 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
208 ctx->pll_cinfo.fint = fint; 208 ctx->pll_cinfo.fint = fint;
209 ctx->pll_cinfo.clkdco = clkdco; 209 ctx->pll_cinfo.clkdco = clkdco;
210 210
211 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, 211 return dss_pll_hsdiv_calc_a(ctx->dpi->pll, clkdco,
212 ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss), 212 ctx->pck_min, dss_get_max_fck_rate(ctx->dpi->dss),
213 dpi_calc_hsdiv_cb, ctx); 213 dpi_calc_hsdiv_cb, ctx);
214} 214}
215 215
@@ -219,7 +219,7 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
219 219
220 ctx->fck = fck; 220 ctx->fck = fck;
221 221
222 return dispc_div_calc(ctx->pll->dss->dispc, fck, 222 return dispc_div_calc(ctx->dpi->dss->dispc, fck,
223 ctx->pck_min, ctx->pck_max, 223 ctx->pck_min, ctx->pck_max,
224 dpi_calc_dispc_cb, ctx); 224 dpi_calc_dispc_cb, ctx);
225} 225}
@@ -230,7 +230,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
230 unsigned long clkin; 230 unsigned long clkin;
231 231
232 memset(ctx, 0, sizeof(*ctx)); 232 memset(ctx, 0, sizeof(*ctx));
233 ctx->pll = dpi->pll; 233 ctx->dpi = dpi;
234 ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src); 234 ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
235 235
236 clkin = clk_get_rate(dpi->pll->clkin); 236 clkin = clk_get_rate(dpi->pll->clkin);
@@ -244,7 +244,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
244 pll_min = 0; 244 pll_min = 0;
245 pll_max = 0; 245 pll_max = 0;
246 246
247 return dss_pll_calc_a(ctx->pll, clkin, 247 return dss_pll_calc_a(ctx->dpi->pll, clkin,
248 pll_min, pll_max, 248 pll_min, pll_max,
249 dpi_calc_pll_cb, ctx); 249 dpi_calc_pll_cb, ctx);
250 } else { /* DSS_PLL_TYPE_B */ 250 } else { /* DSS_PLL_TYPE_B */
@@ -275,6 +275,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
275 bool ok; 275 bool ok;
276 276
277 memset(ctx, 0, sizeof(*ctx)); 277 memset(ctx, 0, sizeof(*ctx));
278 ctx->dpi = dpi;
278 if (pck > 1000 * i * i * i) 279 if (pck > 1000 * i * i * i)
279 ctx->pck_min = max(pck - 1000 * i * i * i, 0lu); 280 ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
280 else 281 else
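In the dpi.c change above, the clock-calculation context now carries the owning dpi_data instead of a dss_pll pointer, and dpi_dss_clk_calc() sets ctx->dpi as well; the callbacks then reach dss/dispc through ctx->dpi. A plausible reading is that the DSS-fclk path never had a valid ctx->pll, so dereferencing ctx->pll->dss there was unsafe. The fragment below sketches that idea with made-up types, not the omapdrm ones.

#include <stddef.h>

struct core  { int id; };
struct sub   { struct core *core; };
struct owner { struct sub *sub; struct core *core; };  /* sub may be NULL */

/* The context references the owner, which always exists, rather than a
 * sub-object that only some call paths initialise. */
struct calc_ctx { struct owner *owner; };

static int calc_callback(const struct calc_ctx *ctx)
{
	return ctx->owner->core->id;  /* safe even when owner->sub is NULL */
}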
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e415d2c097a7..48d0e6bd0508 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -140,6 +140,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
140 * https://bugs.freedesktop.org/show_bug.cgi?id=101491 140 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
141 */ 141 */
142 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 142 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
143 /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
144 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
145 */
146 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
143 { 0, 0, 0, 0, 0 }, 147 { 0, 0, 0, 0, 0 },
144}; 148};
145 149
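The radeon_device.c hunk adds one more entry to radeon_px_quirk_list, matched by PCI vendor/device and subsystem IDs and terminated by the all-zero entry. A small sketch of that lookup pattern follows; the struct and field names are illustrative, not the radeon definitions.

#include <stdint.h>

struct px_quirk {
	uint16_t vendor, device, subsys_vendor, subsys_device;
	uint32_t flags;
};

static uint32_t px_quirk_flags(const struct px_quirk *q,
			       uint16_t ven, uint16_t dev,
			       uint16_t sven, uint16_t sdev)
{
	/* The table ends with a zeroed entry, so q->vendor == 0 stops the walk. */
	for (; q->vendor; q++)
		if (q->vendor == ven && q->device == dev &&
		    q->subsys_vendor == sven && q->subsys_device == sdev)
			return q->flags;
	return 0;
}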
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 97a0a639dad9..90d5b41007bf 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
5912{ 5912{
5913 u32 lane_width; 5913 u32 lane_width;
5914 u32 new_lane_width = 5914 u32 new_lane_width =
5915 (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 5915 ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
5916 u32 current_lane_width = 5916 u32 current_lane_width =
5917 (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 5917 ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
5918 5918
5919 if (new_lane_width != current_lane_width) { 5919 if (new_lane_width != current_lane_width) {
5920 radeon_set_pcie_lanes(rdev, new_lane_width); 5920 radeon_set_pcie_lanes(rdev, new_lane_width);
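The si_dpm.c fix adds "+ 1" when decoding the lane width from the pplib caps word, i.e. the change treats the encoded field as storing one less than the actual lane count (so x16 is stored as 15); without the correction the driver would have programmed one lane fewer than the state asked for. A hedged decode sketch, with placeholder mask and shift rather than the real ATOM_PPLIB_* constants:

#include <stdint.h>

/* Placeholder field definition, not the ATOM_PPLIB_* constants. */
#define LINK_WIDTH_MASK   0x000F0000u
#define LINK_WIDTH_SHIFT  16

/* Field is assumed to hold (lanes - 1): 0 -> x1, 7 -> x8, 15 -> x16. */
static uint32_t decode_lane_width(uint32_t caps)
{
	return ((caps & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT) + 1;
}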
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7720e3102bcc..7a111a1b5836 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -987,7 +987,7 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
987#define azx_del_card_list(chip) /* NOP */ 987#define azx_del_card_list(chip) /* NOP */
988#endif /* CONFIG_PM */ 988#endif /* CONFIG_PM */
989 989
990#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 990#ifdef CONFIG_PM_SLEEP
991/* 991/*
992 * power management 992 * power management
993 */ 993 */
@@ -1068,9 +1068,7 @@ static int azx_resume(struct device *dev)
1068 trace_azx_resume(chip); 1068 trace_azx_resume(chip);
1069 return 0; 1069 return 0;
1070} 1070}
1071#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
1072 1071
1073#ifdef CONFIG_PM_SLEEP
1074/* put codec down to D3 at hibernation for Intel SKL+; 1072/* put codec down to D3 at hibernation for Intel SKL+;
1075 * otherwise BIOS may still access the codec and screw up the driver 1073 * otherwise BIOS may still access the codec and screw up the driver
1076 */ 1074 */