Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 236
1 file changed, 99 insertions, 137 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 10c5d78081ed..f6ce52956e6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -74,7 +74,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 			adev->pm.dpm.ac_power = true;
 		else
 			adev->pm.dpm.ac_power = false;
-		if (adev->pm.funcs->enable_bapm)
+		if (adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
 		mutex_unlock(&adev->pm.mutex);
 	}
@@ -88,9 +88,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->get_current_power_state)
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	} else
+	else
 		pm = adev->pm.dpm.user_state;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
@@ -140,13 +140,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	enum amd_dpm_forced_level level;
+	enum amd_dpm_forced_level level = 0xff;
 
 	if ((adev->flags & AMD_IS_PX) &&
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return snprintf(buf, PAGE_SIZE, "off\n");
 
-	level = amdgpu_dpm_get_performance_level(adev);
+	if (adev->powerplay.pp_funcs->get_performance_level)
+		level = amdgpu_dpm_get_performance_level(adev);
+	else
+		level = adev->pm.dpm.forced_level;
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -167,7 +171,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level;
-	enum amd_dpm_forced_level current_level;
+	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
 	/* Can't force performance level when the card is off */
@@ -175,7 +179,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	if (adev->powerplay.pp_funcs->get_performance_level)
+		current_level = amdgpu_dpm_get_performance_level(adev);
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -203,9 +208,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return count;
 
-	if (adev->pp_enabled)
-		amdgpu_dpm_force_performance_level(adev, level);
-	else {
+	if (adev->powerplay.pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
 			count = -EINVAL;
@@ -233,7 +236,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_pp_num_states)
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 
 	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -257,8 +260,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0;
 
-	if (adev->pp_enabled) {
-
+	if (adev->powerplay.pp_funcs->get_current_power_state
+		 && adev->powerplay.pp_funcs->get_pp_num_states) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 
@@ -280,25 +283,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	struct pp_states_info data;
-	enum amd_pm_state_type pm = 0;
-	int i;
-
-	if (adev->pp_force_state_enabled && adev->pp_enabled) {
-		pm = amdgpu_dpm_get_current_power_state(adev);
-		amdgpu_dpm_get_pp_num_states(adev, &data);
-
-		for (i = 0; i < data.nums; i++) {
-			if (pm == data.states[i])
-				break;
-		}
 
-		if (i == data.nums)
-			i = -EINVAL;
-
-		return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
-	} else
+	if (adev->pp_force_state_enabled)
+		return amdgpu_get_pp_cur_state(dev, attr, buf);
+	else
 		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
@@ -347,7 +335,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_pp_table)
 		size = amdgpu_dpm_get_pp_table(adev, &table);
 	else
 		return 0;
@@ -368,7 +356,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->set_pp_table)
 		amdgpu_dpm_set_pp_table(adev, buf, count);
 
 	return count;
@@ -380,14 +368,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
 
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
-
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@@ -416,10 +401,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		mask |= 1 << level;
 	}
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
+
 fail:
 	return count;
 }
@@ -430,14 +414,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
-
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
 
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -465,11 +446,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		}
 		mask |= 1 << level;
 	}
-
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
+
 fail:
 	return count;
 }
@@ -480,14 +459,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
-
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
 
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +491,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		}
 		mask |= 1 << level;
 	}
-
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+
 fail:
 	return count;
 }
@@ -532,10 +506,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_sclk_od)
 		value = amdgpu_dpm_get_sclk_od(adev);
-	else if (adev->pm.funcs->get_sclk_od)
-		value = adev->pm.funcs->get_sclk_od(adev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -556,12 +528,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_sclk_od)
+		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_sclk_od) {
-		adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@@ -578,10 +550,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_mclk_od)
 		value = amdgpu_dpm_get_mclk_od(adev);
-	else if (adev->pm.funcs->get_mclk_od)
-		value = adev->pm.funcs->get_mclk_od(adev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -602,12 +572,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_mclk_od)
+		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_mclk_od) {
-		adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@@ -621,14 +591,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	int ret = 0;
+	int ret = 0xff;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_power_profile_state)
 		ret = amdgpu_dpm_get_power_profile_state(
 				adev, query);
-	else if (adev->pm.funcs->get_power_profile_state)
-		ret = adev->pm.funcs->get_power_profile_state(
-				adev, query);
 
 	if (ret)
 		return ret;
@@ -675,15 +642,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 	char *sub_str, buf_cpy[128], *tmp_str;
 	const char delimiter[3] = {' ', '\n', '\0'};
 	long int value;
-	int ret = 0;
+	int ret = 0xff;
 
 	if (strncmp("reset", buf, strlen("reset")) == 0) {
-		if (adev->pp_enabled)
+		if (adev->powerplay.pp_funcs->reset_power_profile_state)
 			ret = amdgpu_dpm_reset_power_profile_state(
 					adev, request);
-		else if (adev->pm.funcs->reset_power_profile_state)
-			ret = adev->pm.funcs->reset_power_profile_state(
-					adev, request);
 		if (ret) {
 			count = -EINVAL;
 			goto fail;
@@ -692,12 +656,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 	}
 
 	if (strncmp("set", buf, strlen("set")) == 0) {
-		if (adev->pp_enabled)
+		if (adev->powerplay.pp_funcs->set_power_profile_state)
 			ret = amdgpu_dpm_set_power_profile_state(
 					adev, request);
-		else if (adev->pm.funcs->set_power_profile_state)
-			ret = adev->pm.funcs->set_power_profile_state(
-					adev, request);
+
 		if (ret) {
 			count = -EINVAL;
 			goto fail;
@@ -745,13 +707,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 
 		loop++;
 	}
-
-	if (adev->pp_enabled)
-		ret = amdgpu_dpm_set_power_profile_state(
-				adev, request);
-	else if (adev->pm.funcs->set_power_profile_state)
-		ret = adev->pm.funcs->set_power_profile_state(
-				adev, request);
+	if (adev->powerplay.pp_funcs->set_power_profile_state)
+		ret = amdgpu_dpm_set_power_profile_state(adev, request);
 
 	if (ret)
 		count = -EINVAL;
@@ -831,7 +788,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+	if (!adev->powerplay.pp_funcs->get_temperature)
 		temp = 0;
 	else
 		temp = amdgpu_dpm_get_temperature(adev);
@@ -862,7 +819,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
 		return -EINVAL;
 
 	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@@ -879,7 +836,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	int err;
 	int value;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
 		return -EINVAL;
 
 	err = kstrtoint(buf, 10, &value);
@@ -919,9 +876,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	value = (value * 100) / 255;
 
-	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+		if (err)
+			return err;
+	}
 
 	return count;
 }
@@ -932,11 +891,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
-	u32 speed;
+	u32 speed = 0;
 
-	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+		if (err)
+			return err;
+	}
 
 	speed = (speed * 255) / 100;
 
@@ -949,11 +910,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
-	u32 speed;
+	u32 speed = 0;
 
-	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+		if (err)
+			return err;
+	}
 
 	return sprintf(buf, "%i\n", speed);
 }
@@ -1008,21 +971,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		return 0;
 
 	/* mask fan attributes if we have no bindings for this asic to expose */
-	if ((!adev->pm.funcs->get_fan_speed_percent &&
+	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
-	    (!adev->pm.funcs->get_fan_control_mode &&
+	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
 		effective_mode &= ~S_IRUGO;
 
-	if ((!adev->pm.funcs->set_fan_speed_percent &&
+	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
-	    (!adev->pm.funcs->set_fan_control_mode &&
+	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
 		effective_mode &= ~S_IWUSR;
 
 	/* hide max/min values if we can't both query and manage the fan */
-	if ((!adev->pm.funcs->set_fan_speed_percent &&
-	     !adev->pm.funcs->get_fan_speed_percent) &&
+	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;
@@ -1055,7 +1018,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
 	if (!adev->pm.dpm_enabled)
 		return;
 
-	if (adev->pm.funcs->get_temperature) {
+	if (adev->powerplay.pp_funcs->get_temperature) {
 		int temp = amdgpu_dpm_get_temperature(adev);
 
 		if (temp < adev->pm.dpm.thermal.min_temp)
@@ -1087,7 +1050,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
 		true : false;
 
 	/* check if the vblank period is too short to adjust the mclk */
-	if (single_display && adev->pm.funcs->vblank_too_short) {
+	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
 		if (amdgpu_dpm_vblank_too_short(adev))
 			single_display = false;
 	}
@@ -1216,7 +1179,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	struct amdgpu_ps *ps;
 	enum amd_pm_state_type dpm_state;
 	int ret;
-	bool equal;
+	bool equal = false;
 
 	/* if dpm init failed */
 	if (!adev->pm.dpm_enabled)
@@ -1236,7 +1199,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	else
 		return;
 
-	if (amdgpu_dpm == 1) {
+	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
 		printk("switching from power state:\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
 		printk("switching to power state:\n");
@@ -1245,15 +1208,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;
-
+	if (adev->powerplay.pp_funcs->display_configuration_changed)
 	amdgpu_dpm_display_configuration_changed(adev);
 
 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
 		return;
 
-	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
-		equal = false;
+	if (adev->powerplay.pp_funcs->check_state_equal) {
+		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+			equal = false;
+	}
 
 	if (equal)
 		return;
@@ -1264,7 +1229,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
 	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
-	if (adev->pm.funcs->force_performance_level) {
+	if (adev->powerplay.pp_funcs->force_performance_level) {
 		if (adev->pm.dpm.thermal_active) {
 			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
 			/* force low perf level for thermal */
@@ -1280,7 +1245,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+	if (adev->powerplay.pp_funcs->powergate_uvd) {
 		/* enable/disable UVD */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_uvd(adev, !enable);
@@ -1302,7 +1267,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+	if (adev->powerplay.pp_funcs->powergate_vce) {
 		/* enable/disable VCE */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_vce(adev, !enable);
@@ -1337,8 +1302,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 {
 	int i;
 
-	if (adev->pp_enabled)
-		/* TO DO */
+	if (adev->powerplay.pp_funcs->print_power_state == NULL)
 		return;
 
 	for (i = 0; i < adev->pm.dpm.num_ps; i++)
@@ -1353,10 +1317,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	if (adev->pm.sysfs_initialized)
 		return 0;
 
-	if (!adev->pp_enabled) {
-		if (adev->pm.funcs->get_temperature == NULL)
-			return 0;
-	}
+	if (adev->powerplay.pp_funcs->get_temperature == NULL)
+		return 0;
 
 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
 								   DRIVER_NAME, adev,
@@ -1634,8 +1596,8 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 		return amdgpu_debugfs_pm_info_pp(m, adev);
 	} else {
 		mutex_lock(&adev->pm.mutex);
-		if (adev->pm.funcs->debugfs_print_current_performance_level)
-			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);