author    | Rex Zhu <Rex.Zhu@amd.com>                 | 2017-09-25 06:51:50 -0400
committer | Alex Deucher <alexander.deucher@amd.com>  | 2017-09-28 16:03:33 -0400
commit    | 6d07fe7bcae57ee73d87766b6cd1e026d3fee85d (patch)
tree      | d5610d5a4bd1ee4c8bd8e69f668864fbd8ca3cc2 /drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
parent    | 790d84fdc9826500c88abfca0f3f86e96153bd4e (diff)
drm/amdgpu: delete pp_enable in adev
amdgpu does not need to care whether powerplay or legacy dpm is enabled;
just check the ip functions and the pp functions.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
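The pattern applied throughout amdgpu_pm.c below is to test for the presence of the relevant pp_funcs callback (e.g. dispatch_tasks) instead of consulting a separate adev->pp_enabled flag, and to fall back to the legacy dpm path when the callback is absent. A minimal, self-contained sketch of that idea follows; struct pp_funcs, struct device_ctx, readjust_power_state and pp_dispatch are simplified stand-ins for illustration, not the real amdgpu definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the powerplay function table. */
struct pp_funcs {
	int (*dispatch_tasks)(void *handle, int task_id);
};

struct device_ctx {
	struct pp_funcs *pp_funcs;   /* populated when the powerplay IP block is in use */
	int dpm_enabled;             /* legacy dpm path */
};

/* Before: gated on a separate pp_enabled flag that had to be kept in sync.
 * After: check whether the callback exists; fall back to legacy dpm otherwise. */
static void readjust_power_state(struct device_ctx *dev)
{
	if (dev->pp_funcs && dev->pp_funcs->dispatch_tasks) {
		dev->pp_funcs->dispatch_tasks(dev, 0 /* e.g. readjust-power-state task */);
	} else if (dev->dpm_enabled) {
		printf("legacy dpm path\n");
	}
}

static int pp_dispatch(void *handle, int task_id)
{
	printf("powerplay dispatch, task %d\n", task_id);
	return 0;
}

int main(void)
{
	struct pp_funcs funcs = { .dispatch_tasks = pp_dispatch };
	struct device_ctx with_pp = { .pp_funcs = &funcs, .dpm_enabled = 0 };
	struct device_ctx legacy  = { .pp_funcs = NULL,   .dpm_enabled = 1 };

	readjust_power_state(&with_pp);  /* takes the powerplay branch */
	readjust_power_state(&legacy);   /* falls back to the legacy dpm branch */
	return 0;
}

The design choice this models: the function table itself already encodes which backend is active, so a redundant boolean only adds state that can drift out of sync.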
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 77
1 file changed, 35 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 274886cedb66..a59e04f3eeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -64,10 +64,6 @@ static const struct cg_flag_name clocks[] = {
 
 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 {
-	if (adev->pp_enabled)
-		/* TODO */
-		return;
-
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
 		if (power_supply_is_system_supplied() > 0)
@@ -118,7 +114,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		goto fail;
 	}
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
@@ -303,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
-	else if (adev->pp_enabled) {
+	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
+		 adev->powerplay.pp_funcs->get_pp_num_states) {
 		struct pp_states_info data;
 
 		ret = kstrtoul(buf, 0, &idx);
@@ -531,7 +528,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	if (adev->powerplay.pp_funcs->set_sclk_od)
 		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
 	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
@@ -575,7 +572,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	if (adev->powerplay.pp_funcs->set_mclk_od)
 		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
 	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
@@ -959,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;
 
-	if (adev->pp_enabled)
-		return effective_mode;
-
 	/* Skip fan attributes if fan is not present */
 	if (adev->pm.no_fan &&
 	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -1344,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	if (adev->pp_enabled) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_num_states\n");
-			return ret;
-		}
-		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_cur_state\n");
-			return ret;
-		}
-		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_force_state\n");
-			return ret;
-		}
-		ret = device_create_file(adev->dev, &dev_attr_pp_table);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_table\n");
-			return ret;
-		}
+
+	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_num_states\n");
+		return ret;
+	}
+	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_cur_state\n");
+		return ret;
+	}
+	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_force_state\n");
+		return ret;
+	}
+	ret = device_create_file(adev->dev, &dev_attr_pp_table);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_table\n");
+		return ret;
 	}
 
 	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@@ -1427,12 +1420,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	hwmon_device_unregister(adev->pm.int_hwmon_dev);
 	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
 	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-	if (adev->pp_enabled) {
-		device_remove_file(adev->dev, &dev_attr_pp_num_states);
-		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
-		device_remove_file(adev->dev, &dev_attr_pp_force_state);
-		device_remove_file(adev->dev, &dev_attr_pp_table);
-	}
+
+	device_remove_file(adev->dev, &dev_attr_pp_num_states);
+	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+	device_remove_file(adev->dev, &dev_attr_pp_force_state);
+	device_remove_file(adev->dev, &dev_attr_pp_table);
+
 	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
 	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
 	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@@ -1463,7 +1456,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 		amdgpu_fence_wait_empty(ring);
 	}
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
@@ -1598,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	if ((adev->flags & AMD_IS_PX) &&
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 		seq_printf(m, "PX asic powered off\n");
-	} else if (adev->pp_enabled) {
-		return amdgpu_debugfs_pm_info_pp(m, adev);
-	} else {
+	} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
 			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
+	} else {
+		return amdgpu_debugfs_pm_info_pp(m, adev);
 	}
 
 	return 0;