diff options
author | Alex Deucher <alexander.deucher@amd.com> | 2013-03-26 17:56:05 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2013-06-27 19:16:44 -0400 |
commit | 79fb809a5dabf330dd0897b83162fc8e2f6ee9d9 (patch) | |
tree | e4a5f67f6d187ab360db34da1390e44e77a4d7ea /drivers/gpu/drm/radeon/ni_dpm.c | |
parent | 779187f2c3e69b8c06488538e0fd9fd02163359e (diff) |
drm/radeon/dpm/ni: properly catch errors in dpm setup
We weren't properly catching errors in dpm_enable()
and dpm_set_power_state().
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/ni_dpm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/ni_dpm.c | 132 |
1 file changed, 98 insertions, 34 deletions
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 86f98db4716d..8e6b23aecc7f 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -3517,6 +3517,7 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
3517 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | 3517 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
3518 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 3518 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
3519 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; | 3519 | struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; |
3520 | int ret; | ||
3520 | 3521 | ||
3521 | if (pi->gfx_clock_gating) | 3522 | if (pi->gfx_clock_gating) |
3522 | ni_cg_clockgating_default(rdev); | 3523 | ni_cg_clockgating_default(rdev); |
@@ -3528,10 +3529,15 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
3528 | ni_ls_clockgating_default(rdev); | 3529 | ni_ls_clockgating_default(rdev); |
3529 | if (pi->voltage_control) { | 3530 | if (pi->voltage_control) { |
3530 | rv770_enable_voltage_control(rdev, true); | 3531 | rv770_enable_voltage_control(rdev, true); |
3531 | cypress_construct_voltage_tables(rdev); | 3532 | ret = cypress_construct_voltage_tables(rdev); |
3533 | if (ret) | ||
3534 | return ret; | ||
3535 | } | ||
3536 | if (eg_pi->dynamic_ac_timing) { | ||
3537 | ret = ni_initialize_mc_reg_table(rdev); | ||
3538 | if (ret) | ||
3539 | eg_pi->dynamic_ac_timing = false; | ||
3532 | } | 3540 | } |
3533 | if (eg_pi->dynamic_ac_timing) | ||
3534 | ni_initialize_mc_reg_table(rdev); | ||
3535 | if (pi->dynamic_ss) | 3541 | if (pi->dynamic_ss) |
3536 | cypress_enable_spread_spectrum(rdev, true); | 3542 | cypress_enable_spread_spectrum(rdev, true); |
3537 | if (pi->thermal_protection) | 3543 | if (pi->thermal_protection) |
@@ -3545,21 +3551,43 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
3545 | rv770_program_vc(rdev); | 3551 | rv770_program_vc(rdev); |
3546 | if (pi->dynamic_pcie_gen2) | 3552 | if (pi->dynamic_pcie_gen2) |
3547 | ni_enable_dynamic_pcie_gen2(rdev, true); | 3553 | ni_enable_dynamic_pcie_gen2(rdev, true); |
3548 | if (rv770_upload_firmware(rdev)) | 3554 | ret = rv770_upload_firmware(rdev); |
3549 | return -EINVAL; | 3555 | if (ret) |
3550 | ni_process_firmware_header(rdev); | 3556 | return ret; |
3551 | ni_initial_switch_from_arb_f0_to_f1(rdev); | 3557 | ret = ni_process_firmware_header(rdev); |
3552 | ni_init_smc_table(rdev); | 3558 | if (ret) |
3553 | ni_init_smc_spll_table(rdev); | 3559 | return ret; |
3554 | ni_init_arb_table_index(rdev); | 3560 | ret = ni_initial_switch_from_arb_f0_to_f1(rdev); |
3555 | if (eg_pi->dynamic_ac_timing) | 3561 | if (ret) |
3556 | ni_populate_mc_reg_table(rdev, boot_ps); | 3562 | return ret; |
3557 | ni_initialize_smc_cac_tables(rdev); | 3563 | ret = ni_init_smc_table(rdev); |
3558 | ni_initialize_hardware_cac_manager(rdev); | 3564 | if (ret) |
3559 | ni_populate_smc_tdp_limits(rdev, boot_ps); | 3565 | return ret; |
3566 | ret = ni_init_smc_spll_table(rdev); | ||
3567 | if (ret) | ||
3568 | return ret; | ||
3569 | ret = ni_init_arb_table_index(rdev); | ||
3570 | if (ret) | ||
3571 | return ret; | ||
3572 | if (eg_pi->dynamic_ac_timing) { | ||
3573 | ret = ni_populate_mc_reg_table(rdev, boot_ps); | ||
3574 | if (ret) | ||
3575 | return ret; | ||
3576 | } | ||
3577 | ret = ni_initialize_smc_cac_tables(rdev); | ||
3578 | if (ret) | ||
3579 | return ret; | ||
3580 | ret = ni_initialize_hardware_cac_manager(rdev); | ||
3581 | if (ret) | ||
3582 | return ret; | ||
3583 | ret = ni_populate_smc_tdp_limits(rdev, boot_ps); | ||
3584 | if (ret) | ||
3585 | return ret; | ||
3560 | ni_program_response_times(rdev); | 3586 | ni_program_response_times(rdev); |
3561 | r7xx_start_smc(rdev); | 3587 | r7xx_start_smc(rdev); |
3562 | cypress_notify_smc_display_change(rdev, false); | 3588 | ret = cypress_notify_smc_display_change(rdev, false); |
3589 | if (ret) | ||
3590 | return ret; | ||
3563 | cypress_enable_sclk_control(rdev, true); | 3591 | cypress_enable_sclk_control(rdev, true); |
3564 | if (eg_pi->memory_transition) | 3592 | if (eg_pi->memory_transition) |
3565 | cypress_enable_mclk_control(rdev, true); | 3593 | cypress_enable_mclk_control(rdev, true); |
@@ -3575,7 +3603,9 @@ int ni_dpm_enable(struct radeon_device *rdev) | |||
3575 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 3603 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
3576 | PPSMC_Result result; | 3604 | PPSMC_Result result; |
3577 | 3605 | ||
3578 | rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000); | 3606 | ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000); |
3607 | if (ret) | ||
3608 | return ret; | ||
3579 | rdev->irq.dpm_thermal = true; | 3609 | rdev->irq.dpm_thermal = true; |
3580 | radeon_irq_set(rdev); | 3610 | radeon_irq_set(rdev); |
3581 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); | 3611 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); |
@@ -3632,11 +3662,20 @@ void ni_dpm_disable(struct radeon_device *rdev) | |||
3632 | int ni_power_control_set_level(struct radeon_device *rdev) | 3662 | int ni_power_control_set_level(struct radeon_device *rdev) |
3633 | { | 3663 | { |
3634 | struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps; | 3664 | struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps; |
3665 | int ret; | ||
3635 | 3666 | ||
3636 | ni_restrict_performance_levels_before_switch(rdev); | 3667 | ret = ni_restrict_performance_levels_before_switch(rdev); |
3637 | rv770_halt_smc(rdev); | 3668 | if (ret) |
3638 | ni_populate_smc_tdp_limits(rdev, new_ps); | 3669 | return ret; |
3639 | rv770_resume_smc(rdev); | 3670 | ret = rv770_halt_smc(rdev); |
3671 | if (ret) | ||
3672 | return ret; | ||
3673 | ret = ni_populate_smc_tdp_limits(rdev, new_ps); | ||
3674 | if (ret) | ||
3675 | return ret; | ||
3676 | ret = rv770_resume_smc(rdev); | ||
3677 | if (ret) | ||
3678 | return ret; | ||
3640 | rv770_set_sw_state(rdev); | 3679 | rv770_set_sw_state(rdev); |
3641 | 3680 | ||
3642 | return 0; | 3681 | return 0; |
@@ -3662,29 +3701,54 @@ int ni_dpm_set_power_state(struct radeon_device *rdev) | |||
3662 | struct radeon_ps *old_ps = &eg_pi->current_rps; | 3701 | struct radeon_ps *old_ps = &eg_pi->current_rps; |
3663 | int ret; | 3702 | int ret; |
3664 | 3703 | ||
3665 | ni_restrict_performance_levels_before_switch(rdev); | 3704 | ret = ni_restrict_performance_levels_before_switch(rdev); |
3705 | if (ret) | ||
3706 | return ret; | ||
3666 | rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 3707 | rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
3667 | ni_enable_power_containment(rdev, new_ps, false); | 3708 | ret = ni_enable_power_containment(rdev, new_ps, false); |
3668 | ni_enable_smc_cac(rdev, new_ps, false); | 3709 | if (ret) |
3669 | rv770_halt_smc(rdev); | 3710 | return ret; |
3711 | ret = ni_enable_smc_cac(rdev, new_ps, false); | ||
3712 | if (ret) | ||
3713 | return ret; | ||
3714 | ret = rv770_halt_smc(rdev); | ||
3715 | if (ret) | ||
3716 | return ret; | ||
3670 | if (eg_pi->smu_uvd_hs) | 3717 | if (eg_pi->smu_uvd_hs) |
3671 | btc_notify_uvd_to_smc(rdev, new_ps); | 3718 | btc_notify_uvd_to_smc(rdev, new_ps); |
3672 | ni_upload_sw_state(rdev, new_ps); | 3719 | ret = ni_upload_sw_state(rdev, new_ps); |
3673 | if (eg_pi->dynamic_ac_timing) | 3720 | if (ret) |
3674 | ni_upload_mc_reg_table(rdev, new_ps); | 3721 | return ret; |
3722 | if (eg_pi->dynamic_ac_timing) { | ||
3723 | ret = ni_upload_mc_reg_table(rdev, new_ps); | ||
3724 | if (ret) | ||
3725 | return ret; | ||
3726 | } | ||
3675 | ret = ni_program_memory_timing_parameters(rdev, new_ps); | 3727 | ret = ni_program_memory_timing_parameters(rdev, new_ps); |
3676 | if (ret) | 3728 | if (ret) |
3677 | return ret; | 3729 | return ret; |
3678 | ni_populate_smc_tdp_limits(rdev, new_ps); | 3730 | ret = ni_populate_smc_tdp_limits(rdev, new_ps); |
3679 | rv770_resume_smc(rdev); | 3731 | if (ret) |
3680 | rv770_set_sw_state(rdev); | 3732 | return ret; |
3733 | ret = rv770_resume_smc(rdev); | ||
3734 | if (ret) | ||
3735 | return ret; | ||
3736 | ret = rv770_set_sw_state(rdev); | ||
3737 | if (ret) | ||
3738 | return ret; | ||
3681 | rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | 3739 | rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
3682 | ni_enable_smc_cac(rdev, new_ps, true); | 3740 | ret = ni_enable_smc_cac(rdev, new_ps, true); |
3683 | ni_enable_power_containment(rdev, new_ps, true); | 3741 | if (ret) |
3742 | return ret; | ||
3743 | ret = ni_enable_power_containment(rdev, new_ps, true); | ||
3744 | if (ret) | ||
3745 | return ret; | ||
3684 | 3746 | ||
3685 | #if 0 | 3747 | #if 0 |
3686 | /* XXX */ | 3748 | /* XXX */ |
3687 | ni_unrestrict_performance_levels_after_switch(rdev); | 3749 | ret = ni_unrestrict_performance_levels_after_switch(rdev); |
3750 | if (ret) | ||
3751 | return ret; | ||
3688 | #endif | 3752 | #endif |
3689 | 3753 | ||
3690 | return 0; | 3754 | return 0; |