author     Christian König <christian.koenig@amd.com>    2013-04-29 05:55:02 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2013-05-02 10:09:48 -0400
commit     facd112d1395fb6a0b6e460778aefc32197afcfc (patch)
tree       69cc696021ccfe1eb1d78a7aedfb32f984675118 /drivers/gpu/drm/radeon/evergreen.c
parent     092fbc4ca29a3d78895673479f794ee162a13ac5 (diff)
drm/radeon: consolidate UVD clock programming
Instead of duplicating the code over and over again, just use a single
function to handle the clock calculations.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
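
For reference, the calculation that both the removed evergreen-specific loop and the shared radeon_uvd_calc_upll_dividers() helper perform is a brute-force search: walk the allowed VCO range, derive a feedback divider from the reference clock, pick post dividers for vclk and dclk that never overshoot the targets, and keep the combination with the smallest combined error. The standalone C sketch below mirrors the loop being removed from evergreen_set_uvd_clocks() in the diff; it is not the kernel code. The function names, the argument order and the values in main() are illustrative only, and the frequencies are treated in 10 kHz units, matching the ranges used in the call above.

#include <stdio.h>
#include <stdlib.h>

/* post divider for one clock: smallest divider that stays at or below the target */
static int calc_post_div(unsigned target, unsigned vco,
			 unsigned pd_even, unsigned pd_max, unsigned *div)
{
	if (vco < target)
		return -1;			/* VCO below target: no valid divider */

	*div = vco / target;			/* Fclk = Fvco / PDIV */

	if (vco / *div > target)		/* never exceed the requested clock */
		*div += 1;

	if (*div > pd_even && *div % 2)		/* large post dividers must be even */
		*div += 1;

	if (*div >= pd_max)			/* divider out of range */
		return -1;

	return vco / *div;
}

/* scan the VCO range from low to high and keep the setting with the smallest error */
static int calc_upll_dividers(unsigned vclk, unsigned dclk, unsigned ref_freq,
			      unsigned vco_min, unsigned vco_max,
			      unsigned fb_factor, unsigned fb_mask,
			      unsigned pd_even, unsigned pd_max,
			      unsigned *fb_div, unsigned *vclk_div,
			      unsigned *dclk_div)
{
	unsigned best_score = ~0u;
	unsigned vco;

	for (vco = vco_min; vco <= vco_max; vco += 100) {
		unsigned fb = vco / ref_freq * fb_factor;
		unsigned vd, dd, score;
		int cv, cd;

		if (fb > fb_mask)
			break;			/* feedback divider overflowed */

		cv = calc_post_div(vclk, vco, pd_even, pd_max, &vd);
		cd = calc_post_div(dclk, vco, pd_even, pd_max, &dd);
		if (cv < 0 || cd < 0)
			break;			/* VCO grew past what the dividers allow */

		score = abs((int)vclk - cv) + abs((int)dclk - cd);
		if (score < best_score) {
			best_score = score;
			*fb_div = fb;
			*vclk_div = vd;
			*dclk_div = dd;
			if (score == 0)
				break;		/* exact match, stop early */
		}
	}

	return best_score == ~0u ? -1 : 0;
}

int main(void)
{
	unsigned fb = 0, vd = 0, dd = 0;

	/* example: 400 MHz vclk, 320 MHz dclk, 27 MHz reference, all in 10 kHz units */
	if (calc_upll_dividers(40000, 32000, 2700, 125000, 250000,
			       16384, 0x03FFFFFF, 5, 128, &fb, &vd, &dd) == 0)
		printf("fb_div=%u vclk_div=%u dclk_div=%u\n", fb, vd, dd);

	return 0;
}

The parameters passed in the diff (VCO between 125000 and 250000, feedback factor 16384 with mask 0x03FFFFFF, post dividers below 128, values above 5 forced even) correspond to the limits hard-coded in the removed evergreen loop, so the per-ASIC code reduces to a single parameterized call.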
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
 -rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 103
 1 file changed, 11 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1531f167d152..105bafb6c29d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -989,62 +989,10 @@ done:
 	return r;
 }
 
-static int evergreen_uvd_calc_post_div(unsigned target_freq,
-				       unsigned vco_freq,
-				       unsigned *div)
-{
-	/* target larger than vco frequency ? */
-	if (vco_freq < target_freq)
-		return -1; /* forget it */
-
-	/* Fclk = Fvco / PDIV */
-	*div = vco_freq / target_freq;
-
-	/* we alway need a frequency less than or equal the target */
-	if ((vco_freq / *div) > target_freq)
-		*div += 1;
-
-	/* dividers above 5 must be even */
-	if (*div > 5 && *div % 2)
-		*div += 1;
-
-	/* out of range ? */
-	if (*div >= 128)
-		return -1; /* forget it */
-
-	return vco_freq / *div;
-}
-
-static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev)
-{
-	unsigned i;
-
-	/* assert UPLL_CTLREQ */
-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
-
-	/* wait for CTLACK and CTLACK2 to get asserted */
-	for (i = 0; i < 100; ++i) {
-		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
-		if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
-			break;
-		mdelay(10);
-	}
-	if (i == 100)
-		return -ETIMEDOUT;
-
-	/* deassert UPLL_CTLREQ */
-	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
-
-	return 0;
-}
-
 int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 {
 	/* start off with something large */
-	int optimal_diff_score = 0x7FFFFFF;
-	unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
-	unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
-	unsigned vco_freq;
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
 	int r;
 
 	/* bypass vclk and dclk with bclk */
@@ -1061,40 +1009,11 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 		return 0;
 	}
 
-	/* loop through vco from low to high */
-	for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) {
-		unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384;
-		int calc_clk, diff_score, diff_vclk, diff_dclk;
-		unsigned vclk_div, dclk_div;
-
-		/* fb div out of range ? */
-		if (fb_div > 0x03FFFFFF)
-			break; /* it can oly get worse */
-
-		/* calc vclk with current vco freq. */
-		calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
-		if (calc_clk == -1)
-			break; /* vco is too big, it has to stop. */
-		diff_vclk = vclk - calc_clk;
-
-		/* calc dclk with current vco freq. */
-		calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
-		if (calc_clk == -1)
-			break; /* vco is too big, it has to stop. */
-		diff_dclk = dclk - calc_clk;
-
-		/* determine if this vco setting is better than current optimal settings */
-		diff_score = abs(diff_vclk) + abs(diff_dclk);
-		if (diff_score < optimal_diff_score) {
-			optimal_fb_div = fb_div;
-			optimal_vclk_div = vclk_div;
-			optimal_dclk_div = dclk_div;
-			optimal_vco_freq = vco_freq;
-			optimal_diff_score = diff_score;
-			if (optimal_diff_score == 0)
-				break; /* it can't get better than this */
-		}
-	}
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
 
 	/* set VCO_MODE to 1 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
@@ -1108,7 +1027,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 
 	mdelay(1);
 
-	r = evergreen_uvd_send_upll_ctlreq(rdev);
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
 	if (r)
 		return r;
 
@@ -1119,19 +1038,19 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
 
 	/* set feedback divider */
-	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
 
 	/* set ref divider to 0 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
 
-	if (optimal_vco_freq < 187500)
+	if (fb_div < 307200)
 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
 	else
 		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
 
 	/* set PDIV_A and PDIV_B */
 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
-		UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div),
+		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
 		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
 
 	/* give the PLL some time to settle */
@@ -1145,7 +1064,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	/* switch from bypass mode to normal mode */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
 
-	r = evergreen_uvd_send_upll_ctlreq(rdev);
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
 	if (r)
 		return r;
 