path: root/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
author	Srirangan <smadhavan@nvidia.com>	2018-08-23 02:37:41 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-28 09:47:28 -0400
commit	4032e8915a65aa94f8b556676c5606683ec28f52 (patch)
tree	dc16ddcc61f9fed52c1c687bb02e6ec13edd28c6 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent	8676b2e65b786497c4a0609f06143e7d1bb1a3c0 (diff)
gpu: nvgpu: gm20b: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I1651ae8ee680bdeb48606569c4e8c2fc7cb87f20
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805077
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
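For illustration, a minimal sketch of the pattern this change applies (not code from this diff; the err variable is a hypothetical placeholder):

	/* Non-compliant under MISRA Rule 15.6: single-statement body without braces. */
	if (err)
		return err;

	/* Compliant form, as applied throughout this change. */
	if (err) {
		return err;
	}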
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/clk_gm20b.c	153
1 file changed, 101 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index c9cb353f..93bae921 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -121,8 +121,9 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
 {
 	u32 pl;
 
-	if ((g->clk.gpc_pll.id == GM20B_GPC_PLL_C1) || (old_pl & new_pl))
+	if ((g->clk.gpc_pll.id == GM20B_GPC_PLL_C1) || (old_pl & new_pl)) {
 		return 0;
+	}
 
 	pl = old_pl | BIT(ffs(new_pl) - 1); /* pl never 0 */
 	new_pl |= BIT(ffs(old_pl) - 1);
@@ -163,8 +164,9 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	best_PL = pll_params->min_PL;
 
 	target_vco_f = target_clk_f + target_clk_f / 50;
-	if (max_vco_f < target_vco_f)
+	if (max_vco_f < target_vco_f) {
 		max_vco_f = target_vco_f;
+	}
 
 	/* Set PL search boundaries. */
 	high_PL = nvgpu_div_to_pl((max_vco_f + target_vco_f - 1) / target_vco_f);
@@ -184,22 +186,27 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	for (m = pll_params->min_M; m <= pll_params->max_M; m++) {
 		u_f = ref_clk_f / m;
 
-		if (u_f < pll_params->min_u)
+		if (u_f < pll_params->min_u) {
 			break;
-		if (u_f > pll_params->max_u)
+		}
+		if (u_f > pll_params->max_u) {
 			continue;
+		}
 
 		n = (target_vco_f * m) / ref_clk_f;
 		n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
 
-		if (n > pll_params->max_N)
+		if (n > pll_params->max_N) {
 			break;
+		}
 
 		for (; n <= n2; n++) {
-			if (n < pll_params->min_N)
+			if (n < pll_params->min_N) {
 				continue;
-			if (n > pll_params->max_N)
+			}
+			if (n > pll_params->max_N) {
 				break;
+			}
 
 			vco_f = ref_clk_f * n / m;
 
@@ -231,9 +238,10 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 found_match:
 	BUG_ON(best_delta == ~0U);
 
-	if (best_fit && best_delta != 0)
+	if (best_fit && best_delta != 0) {
 		gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
 			target_clk_f);
+	}
 
 	pll->M = best_M;
 	pll->N = best_N;
@@ -278,11 +286,13 @@ static int nvgpu_fuse_calib_gpcpll_get_adc(struct gk20a *g,
 	int ret;
 
 	ret = nvgpu_tegra_fuse_read_reserved_calib(g, &val);
-	if (ret)
+	if (ret) {
 		return ret;
+	}
 
-	if (!fuse_get_gpcpll_adc_rev(val))
+	if (!fuse_get_gpcpll_adc_rev(val)) {
 		return -EINVAL;
+	}
 
 	*slope_uv = fuse_get_gpcpll_adc_slope_uv(val);
 	*intercept_uv = fuse_get_gpcpll_adc_intercept_uv(val);
@@ -521,8 +531,9 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
 	 */
 	clk_setup_slide(g, g->clk.gpc_pll.clk_in);
 
-	if (calibrated)
+	if (calibrated) {
 		return 0;
+	}
 
 	/*
 	 * If calibration parameters are not fused, start internal calibration,
@@ -544,8 +555,9 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
 	/* Wait for internal calibration done (spec < 2us). */
 	do {
 		data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
-		if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data))
+		if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data)) {
 			break;
+		}
 		nvgpu_udelay(1);
 		delay--;
 	} while (delay > 0);
@@ -623,11 +635,13 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 		coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
 		sdm_old = trim_sys_gpcpll_cfg2_sdm_din_v(coeff);
 		if ((gpll->dvfs.n_int == nold) &&
-				(gpll->dvfs.sdm_din == sdm_old))
+				(gpll->dvfs.sdm_din == sdm_old)) {
 			return 0;
+		}
 	} else {
-		if (gpll->N == nold)
+		if (gpll->N == nold) {
 			return 0;
+		}
 
 		/* dynamic ramp setup based on update rate */
 		clk_setup_slide(g, gpll->clk_in / gpll->M);
@@ -674,8 +688,9 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 		ramp_timeout--;
 		data = gk20a_readl(
 			g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
-		if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data))
+		if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data)) {
 			break;
+		}
 	} while (ramp_timeout > 0);
 
 	if ((gpll->mode == GPC_PLL_MODE_DVFS) && (ramp_timeout > 0)) {
@@ -836,8 +851,9 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	do {
 		nvgpu_udelay(1);
 		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
+		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) {
 			goto pll_locked;
+		}
 	} while (--timeout > 0);
 
 	/* PLL is messed up. What can we do here? */
@@ -883,8 +899,9 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 
 	nvgpu_log_fn(g, " ");
 
-	if (!nvgpu_platform_is_silicon(g))
+	if (!nvgpu_platform_is_silicon(g)) {
 		return 0;
+	}
 
 	/* get old coefficients */
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
@@ -901,19 +918,22 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 	can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);
 
-	if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL))
+	if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL)) {
 		return clk_slide_gpc_pll(g, gpll_new);
+	}
 
 	/* slide down to NDIV_LO */
 	if (can_slide) {
 		int ret;
 		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
 			gpll.clk_in);
-		if (gpll.mode == GPC_PLL_MODE_DVFS)
+		if (gpll.mode == GPC_PLL_MODE_DVFS) {
 			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
+		}
 		ret = clk_slide_gpc_pll(g, &gpll);
-		if (ret)
+		if (ret) {
 			return ret;
+		}
 	}
 	pldiv_only = can_slide && (gpll_new->M == gpll.M);
 
@@ -962,13 +982,15 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	if (allow_slide) {
 		gpll.N = DIV_ROUND_UP(gpll_new->M * gpc_pll_params.min_vco,
 			gpll_new->clk_in);
-		if (gpll.mode == GPC_PLL_MODE_DVFS)
+		if (gpll.mode == GPC_PLL_MODE_DVFS) {
 			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
+		}
 	}
-	if (pldiv_only)
+	if (pldiv_only) {
 		clk_change_pldiv_under_bypass(g, &gpll);
-	else
+	} else {
 		clk_lock_gpc_pll_under_bypass(g, &gpll);
+	}
 
 #if PLDIV_GLITCHLESS
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
@@ -1003,8 +1025,9 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
 {
 	u32 nsafe, nmin;
 
-	if (gpll->freq > g->clk.dvfs_safe_max_freq)
+	if (gpll->freq > g->clk.dvfs_safe_max_freq) {
 		gpll->freq = gpll->freq * (100 - DVFS_SAFE_MARGIN) / 100;
+	}
 
 	nmin = DIV_ROUND_UP(gpll->M * gpc_pll_params.min_vco, gpll->clk_in);
 	nsafe = gpll->M * gpll->freq / gpll->clk_in;
@@ -1054,8 +1077,9 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	 * - voltage is not changing, so DVFS detection settings are the same
 	 */
 	if (!allow_slide || !gpll_new->enabled ||
-		(gpll_old->dvfs.mv == gpll_new->dvfs.mv))
+		(gpll_old->dvfs.mv == gpll_new->dvfs.mv)) {
 		return clk_program_gpc_pll(g, gpll_new, allow_slide);
+	}
 
 	/*
 	 * Interim step for changing DVFS detection settings: low enough
@@ -1129,8 +1153,9 @@ static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
 		gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
 		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
 			gpll.clk_in);
-		if (gpll.mode == GPC_PLL_MODE_DVFS)
+		if (gpll.mode == GPC_PLL_MODE_DVFS) {
 			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
+		}
 		clk_slide_gpc_pll(g, &gpll);
 	}
 
@@ -1174,8 +1199,9 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&clk->clk_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if (clk->sw_ready) {
 		nvgpu_log_fn(g, "skip init");
@@ -1184,12 +1210,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 
 	if (clk->gpc_pll.id == GM20B_GPC_PLL_C1) {
 		gpc_pll_params = gpc_pll_params_c1;
-		if (!clk->pll_poweron_uv)
+		if (!clk->pll_poweron_uv) {
 			clk->pll_poweron_uv = BOOT_GPU_UV_C1;
+		}
 	} else {
 		gpc_pll_params = gpc_pll_params_b1;
-		if (!clk->pll_poweron_uv)
+		if (!clk->pll_poweron_uv) {
 			clk->pll_poweron_uv = BOOT_GPU_UV_B1;
+		}
 	}
 
 	clk->gpc_pll.clk_in = g->ops.clk.get_ref_clock_rate(g) / KHZ;
@@ -1254,8 +1282,9 @@ int gm20b_clk_prepare(struct clk_gk20a *clk)
 	int ret = 0;
 
 	nvgpu_mutex_acquire(&clk->clk_mutex);
-	if (!clk->gpc_pll.enabled && clk->clk_hw_on)
+	if (!clk->gpc_pll.enabled && clk->clk_hw_on) {
 		ret = set_pll_freq(clk->g, 1);
+	}
 	nvgpu_mutex_release(&clk->clk_mutex);
 	return ret;
 }
@@ -1263,8 +1292,9 @@ int gm20b_clk_prepare(struct clk_gk20a *clk)
 void gm20b_clk_unprepare(struct clk_gk20a *clk)
 {
 	nvgpu_mutex_acquire(&clk->clk_mutex);
-	if (clk->gpc_pll.enabled && clk->clk_hw_on)
+	if (clk->gpc_pll.enabled && clk->clk_hw_on) {
 		clk_disable_gpcpll(clk->g, 1);
+	}
 	nvgpu_mutex_release(&clk->clk_mutex);
 }
 
@@ -1287,8 +1317,9 @@ int gm20b_gpcclk_set_rate(struct clk_gk20a *clk, unsigned long rate,
 	nvgpu_mutex_acquire(&clk->clk_mutex);
 	old_freq = clk->gpc_pll.freq;
 	ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq);
-	if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
+	if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) {
 		ret = set_pll_freq(clk->g, 1);
+	}
 	nvgpu_mutex_release(&clk->clk_mutex);
 
 	return ret;
@@ -1303,15 +1334,17 @@ long gm20b_round_rate(struct clk_gk20a *clk, unsigned long rate,
 	struct gk20a *g = clk->g;
 
 	maxrate = g->ops.clk.get_maxrate(g, CTRL_CLK_DOMAIN_GPCCLK);
-	if (rate > maxrate)
+	if (rate > maxrate) {
 		rate = maxrate;
+	}
 
 	nvgpu_mutex_acquire(&clk->clk_mutex);
 	freq = rate_gpu_to_gpc2clk(rate);
-	if (freq > gpc_pll_params.max_freq)
+	if (freq > gpc_pll_params.max_freq) {
 		freq = gpc_pll_params.max_freq;
-	else if (freq < gpc_pll_params.min_freq)
+	} else if (freq < gpc_pll_params.min_freq) {
 		freq = gpc_pll_params.min_freq;
+	}
 
 	tmp_pll = clk->gpc_pll;
 	clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true);
@@ -1366,8 +1399,9 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
 	gk20a_writel(g, therm_clk_slowdown_r(0), data);
 	gk20a_readl(g, therm_clk_slowdown_r(0));
 
-	if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS)
+	if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) {
 		return clk_enbale_pll_dvfs(g);
+	}
 
 	return 0;
 }
@@ -1376,10 +1410,11 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
 {
 	struct clk_gk20a *clk = &g->clk;
 
-	if (freq > gpc_pll_params.max_freq)
+	if (freq > gpc_pll_params.max_freq) {
 		freq = gpc_pll_params.max_freq;
-	else if (freq < gpc_pll_params.min_freq)
+	} else if (freq < gpc_pll_params.min_freq) {
 		freq = gpc_pll_params.min_freq;
+	}
 
 	if (freq != old_freq) {
 		/* gpc_pll.freq is changed to new value here */
@@ -1403,12 +1438,14 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
 	/* If programming with dynamic sliding failed, re-try under bypass */
 	if (clk->gpc_pll.mode == GPC_PLL_MODE_DVFS) {
 		err = clk_program_na_gpc_pll(g, &clk->gpc_pll, allow_slide);
-		if (err && allow_slide)
+		if (err && allow_slide) {
 			err = clk_program_na_gpc_pll(g, &clk->gpc_pll, 0);
+		}
 	} else {
 		err = clk_program_gpc_pll(g, &clk->gpc_pll, allow_slide);
-		if (err && allow_slide)
+		if (err && allow_slide) {
 			err = clk_program_gpc_pll(g, &clk->gpc_pll, 0);
+		}
 	}
 
 	if (!err) {
@@ -1437,26 +1474,31 @@ int gm20b_init_clk_support(struct gk20a *g)
 
 	err = gm20b_init_clk_setup_hw(g);
 	nvgpu_mutex_release(&clk->clk_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	/* FIXME: this effectively prevents host level clock gating */
 	err = g->ops.clk.prepare_enable(&g->clk);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	/* The prev call may not enable PLL if gbus is unbalanced - force it */
 	nvgpu_mutex_acquire(&clk->clk_mutex);
-	if (!clk->gpc_pll.enabled)
+	if (!clk->gpc_pll.enabled) {
 		err = set_pll_freq(g, 1);
+	}
 	nvgpu_mutex_release(&clk->clk_mutex);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if (!clk->debugfs_set && g->ops.clk.init_debugfs) {
 		err = g->ops.clk.init_debugfs(g);
-		if (err)
+		if (err) {
 			return err;
+		}
 		clk->debugfs_set = true;
 	}
 
@@ -1471,8 +1513,9 @@ int gm20b_suspend_clk_support(struct gk20a *g)
 
 	/* The prev call may not disable PLL if gbus is unbalanced - force it */
 	nvgpu_mutex_acquire(&g->clk.clk_mutex);
-	if (g->clk.gpc_pll.enabled)
+	if (g->clk.gpc_pll.enabled) {
 		ret = clk_disable_gpcpll(g, 1);
+	}
 	g->clk.clk_hw_on = false;
 	nvgpu_mutex_release(&g->clk.clk_mutex);
 
@@ -1488,12 +1531,14 @@ int gm20b_clk_get_voltage(struct clk_gk20a *clk, u64 *val)
 	u32 det_out;
 	int err;
 
-	if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS)
+	if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS) {
 		return -ENOSYS;
+	}
 
 	err = gk20a_busy(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 
@@ -1519,8 +1564,9 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
 	u32 count1, count2;
 
 	err = gk20a_busy(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 
@@ -1559,8 +1605,9 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
 
 	gk20a_idle(g);
 
-	if (count1 != count2)
+	if (count1 != count2) {
 		return -EBUSY;
+	}
 
 	return 0;
 }
@@ -1571,11 +1618,13 @@ int gm20b_clk_pll_reg_write(struct gk20a *g, u32 reg, u32 val)
 		(reg > trim_sys_gpcpll_dvfs2_r())) &&
 		(reg != trim_sys_sel_vco_r()) &&
 		(reg != trim_sys_gpc2clk_out_r()) &&
-		(reg != trim_sys_bypassctrl_r()))
+		(reg != trim_sys_bypassctrl_r())) {
 		return -EPERM;
+	}
 
-	if (reg == trim_sys_gpcpll_dvfs2_r())
+	if (reg == trim_sys_gpcpll_dvfs2_r()) {
 		reg = trim_gpc_bcast_gpcpll_dvfs2_r();
+	}
 
 	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 	if (!g->clk.clk_hw_on) {