author    Terje Bergstrom <tbergstrom@nvidia.com>  2018-04-18 22:39:46 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-05-09 21:26:04 -0400
commit    dd739fcb039d51606e9a5454ec0aab17bcb01965
tree      806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent    7e66f2a63d4855e763fa768047dfc32f6f96b771
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally
left there because of use from other repositories. Because the new
functions do not work without a pointer to struct gk20a, and piping it
just for logging is excessive, some log messages are deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
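To illustrate the call-site pattern this change applies, here is a minimal, self-contained sketch. The printf-based macros below are stand-ins for illustration only, not the real nvgpu implementations; what they share with the real gk20a_dbg_fn()/nvgpu_log_fn() macros is the key difference this commit introduces: the new style takes a leading struct gk20a pointer at every call site.

/*
 * Hedged sketch of the logging conversion (stand-in macros, not nvgpu code).
 * The point is the extra "struct gk20a *g" argument required by the new
 * nvgpu_log*() style.
 */
#include <stdio.h>

struct gk20a { const char *name; };	/* stand-in for the nvgpu device struct */

/* old style: no device context available to the logger */
#define gk20a_dbg_fn(fmt, arg...) \
	printf("fn: " fmt "\n", ##arg)

/* new style: the first argument carries the device context */
#define nvgpu_log_fn(g, fmt, arg...) \
	printf("[%s] fn: " fmt "\n", (g)->name, ##arg)

int main(void)
{
	struct gk20a dev = { "gm20b" };

	gk20a_dbg_fn("request target freq %d MHz", 800);	/* before */
	nvgpu_log_fn(&dev, "request target freq %d MHz", 800);	/* after */
	return 0;
}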
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index fa751ecc..fb89752a 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -1,7 +1,7 @@
 /*
  * GM20B Clocks
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,8 +36,8 @@
 #include <nvgpu/hw/gm20b/hw_therm_gm20b.h>
 #include <nvgpu/hw/gm20b/hw_fuse_gm20b.h>
 
-#define gk20a_dbg_clk(fmt, arg...) \
-	gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
+#define gk20a_dbg_clk(g, fmt, arg...) \
+	nvgpu_log(g, gpu_dbg_clk, fmt, ##arg)
 
 #define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
 #define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */
@@ -138,6 +138,7 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
 static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
 {
+	struct gk20a *g = clk->g;
 	u32 min_vco_f, max_vco_f;
 	u32 best_M, best_N;
 	u32 low_PL, high_PL, best_PL;
@@ -149,7 +150,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 
 	BUG_ON(target_freq == NULL);
 
-	gk20a_dbg_fn("request target freq %d MHz", *target_freq);
+	nvgpu_log_fn(g, "request target freq %d MHz", *target_freq);
 
 	ref_clk_f = pll->clk_in;
 	target_clk_f = *target_freq;
@@ -172,7 +173,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 	low_PL = min(low_PL, pll_params->max_PL);
 	low_PL = max(low_PL, pll_params->min_PL);
 
-	gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
+	nvgpu_log_info(g, "low_PL %d(div%d), high_PL %d(div%d)",
 		low_PL, nvgpu_pl_to_div(low_PL), high_PL, nvgpu_pl_to_div(high_PL));
 
 	for (pl = low_PL; pl <= high_PL; pl++) {
@@ -217,7 +218,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 				goto found_match;
 			}
 
-			gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
+			nvgpu_log_info(g, "delta %d @ M %d, N %d, PL %d",
 				delta, m, n, pl);
 		}
 	}
@@ -229,7 +230,7 @@ found_match:
 	BUG_ON(best_delta == ~0U);
 
 	if (best_fit && best_delta != 0)
-		gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
+		gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
 			target_clk_f);
 
 	pll->M = best_M;
@@ -241,10 +242,10 @@ found_match:
 
 	*target_freq = pll->freq;
 
-	gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
+	gk20a_dbg_clk(g, "actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
 		*target_freq, pll->M, pll->N, pll->PL, nvgpu_pl_to_div(pll->PL));
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 
 	return 0;
 }
@@ -810,7 +811,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	if (gpll->mode == GPC_PLL_MODE_DVFS) {
 		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 		nvgpu_udelay(gpc_pll_params.na_lock_delay);
-		gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV",
+		gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
 			gpll->freq, gpll->freq / 2,
 			(trim_sys_gpcpll_cfg3_dfs_testout_v(
 				gk20a_readl(g, trim_sys_gpcpll_cfg3_r()))
@@ -843,7 +844,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 		return -EBUSY;
 
 pll_locked:
-	gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x",
+	gk20a_dbg_clk(g, "locked config_pll under bypass r=0x%x v=0x%x",
 		trim_sys_gpcpll_cfg_r(), cfg);
 
 	/* set SYNC_MODE for glitchless switch out of bypass */
@@ -878,7 +879,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	bool can_slide, pldiv_only;
 	struct pll gpll;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	if (!nvgpu_platform_is_silicon(g))
 		return 0;
@@ -1028,7 +1029,7 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
 	gpll->N = nsafe;
 	clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);
 
-	gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
+	gk20a_dbg_clk(g, "safe freq %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
 		gpll->freq, gpll->M, gpll->N, gpll->PL, nvgpu_pl_to_div(gpll->PL),
 		gpll->dvfs.mv, gpll->dvfs.uv_cal / 1000, gpll->dvfs.dfs_coeff);
 }
@@ -1103,7 +1104,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
 	clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);
 
-	gk20a_dbg_clk("config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
+	gk20a_dbg_clk(g, "config_pll %d kHz, M %d, N %d, PL %d(div%d), mV(cal) %d(%d), DC %d",
 		gpll_new->freq, gpll_new->M, gpll_new->N, gpll_new->PL,
 		nvgpu_pl_to_div(gpll_new->PL),
 		max(gpll_new->dvfs.mv, gpll_old->dvfs.mv),
@@ -1168,14 +1169,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 	unsigned long safe_rate;
 	int err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_mutex_init(&clk->clk_mutex);
 	if (err)
 		return err;
 
 	if (clk->sw_ready) {
-		gk20a_dbg_fn("skip init");
+		nvgpu_log_fn(g, "skip init");
 		return 0;
 	}
 
@@ -1229,7 +1230,7 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
 
 	clk->sw_ready = true;
 
-	gk20a_dbg_fn("done");
+	nvgpu_log_fn(g, "done");
 	nvgpu_info(g,
 		"GPCPLL initial settings:%s M=%u, N=%u, P=%u (id = %u)",
 		clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
@@ -1321,7 +1322,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
 {
 	u32 data;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	/* LDIV: Div4 mode (required); both bypass and vco ratios 1:1 */
 	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
@@ -1394,7 +1395,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
 	struct clk_gk20a *clk = &g->clk;
 	int err = 0;
 
-	gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz",
+	nvgpu_log_fn(g, "last freq: %dMHz, target freq %dMHz",
 		clk->gpc_pll_last.freq, clk->gpc_pll.freq);
 
 	/* If programming with dynamic sliding failed, re-try under bypass */
@@ -1427,7 +1428,7 @@ int gm20b_init_clk_support(struct gk20a *g)
 	struct clk_gk20a *clk = &g->clk;
 	u32 err;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&clk->clk_mutex);
 	clk->clk_hw_on = true;