summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
diff options
context:
space:
mode:
authorAlex Frid <afrid@nvidia.com>2014-07-25 20:46:57 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:10:38 -0400
commit8c802fc6aec619d2ab9a104b92f1ac7c4f0d963f (patch)
tree2df63d9a6b8f689d720419d418fb335b707c78c2 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parentfc73ff7214590782aaa055c7d33c83433f7c9b48 (diff)
gpu: nvgpu: Update GM20b GPCPLL locking under bypass
Moved GPCPLL locking under bypass procedure into a separate function. Added SYNC_MODE control during locking.

Bug 1450787

Change-Id: I8dbf9427fbdaf55ea20b6876750b518eb738de1b
Signed-off-by: Alex Frid <afrid@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r-- drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 144
1 file changed, 88 insertions, 56 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 7fc4b8fb..6211a2cc 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -249,76 +249,42 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
249 return 0; 249 return 0;
250} 250}
251 251
252static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, 252static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, u32 m, u32 n, u32 pl)
253 int allow_slide)
254{ 253{
255 u32 data, cfg, coeff, timeout; 254 u32 data, cfg, coeff, timeout;
256 u32 m, n, pl;
257 u32 nlo;
258
259 gk20a_dbg_fn("");
260
261 if (!tegra_platform_is_silicon())
262 return 0;
263
264 /* get old coefficients */
265 coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
266 m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
267 n = trim_sys_gpcpll_coeff_ndiv_v(coeff);
268 pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);
269
270 /* do NDIV slide if there is no change in M and PL */
271 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
272 if (allow_slide && clk->gpc_pll.M == m && clk->gpc_pll.PL == pl
273 && trim_sys_gpcpll_cfg_enable_v(cfg)) {
274 return clk_slide_gpc_pll(g, clk->gpc_pll.N);
275 }
276
277 /* slide down to NDIV_LO */
278 nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
279 if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
280 int ret = clk_slide_gpc_pll(g, nlo);
281 if (ret)
282 return ret;
283 }
284
285 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
286 data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
287 data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
288 trim_sys_gpc2clk_out_vcodiv_f(2));
289 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
290 255
291 /* put PLL in bypass before programming it */ 256 /* put PLL in bypass before programming it */
292 data = gk20a_readl(g, trim_sys_sel_vco_r()); 257 data = gk20a_readl(g, trim_sys_sel_vco_r());
293 data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(), 258 data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
294 trim_sys_sel_vco_gpc2clk_out_bypass_f()); 259 trim_sys_sel_vco_gpc2clk_out_bypass_f());
295 udelay(2);
296 gk20a_writel(g, trim_sys_sel_vco_r(), data); 260 gk20a_writel(g, trim_sys_sel_vco_r(), data);
297 261
298 /* get out from IDDQ */
299 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 262 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
300 if (trim_sys_gpcpll_cfg_iddq_v(cfg)) { 263 if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
264 /* get out from IDDQ (1st power up) */
301 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(), 265 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
302 trim_sys_gpcpll_cfg_iddq_power_on_v()); 266 trim_sys_gpcpll_cfg_iddq_power_on_v());
303 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 267 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
304 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 268 gk20a_readl(g, trim_sys_gpcpll_cfg_r());
305 udelay(2); 269 udelay(5);
306 } 270 } else {
271 /* clear SYNC_MODE before disabling PLL */
272 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
273 trim_sys_gpcpll_cfg_sync_mode_disable_f());
274 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
275 gk20a_readl(g, trim_sys_gpcpll_cfg_r());
307 276
308 /* disable PLL before changing coefficients */ 277 /* disable running PLL before changing coefficients */
309 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 278 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
310 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(), 279 trim_sys_gpcpll_cfg_enable_no_f());
311 trim_sys_gpcpll_cfg_enable_no_f()); 280 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
312 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 281 gk20a_readl(g, trim_sys_gpcpll_cfg_r());
313 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 282 }
314 283
315 /* change coefficients */ 284 /* change coefficients */
316 nlo = DIV_ROUND_UP(clk->gpc_pll.M * gpc_pll_params.min_vco, 285 coeff = trim_sys_gpcpll_coeff_mdiv_f(m) |
317 clk->gpc_pll.clk_in); 286 trim_sys_gpcpll_coeff_ndiv_f(n) |
318 coeff = trim_sys_gpcpll_coeff_mdiv_f(clk->gpc_pll.M) | 287 trim_sys_gpcpll_coeff_pldiv_f(pl);
319 trim_sys_gpcpll_coeff_ndiv_f(allow_slide ?
320 nlo : clk->gpc_pll.N) |
321 trim_sys_gpcpll_coeff_pldiv_f(clk->gpc_pll.PL);
322 gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff); 288 gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
323 289
324 /* enable PLL after changing coefficients */ 290 /* enable PLL after changing coefficients */
@@ -336,7 +302,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
336 } 302 }
337 303
338 /* wait pll lock */ 304 /* wait pll lock */
339 timeout = clk->pll_delay / 2 + 1; 305 timeout = g->clk.pll_delay / 2 + 1;
340 do { 306 do {
341 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 307 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
342 if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) 308 if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
@@ -349,11 +315,76 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
349 return -EBUSY; 315 return -EBUSY;
350 316
351pll_locked: 317pll_locked:
318 gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x",
319 trim_sys_gpcpll_cfg_r(), cfg);
320
321 /* set SYNC_MODE for glitchless switch out of bypass */
322 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
323 trim_sys_gpcpll_cfg_sync_mode_enable_f());
324 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
325 gk20a_readl(g, trim_sys_gpcpll_cfg_r());
326
352 /* put PLL back on vco */ 327 /* put PLL back on vco */
353 data = gk20a_readl(g, trim_sys_sel_vco_r()); 328 data = gk20a_readl(g, trim_sys_sel_vco_r());
354 data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(), 329 data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
355 trim_sys_sel_vco_gpc2clk_out_vco_f()); 330 trim_sys_sel_vco_gpc2clk_out_vco_f());
356 gk20a_writel(g, trim_sys_sel_vco_r(), data); 331 gk20a_writel(g, trim_sys_sel_vco_r(), data);
332
333 return 0;
334}
335
336static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
337 int allow_slide)
338{
339 u32 data, cfg, coeff;
340 u32 m, n, pl;
341 u32 nlo;
342
343 gk20a_dbg_fn("");
344
345 if (!tegra_platform_is_silicon())
346 return 0;
347
348 /* get old coefficients */
349 coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
350 m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
351 n = trim_sys_gpcpll_coeff_ndiv_v(coeff);
352 pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);
353
354 /* do NDIV slide if there is no change in M and PL */
355 cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
356 if (allow_slide && clk->gpc_pll.M == m && clk->gpc_pll.PL == pl
357 && trim_sys_gpcpll_cfg_enable_v(cfg)) {
358 return clk_slide_gpc_pll(g, clk->gpc_pll.N);
359 }
360
361 /* slide down to NDIV_LO */
362 nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
363 if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
364 int ret = clk_slide_gpc_pll(g, nlo);
365 if (ret)
366 return ret;
367 }
368
369 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
370 data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
371 data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
372 trim_sys_gpc2clk_out_vcodiv_f(2));
373 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
374 gk20a_readl(g, trim_sys_gpc2clk_out_r());
375 udelay(2);
376
377 /*
378 * Program and lock pll under bypass. On exit PLL is out of bypass,
379 * enabled, and locked. VCO is at vco_min if sliding is allowed.
380 * Otherwise it is at VCO target (and therefore last slide call below
381 * is effectively NOP).
382 */
383 m = clk->gpc_pll.M;
384 nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
385 n = allow_slide ? nlo : clk->gpc_pll.N;
386 pl = clk->gpc_pll.PL;
387 clk_lock_gpc_pll_under_bypass(g, m, n, pl);
357 clk->gpc_pll.enabled = true; 388 clk->gpc_pll.enabled = true;
358 389
359 /* restore out divider 1:1 */ 390 /* restore out divider 1:1 */
@@ -731,9 +762,10 @@ static int pll_reg_show(struct seq_file *s, void *data)
731 seq_printf(s, "sel_vco = %s, ", reg ? "vco" : "bypass"); 762 seq_printf(s, "sel_vco = %s, ", reg ? "vco" : "bypass");
732 763
733 reg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 764 reg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
734 seq_printf(s, "cfg = 0x%x : %s : %s\n", reg, 765 seq_printf(s, "cfg = 0x%x : %s : %s : %s\n", reg,
735 trim_sys_gpcpll_cfg_enable_v(reg) ? "enabled" : "disabled", 766 trim_sys_gpcpll_cfg_enable_v(reg) ? "enabled" : "disabled",
736 trim_sys_gpcpll_cfg_pll_lock_v(reg) ? "locked" : "unlocked"); 767 trim_sys_gpcpll_cfg_pll_lock_v(reg) ? "locked" : "unlocked",
768 trim_sys_gpcpll_cfg_sync_mode_v(reg) ? "sync_on" : "sync_off");
737 769
738 reg = gk20a_readl(g, trim_sys_gpcpll_coeff_r()); 770 reg = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
739 m = trim_sys_gpcpll_coeff_mdiv_v(reg); 771 m = trim_sys_gpcpll_coeff_mdiv_v(reg);