author     Alex Frid <afrid@nvidia.com>          2014-07-26 22:12:19 -0400
committer  Dan Willemsen <dwillemsen@nvidia.com> 2015-03-18 15:10:38 -0400
commit     8554e9a9c8ba67835d023da2a9148a5af5db2b17 (patch)
tree       c372beff66edf6e3edbc789b7263cdbbfd3e2b27 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent     40bab00bf706d14914c0ca80a234b9979217453f (diff)
gpu: nvgpu: Update GM20B GPCPLL programming sequence
Updated the GM20B GPCPLL programming sequence to utilize the new
glitch-less post divider:

- No longer bypass the PLL for re-locking if it is already enabled and
  the post divider as well as the feedback divider are changing (an
  input divider change still happens under bypass only).

- Use the post divider instead of the external linear divider to
  introduce a (VCO min/2) intermediate step when changing the PLL
  frequency.

Bug 1450787

Signed-off-by: Alex Frid <afrid@nvidia.com>
Change-Id: I4fe60f8eb0d8e59002b641a6bfb29a53467dc8ce
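For orientation, the arithmetic behind that (VCO min/2) step: the VCO runs at
ref_clk_f * N / M, the output is the VCO divided by the post divider PL, and
NDIV_LO is the smallest feedback divider that keeps the VCO at or above
min_vco (the diff below computes it with DIV_ROUND_UP). A minimal standalone
sketch of that math; the 38.4 MHz reference and 1.3 GHz minimum VCO are
assumed values for illustration, not taken from this file:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical example values, for illustration only */
#define CLK_IN_KHZ	38400		/* reference clock */
#define MIN_VCO_KHZ	1300000		/* minimum VCO frequency */

int main(void)
{
	unsigned int m = 1;	/* input divider */
	/* NDIV_LO: smallest N keeping VCO >= min_vco, as in the diff */
	unsigned int nlo = DIV_ROUND_UP(m * MIN_VCO_KHZ, CLK_IN_KHZ);
	unsigned int vco = CLK_IN_KHZ * nlo / m;	/* vco_f = ref_clk_f * N / M */

	printf("NDIV_LO = %u -> VCO = %u kHz\n", nlo, vco);
	/* with the post divider forced to >= 1:2 first, any output jump
	 * is bounded by min_vco/2 */
	printf("output at PL = 2: %u kHz\n", vco / 2);
	return 0;
}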
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
 drivers/gpu/nvgpu/gm20b/clk_gm20b.c (-rw-r--r--) | 65
 1 file changed, 55 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index aec96341..8130f53d 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -56,6 +56,9 @@ static inline u32 div_to_pl(u32 div)
 	return div;
 }
 
+/* FIXME: remove after on-silicon testing */
+#define PLDIV_GLITCHLESS 1
+
 /* Calculate and update M/N/PL as well as pll->freq
     ref_clk_f = clk_in_f;
     u_f = ref_clk_f / M;
@@ -366,9 +369,12 @@ pll_locked:
 static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
 		int allow_slide)
 {
-	u32 data, cfg, coeff;
-	u32 m, n, pl;
-	u32 nlo;
+#if !PLDIV_GLITCHLESS
+	u32 data;
+#endif
+	u32 cfg, coeff;
+	u32 m, n, pl, nlo;
+	bool can_slide;
 
 	gk20a_dbg_fn("");
 
@@ -383,19 +389,41 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
 
 	/* do NDIV slide if there is no change in M and PL */
 	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-	if (allow_slide && clk->gpc_pll.M == m && clk->gpc_pll.PL == pl
-		&& trim_sys_gpcpll_cfg_enable_v(cfg)) {
+	can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);
+
+	if (can_slide && (clk->gpc_pll.M == m) && (clk->gpc_pll.PL == pl))
 		return clk_slide_gpc_pll(g, clk->gpc_pll.N);
-	}
 
 	/* slide down to NDIV_LO */
 	nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
-	if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
+	if (can_slide) {
 		int ret = clk_slide_gpc_pll(g, nlo);
 		if (ret)
 			return ret;
 	}
 
+#if PLDIV_GLITCHLESS
+	/*
+	 * Limit either FO-to-FO (path A below) or FO-to-bypass (path B below)
+	 * jump to min_vco/2 by setting post divider >= 1:2.
+	 */
+	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
+	if ((clk->gpc_pll.PL < 2) || (pl < 2)) {
+		if (pl != 2) {
+			coeff = set_field(coeff,
+				trim_sys_gpcpll_coeff_pldiv_m(),
+				trim_sys_gpcpll_coeff_pldiv_f(2));
+			gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
+			coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
+			udelay(2);
+		}
+	}
+
+	if (can_slide && (clk->gpc_pll.M == m))
+		goto set_pldiv; /* path A: no need to bypass */
+
+	/* path B: bypass if either M changes or PLL is disabled */
+#else
 	/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
 	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
 	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
@@ -403,27 +431,44 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
 	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
 	gk20a_readl(g, trim_sys_gpc2clk_out_r());
 	udelay(2);
-
+#endif
 	/*
 	 * Program and lock pll under bypass. On exit PLL is out of bypass,
 	 * enabled, and locked. VCO is at vco_min if sliding is allowed.
 	 * Otherwise it is at VCO target (and therefore last slide call below
-	 * is effectively NOP).
+	 * is effectively NOP). PL is preserved (not set to target) if post
+	 * divider is glitchless. Otherwise it is at PL target.
 	 */
 	m = clk->gpc_pll.M;
 	nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
 	n = allow_slide ? nlo : clk->gpc_pll.N;
+#if PLDIV_GLITCHLESS
+	pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);
+#else
 	pl = clk->gpc_pll.PL;
+#endif
 	clk_lock_gpc_pll_under_bypass(g, m, n, pl);
 	clk->gpc_pll.enabled = true;
 
+#if PLDIV_GLITCHLESS
+	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
+	udelay(2);
+
+set_pldiv:
+	/* coeff must be current from either path A or B */
+	if (trim_sys_gpcpll_coeff_pldiv_v(coeff) != clk->gpc_pll.PL) {
+		coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
+			trim_sys_gpcpll_coeff_pldiv_f(clk->gpc_pll.PL));
+		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
+	}
+#else
 	/* restore out divider 1:1 */
 	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
 	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
 			trim_sys_gpc2clk_out_vcodiv_by1_f());
 	udelay(2);
 	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
-
+#endif
 	/* slide up to target NDIV */
 	return clk_slide_gpc_pll(g, clk->gpc_pll.N);
 }
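Condensed, the control flow clk_program_gpc_pll() ends up with in the
PLDIV_GLITCHLESS case looks like the sketch below. The branch structure
mirrors the diff, but everything else is an assumption made for
illustration: pll_state, slide(), write_pldiv(), and lock_under_bypass()
are invented stubs, not the driver's API, and details such as which PL
value is programmed under bypass are simplified.

#include <stdbool.h>

/* Hypothetical state and stubs for illustration; not the driver's API */
struct pll_state { unsigned int M, N, PL; bool enabled; };

static int slide(unsigned int n) { (void)n; return 0; }	/* NDIV slide */
static void write_pldiv(unsigned int pl) { (void)pl; }	/* glitch-less PL write */
static void lock_under_bypass(unsigned int m, unsigned int n, unsigned int pl)
{
	(void)m; (void)n; (void)pl;			/* relock under bypass */
}

static int program_gpc_pll(struct pll_state *cur, unsigned int m,
			   unsigned int n, unsigned int nlo,
			   unsigned int pl, bool allow_slide)
{
	bool can_slide = allow_slide && cur->enabled;
	unsigned int live_pl = cur->PL;

	/* M and PL unchanged: only the feedback divider moves, slide in place */
	if (can_slide && cur->M == m && cur->PL == pl)
		return slide(n);

	if (can_slide) {
		int ret = slide(nlo);	/* first slide down to NDIV_LO */
		if (ret)
			return ret;
	}

	/* bound the output jump to min_vco/2 by forcing post divider >= 1:2 */
	if (cur->PL < 2 || pl < 2) {
		live_pl = 2;
		write_pldiv(live_pl);
	}

	if (!(can_slide && cur->M == m)) {
		/* path B: M changed or PLL disabled - relock under bypass,
		 * keeping the live post divider rather than the target one */
		lock_under_bypass(m, can_slide ? nlo : n, live_pl);
		cur->enabled = true;
	}
	/* path A falls through: PLL stays locked, bypass never entered */

	if (live_pl != pl)
		write_pldiv(pl);	/* final glitch-less post divider change */

	cur->M = m; cur->N = n; cur->PL = pl;
	return slide(n);		/* slide up to target NDIV */
}

Path A is the point of the patch: when only N and PL change on an already
locked PLL, the sequence never switches the output to bypass, so the
re-lock glitch of the old FO-to-bypass transition is avoided entirely.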