diff options
author | Alex Frid <afrid@nvidia.com> | 2014-08-24 03:10:57 -0400 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:11:03 -0400 |
commit | 4b02dea47d672baf6a566d187cb42c91bf14bf8c (patch) | |
tree | 675dc0d6fb82dea0c16e7fe169484d63656670bd /drivers/gpu/nvgpu/gm20b/clk_gm20b.c | |
parent | c3d31210f8ff706658ebacf89ee74a071ba76c8b (diff) |
gpu: nvgpu: Re-factor GM20b clk_program_gpc_pll()
Pass the pll structure to the GM20b clk_program_gpc_pll() function instead
of the enclosing clock structure.
Change-Id: I81a3a3c03365f4b6997c17894c5210ebdadcbca6
Signed-off-by: Alex Frid <afrid@nvidia.com>
Reviewed-on: http://git-master/r/488023
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 34 |
1 files changed, 17 insertions, 17 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c index 74a5cb96..bfc1cf55 100644 --- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c | |||
@@ -367,7 +367,7 @@ pll_locked: | |||
367 | return 0; | 367 | return 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, | 370 | static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new, |
371 | int allow_slide) | 371 | int allow_slide) |
372 | { | 372 | { |
373 | #if PLDIV_GLITCHLESS | 373 | #if PLDIV_GLITCHLESS |
@@ -394,11 +394,11 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, | |||
394 | cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); | 394 | cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r()); |
395 | can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg); | 395 | can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg); |
396 | 396 | ||
397 | if (can_slide && (clk->gpc_pll.M == m) && (clk->gpc_pll.PL == pl)) | 397 | if (can_slide && (gpll_new->M == m) && (gpll_new->PL == pl)) |
398 | return clk_slide_gpc_pll(g, clk->gpc_pll.N); | 398 | return clk_slide_gpc_pll(g, gpll_new->N); |
399 | 399 | ||
400 | /* slide down to NDIV_LO */ | 400 | /* slide down to NDIV_LO */ |
401 | nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in); | 401 | nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, gpll_new->clk_in); |
402 | if (can_slide) { | 402 | if (can_slide) { |
403 | int ret = clk_slide_gpc_pll(g, nlo); | 403 | int ret = clk_slide_gpc_pll(g, nlo); |
404 | if (ret) | 404 | if (ret) |
@@ -410,9 +410,9 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, | |||
410 | * Limit either FO-to-FO (path A below) or FO-to-bypass (path B below) | 410 | * Limit either FO-to-FO (path A below) or FO-to-bypass (path B below) |
411 | * jump to min_vco/2 by setting post divider >= 1:2. | 411 | * jump to min_vco/2 by setting post divider >= 1:2. |
412 | */ | 412 | */ |
413 | skip_bypass = can_slide && (clk->gpc_pll.M == m); | 413 | skip_bypass = can_slide && (gpll_new->M == m); |
414 | coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r()); | 414 | coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r()); |
415 | if ((skip_bypass && (clk->gpc_pll.PL < 2)) || (pl < 2)) { | 415 | if ((skip_bypass && (gpll_new->PL < 2)) || (pl < 2)) { |
416 | if (pl != 2) { | 416 | if (pl != 2) { |
417 | coeff = set_field(coeff, | 417 | coeff = set_field(coeff, |
418 | trim_sys_gpcpll_coeff_pldiv_m(), | 418 | trim_sys_gpcpll_coeff_pldiv_m(), |
@@ -443,16 +443,16 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, | |||
443 | * is effectively NOP). PL is preserved (not set to target) of post | 443 | * is effectively NOP). PL is preserved (not set to target) of post |
444 | * divider is glitchless. Otherwise it is at PL target. | 444 | * divider is glitchless. Otherwise it is at PL target. |
445 | */ | 445 | */ |
446 | m = clk->gpc_pll.M; | 446 | m = gpll_new->M; |
447 | nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in); | 447 | nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, gpll_new->clk_in); |
448 | n = allow_slide ? nlo : clk->gpc_pll.N; | 448 | n = allow_slide ? nlo : gpll_new->N; |
449 | #if PLDIV_GLITCHLESS | 449 | #if PLDIV_GLITCHLESS |
450 | pl = (clk->gpc_pll.PL < 2) ? 2 : clk->gpc_pll.PL; | 450 | pl = (gpll_new->PL < 2) ? 2 : gpll_new->PL; |
451 | #else | 451 | #else |
452 | pl = clk->gpc_pll.PL; | 452 | pl = gpll_new->PL; |
453 | #endif | 453 | #endif |
454 | clk_lock_gpc_pll_under_bypass(g, m, n, pl); | 454 | clk_lock_gpc_pll_under_bypass(g, m, n, pl); |
455 | clk->gpc_pll.enabled = true; | 455 | gpll_new->enabled = true; |
456 | 456 | ||
457 | #if PLDIV_GLITCHLESS | 457 | #if PLDIV_GLITCHLESS |
458 | coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r()); | 458 | coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r()); |
@@ -460,9 +460,9 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk, | |||
460 | 460 | ||
461 | set_pldiv: | 461 | set_pldiv: |
462 | /* coeff must be current from either path A or B */ | 462 | /* coeff must be current from either path A or B */ |
463 | if (trim_sys_gpcpll_coeff_pldiv_v(coeff) != clk->gpc_pll.PL) { | 463 | if (trim_sys_gpcpll_coeff_pldiv_v(coeff) != gpll_new->PL) { |
464 | coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(), | 464 | coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(), |
465 | trim_sys_gpcpll_coeff_pldiv_f(clk->gpc_pll.PL)); | 465 | trim_sys_gpcpll_coeff_pldiv_f(gpll_new->PL)); |
466 | gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff); | 466 | gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff); |
467 | } | 467 | } |
468 | #else | 468 | #else |
@@ -474,7 +474,7 @@ set_pldiv: | |||
474 | gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); | 474 | gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); |
475 | #endif | 475 | #endif |
476 | /* slide up to target NDIV */ | 476 | /* slide up to target NDIV */ |
477 | return clk_slide_gpc_pll(g, clk->gpc_pll.N); | 477 | return clk_slide_gpc_pll(g, gpll_new->N); |
478 | } | 478 | } |
479 | 479 | ||
480 | static int clk_disable_gpcpll(struct gk20a *g, int allow_slide) | 480 | static int clk_disable_gpcpll(struct gk20a *g, int allow_slide) |
@@ -656,9 +656,9 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq) | |||
656 | 656 | ||
657 | /* change frequency only if power is on */ | 657 | /* change frequency only if power is on */ |
658 | if (g->clk.clk_hw_on) { | 658 | if (g->clk.clk_hw_on) { |
659 | err = clk_program_gpc_pll(g, clk, 1); | 659 | err = clk_program_gpc_pll(g, &clk->gpc_pll, 1); |
660 | if (err) | 660 | if (err) |
661 | err = clk_program_gpc_pll(g, clk, 0); | 661 | err = clk_program_gpc_pll(g, &clk->gpc_pll, 0); |
662 | } | 662 | } |
663 | 663 | ||
664 | /* Just report error but not restore PLL since dvfs could already change | 664 | /* Just report error but not restore PLL since dvfs could already change |