path: root/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
author	Alex Frid <afrid@nvidia.com>	2014-08-26 00:45:21 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:04 -0400
commit	08983f727f0f3574aebb07027060b75e5b6dac6c (patch)
tree	8b1cd6346ffd920036a378b4669d78f7c93b3fe0 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent	55438f4009accdc0e113d1b82ade1521fa8be0b4 (diff)
gpu: nvgpu: Re-factor GM20b clk_slide_gpc_pll()
Pass the pll structure to the GM20b clk_slide_gpc_pll() function instead of just the feedback divider N value.

Change-Id: Ic99d23895ae27e48ccd5a12de99a58bab320df16
Signed-off-by: Alex Frid <afrid@nvidia.com>
Reviewed-on: http://git-master/r/488025
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
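For reference, here is a minimal sketch of the pll descriptor this change passes around, assuming only the fields that are visible in the hunks below (M, N, PL, clk_in); the actual definition lives in the nvgpu clock headers and likely has more members:

struct pll {
	u32 M;       /* input (reference) divider */
	u32 N;       /* feedback divider -- previously passed to clk_slide_gpc_pll() on its own */
	u32 PL;      /* post divider */
	u32 clk_in;  /* reference clock rate */
	/* ... other members not shown in this diff ... */
};

With the whole structure in hand, clk_slide_gpc_pll() derives its ramp setup rate from gpll->clk_in / gpll->M instead of re-reading M from the coefficient register, and callers that need a slide to NDIV_LO (clk_program_gpc_pll(), clk_disable_gpcpll()) fill in gpll.N themselves before calling it.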
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/clk_gm20b.c	48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index f8b54465..4c1b9012 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -216,10 +216,10 @@ static void clk_setup_slide(struct gk20a *g, u32 clk_u)
 	gk20a_writel(g, trim_sys_gpcpll_cfg3_r(), data);
 }
 
-static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
+static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 {
 	u32 data, coeff;
-	u32 nold, m;
+	u32 nold;
 	int ramp_timeout = 500;
 
 	/* get old coefficients */
@@ -227,12 +227,11 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
 	nold = trim_sys_gpcpll_coeff_ndiv_v(coeff);
 
 	/* do nothing if NDIV is same */
-	if (n == nold)
+	if (gpll->N == nold)
 		return 0;
 
 	/* dynamic ramp setup based on update rate */
-	m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
-	clk_setup_slide(g, g->clk.gpc_pll.clk_in / m);
+	clk_setup_slide(g, gpll->clk_in / gpll->M);
 
 	/* pll slowdown mode */
 	data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
@@ -244,7 +243,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
 	/* new ndiv ready for ramp */
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
 	coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
-			trim_sys_gpcpll_coeff_ndiv_f(n));
+			trim_sys_gpcpll_coeff_ndiv_f(gpll->N));
 	udelay(1);
 	gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
 
@@ -376,7 +375,6 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	u32 data;
 #endif
 	u32 cfg, coeff;
-	u32 m, n, pl, nlo;
 	bool can_slide;
 	struct pll gpll;
 
@@ -387,21 +385,24 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 
 	/* get old coefficients */
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
-	m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
-	n = trim_sys_gpcpll_coeff_ndiv_v(coeff);
-	pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);
+	gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
+	gpll.N = trim_sys_gpcpll_coeff_ndiv_v(coeff);
+	gpll.PL = trim_sys_gpcpll_coeff_pldiv_v(coeff);
+	gpll.clk_in = gpll_new->clk_in;
 
 	/* do NDIV slide if there is no change in M and PL */
 	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 	can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);
 
-	if (can_slide && (gpll_new->M == m) && (gpll_new->PL == pl))
-		return clk_slide_gpc_pll(g, gpll_new->N);
+	if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL))
+		return clk_slide_gpc_pll(g, gpll_new);
 
 	/* slide down to NDIV_LO */
-	nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, gpll_new->clk_in);
 	if (can_slide) {
-		int ret = clk_slide_gpc_pll(g, nlo);
+		int ret;
+		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
+				gpll.clk_in);
+		ret = clk_slide_gpc_pll(g, &gpll);
 		if (ret)
 			return ret;
 	}
@@ -411,10 +412,10 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 	 * Limit either FO-to-FO (path A below) or FO-to-bypass (path B below)
 	 * jump to min_vco/2 by setting post divider >= 1:2.
 	 */
-	skip_bypass = can_slide && (gpll_new->M == m);
+	skip_bypass = can_slide && (gpll_new->M == gpll.M);
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
-	if ((skip_bypass && (gpll_new->PL < 2)) || (pl < 2)) {
-		if (pl != 2) {
+	if ((skip_bypass && (gpll_new->PL < 2)) || (gpll.PL < 2)) {
+		if (gpll.PL != 2) {
 			coeff = set_field(coeff,
 				trim_sys_gpcpll_coeff_pldiv_m(),
 				trim_sys_gpcpll_coeff_pldiv_f(2));
@@ -474,22 +475,23 @@ set_pldiv:
 	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
 #endif
 	/* slide up to target NDIV */
-	return clk_slide_gpc_pll(g, gpll_new->N);
+	return clk_slide_gpc_pll(g, gpll_new);
 }
 
 static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
 {
-	u32 cfg, coeff, m, nlo;
+	u32 cfg, coeff;
 	struct clk_gk20a *clk = &g->clk;
+	struct pll gpll = clk->gpc_pll;
 
 	/* slide to VCO min */
 	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 	if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
 		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
-		m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
-		nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco,
-				clk->gpc_pll.clk_in);
-		clk_slide_gpc_pll(g, nlo);
+		gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
+		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
+				gpll.clk_in);
+		clk_slide_gpc_pll(g, &gpll);
 	}
 
 	/* put PLL in bypass before disabling it */