author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-04-10 16:47:37 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-13 16:44:41 -0400
commit	947a23606351f1c81072cedcd4ab686d34042912 (patch)
tree	79a29254b726d40713d06c6a17d71860c112cc64 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent	22426a5452ba943ac48867722fb0927baf66d4ac (diff)
gpu: nvgpu: gm20b: Use new delay APIs
Use platform-agnostic delay functions instead of Linux kernel APIs. This allows removing the dependency on the Linux header linux/delay.h. At the same time, remove #include lines for other unused Linux headers.

JIRA NVGPU-16

Change-Id: I05df9d72edaf4bb061febe0cb40fc8a7cf9f51c7
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1460114
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
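For context, nvgpu_udelay() is the platform-agnostic helper this patch switches to. A minimal sketch of what a Linux back end for it could look like follows; the forwarding to the kernel's udelay() is an assumption for illustration, not the actual nvgpu common-OS source.

/*
 * Minimal sketch (assumption, not the actual nvgpu implementation):
 * on a Linux back end the platform-agnostic delay API could simply
 * forward to the kernel's busy-wait delay primitive, keeping callers
 * free of any direct linux/delay.h dependency.
 */
#include <linux/delay.h>

void nvgpu_udelay(unsigned int usecs)
{
	udelay(usecs);
}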
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/clk_gm20b.c	42
1 file changed, 20 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 4d7470d4..358e7369 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -18,8 +18,6 @@
 
 #include <linux/version.h>
 #include <linux/clk.h>
-#include <linux/delay.h>	/* for mdelay */
-#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/clk/tegra.h>
@@ -403,7 +401,7 @@ static void clk_set_dfs_coeff(struct gk20a *g, u32 dfs_coeff)
 	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);
 
 	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	data &= ~DFS_EXT_STROBE;
 	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
 }
@@ -420,7 +418,7 @@ static void __maybe_unused clk_set_dfs_det_max(struct gk20a *g, u32 dfs_det_max)
 	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);
 
 	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	data &= ~DFS_EXT_STROBE;
 	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
 }
@@ -435,7 +433,7 @@ static void clk_set_dfs_ext_cal(struct gk20a *g, u32 dfs_det_cal)
 	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
 
 	data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	if (~trim_sys_gpcpll_dvfs1_dfs_ctrl_v(data) & DFS_EXT_CAL_EN) {
 		data = set_field(data, trim_sys_gpcpll_dvfs1_dfs_ctrl_m(),
 			trim_sys_gpcpll_dvfs1_dfs_ctrl_f(
@@ -460,7 +458,7 @@ static void clk_setup_dvfs_detection(struct gk20a *g, struct pll *gpll)
 	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);
 
 	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	data &= ~DFS_EXT_STROBE;
 	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
 
@@ -505,7 +503,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
 			trim_sys_gpcpll_cfg_iddq_power_on_v());
 	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data);
 	gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-	udelay(delay);
+	nvgpu_udelay(delay);
 
 	/*
 	 * Dynamic ramp setup based on update rate, which in DVFS mode on GM20b
@@ -530,7 +528,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
 		data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
 		if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data))
 			break;
-		udelay(1);
+		nvgpu_udelay(1);
 		delay--;
 	} while (delay > 0);
 
@@ -626,13 +624,13 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
 		coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
 			trim_sys_gpcpll_coeff_ndiv_f(gpll->dvfs.n_int));
-		udelay(1);
+		nvgpu_udelay(1);
 		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
 	} else {
 		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
 		coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
 			trim_sys_gpcpll_coeff_ndiv_f(gpll->N));
-		udelay(1);
+		nvgpu_udelay(1);
 		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
 	}
 
@@ -641,11 +639,11 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
 	data = set_field(data,
 			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
 			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_yes_f());
-	udelay(1);
+	nvgpu_udelay(1);
 	gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);
 
 	do {
-		udelay(1);
+		nvgpu_udelay(1);
 		ramp_timeout--;
 		data = gk20a_readl(
 			g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
@@ -692,14 +690,14 @@ static int clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
 
 	/* change PLDIV */
 	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
 		trim_sys_gpcpll_coeff_pldiv_f(gpll->PL));
 	gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
 
 	/* put PLL back on vco */
 	data = gk20a_readl(g, trim_sys_sel_vco_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
 		trim_sys_sel_vco_gpc2clk_out_vco_f());
 	gk20a_writel(g, trim_sys_sel_vco_r(), data);
@@ -718,14 +716,14 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	gk20a_writel(g, trim_sys_sel_vco_r(), data);
 
 	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-	udelay(1);
+	nvgpu_udelay(1);
 	if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
 		/* get out from IDDQ (1st power up) */
 		cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
 			trim_sys_gpcpll_cfg_iddq_power_on_v());
 		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
 		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-		udelay(gpc_pll_params.iddq_exit_delay);
+		nvgpu_udelay(gpc_pll_params.iddq_exit_delay);
 	} else {
 		/* clear SYNC_MODE before disabling PLL */
 		cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
@@ -769,7 +767,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	/* just delay in DVFS mode (lock cannot be used) */
 	if (gpll->mode == GPC_PLL_MODE_DVFS) {
 		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
-		udelay(gpc_pll_params.na_lock_delay);
+		nvgpu_udelay(gpc_pll_params.na_lock_delay);
 		gk20a_dbg_clk("NA config_pll under bypass: %u (%u) kHz %d mV",
 			gpll->freq, gpll->freq / 2,
 			(trim_sys_gpcpll_cfg3_dfs_testout_v(
@@ -791,7 +789,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
 	/* wait pll lock */
 	timeout = gpc_pll_params.lock_timeout + 1;
 	do {
-		udelay(1);
+		nvgpu_udelay(1);
 		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
 		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
 			goto pll_locked;
@@ -886,7 +884,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
 		/* Intentional 2nd write to assure linear divider operation */
 		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
 		gk20a_readl(g, trim_sys_gpc2clk_out_r());
-		udelay(2);
+		nvgpu_udelay(2);
 	}
 
 #if PLDIV_GLITCHLESS
@@ -942,7 +940,7 @@ set_pldiv:
 			trim_sys_gpc2clk_out_vcodiv_by1_f()) {
 		data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
 			trim_sys_gpc2clk_out_vcodiv_by1_f());
-		udelay(2);
+		nvgpu_udelay(2);
 		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
 		/* Intentional 2nd write to assure linear divider operation */
 		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
@@ -1787,10 +1785,10 @@ static int monitor_get(void *data, u64 *val)
 	/* It should take less than 25us to finish 800 cycle of 38.4MHz.
 	   But longer than 100us delay is required here. */
 	gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0));
-	udelay(200);
+	nvgpu_udelay(200);
 
 	count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
-	udelay(100);
+	nvgpu_udelay(100);
 	count2 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
 	freq *= trim_gpc_clk_cntr_ncgpcclk_cnt_value_v(count2);
 	do_div(freq, ncycle);