summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
diff options
context:
space:
mode:
authorNicolas Benech <nbenech@nvidia.com>2018-08-27 10:56:19 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-07 00:33:41 -0400
commit0e58ebaae13dd59b6aba5297f898e7c89fcd2742 (patch)
treea012357b1f9d3a5007c192c7e1377759f71a5a10 /drivers/gpu/nvgpu/gm20b/clk_gm20b.c
parent034e23c197541e0e8ac6a4d95b1d3b6d1be885cd (diff)
gpu: nvgpu: Fix nvgpu_readl MISRA 17.7 violations
MISRA Rule-17.7 requires the return value of all functions to be used. Fix is either to use the return value or change the function to return void. This patch contains fix for calls to nvgpu_readl.

JIRA NVGPU-677

Change-Id: I432197cca67a10281dfe407aa9ce2dd8120030f0
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807528
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.c  26
1 file changed, 13 insertions, 13 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 03fed222..d2260d9c 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -522,7 +522,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
522 data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(), 522 data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(),
523 trim_sys_gpcpll_cfg_iddq_power_on_v()); 523 trim_sys_gpcpll_cfg_iddq_power_on_v());
524 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data); 524 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data);
525 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 525 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
526 nvgpu_udelay(delay); 526 nvgpu_udelay(delay);
527 527
528 /* 528 /*
@@ -710,7 +710,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
710 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(), 710 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
711 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f()); 711 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f());
712 gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data); 712 gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);
713 gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r()); 713 (void) gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
714 714
715 if (ramp_timeout <= 0) { 715 if (ramp_timeout <= 0) {
716 nvgpu_err(g, "gpcpll dynamic ramp timeout"); 716 nvgpu_err(g, "gpcpll dynamic ramp timeout");
@@ -782,20 +782,20 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
782 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(), 782 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
783 trim_sys_gpcpll_cfg_iddq_power_on_v()); 783 trim_sys_gpcpll_cfg_iddq_power_on_v());
784 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 784 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
785 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 785 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
786 nvgpu_udelay(gpc_pll_params.iddq_exit_delay); 786 nvgpu_udelay(gpc_pll_params.iddq_exit_delay);
787 } else { 787 } else {
788 /* clear SYNC_MODE before disabling PLL */ 788 /* clear SYNC_MODE before disabling PLL */
789 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(), 789 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
790 trim_sys_gpcpll_cfg_sync_mode_disable_f()); 790 trim_sys_gpcpll_cfg_sync_mode_disable_f());
791 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 791 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
792 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 792 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
793 793
794 /* disable running PLL before changing coefficients */ 794 /* disable running PLL before changing coefficients */
795 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(), 795 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
796 trim_sys_gpcpll_cfg_enable_no_f()); 796 trim_sys_gpcpll_cfg_enable_no_f());
797 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 797 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
798 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 798 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
799 } 799 }
800 800
801 /* change coefficients */ 801 /* change coefficients */
@@ -826,7 +826,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
826 826
827 /* just delay in DVFS mode (lock cannot be used) */ 827 /* just delay in DVFS mode (lock cannot be used) */
828 if (gpll->mode == GPC_PLL_MODE_DVFS) { 828 if (gpll->mode == GPC_PLL_MODE_DVFS) {
829 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 829 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
830 nvgpu_udelay(gpc_pll_params.na_lock_delay); 830 nvgpu_udelay(gpc_pll_params.na_lock_delay);
831 gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV", 831 gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
832 gpll->freq, gpll->freq / 2, 832 gpll->freq, gpll->freq / 2,
@@ -869,7 +869,7 @@ pll_locked:
869 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(), 869 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
870 trim_sys_gpcpll_cfg_sync_mode_enable_f()); 870 trim_sys_gpcpll_cfg_sync_mode_enable_f());
871 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 871 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
872 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 872 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
873 873
874 /* put PLL back on vco */ 874 /* put PLL back on vco */
875 throt = throttle_disable(g); 875 throt = throttle_disable(g);
@@ -950,7 +950,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
950 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 950 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
951 /* Intentional 2nd write to assure linear divider operation */ 951 /* Intentional 2nd write to assure linear divider operation */
952 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 952 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
953 gk20a_readl(g, trim_sys_gpc2clk_out_r()); 953 (void) gk20a_readl(g, trim_sys_gpc2clk_out_r());
954 nvgpu_udelay(2); 954 nvgpu_udelay(2);
955 } 955 }
956 956
@@ -1013,7 +1013,7 @@ set_pldiv:
1013 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 1013 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
1014 /* Intentional 2nd write to assure linear divider operation */ 1014 /* Intentional 2nd write to assure linear divider operation */
1015 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 1015 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
1016 gk20a_readl(g, trim_sys_gpc2clk_out_r()); 1016 (void) gk20a_readl(g, trim_sys_gpc2clk_out_r());
1017 } 1017 }
1018 1018
1019 /* slide up to target NDIV */ 1019 /* slide up to target NDIV */
@@ -1178,7 +1178,7 @@ static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
1178 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(), 1178 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
1179 trim_sys_gpcpll_cfg_enable_no_f()); 1179 trim_sys_gpcpll_cfg_enable_no_f());
1180 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 1180 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
1181 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 1181 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
1182 1182
1183 clk->gpc_pll.enabled = false; 1183 clk->gpc_pll.enabled = false;
1184 clk->gpc_pll_last.enabled = false; 1184 clk->gpc_pll_last.enabled = false;
@@ -1397,7 +1397,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
1397 data = set_field(data, therm_clk_slowdown_idle_factor_m(), 1397 data = set_field(data, therm_clk_slowdown_idle_factor_m(),
1398 therm_clk_slowdown_idle_factor_disabled_f()); 1398 therm_clk_slowdown_idle_factor_disabled_f());
1399 gk20a_writel(g, therm_clk_slowdown_r(0), data); 1399 gk20a_writel(g, therm_clk_slowdown_r(0), data);
1400 gk20a_readl(g, therm_clk_slowdown_r(0)); 1400 (void) gk20a_readl(g, therm_clk_slowdown_r(0));
1401 1401
1402 if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) { 1402 if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) {
1403 return clk_enbale_pll_dvfs(g); 1403 return clk_enbale_pll_dvfs(g);
@@ -1565,7 +1565,7 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
1565 therm_clk_slowdown_idle_factor_m(), 1565 therm_clk_slowdown_idle_factor_m(),
1566 therm_clk_slowdown_idle_factor_disabled_f()); 1566 therm_clk_slowdown_idle_factor_disabled_f());
1567 gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown); 1567 gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown);
1568 gk20a_readl(g, therm_clk_slowdown_r(0)); 1568 (void) gk20a_readl(g, therm_clk_slowdown_r(0));
1569 1569
1570 gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0), 1570 gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
1571 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f()); 1571 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f());
@@ -1578,7 +1578,7 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
1578 /* It should take less than 25us to finish 800 cycle of 38.4MHz. 1578 /* It should take less than 25us to finish 800 cycle of 38.4MHz.
1579 * But longer than 100us delay is required here. 1579 * But longer than 100us delay is required here.
1580 */ 1580 */
1581 gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0)); 1581 (void) gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0));
1582 nvgpu_udelay(200); 1582 nvgpu_udelay(200);
1583 1583
1584 count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0)); 1584 count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));