summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorNicolas Benech <nbenech@nvidia.com>2018-08-27 10:56:19 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-07 00:33:41 -0400
commit0e58ebaae13dd59b6aba5297f898e7c89fcd2742 (patch)
treea012357b1f9d3a5007c192c7e1377759f71a5a10
parent034e23c197541e0e8ac6a4d95b1d3b6d1be885cd (diff)
gpu: nvgpu: Fix nvgpu_readl MISRA 17.7 violations
MISRA Rule-17.7 requires the return value of all functions to be used.
Fix is either to use the return value or change the function to return
void. This patch contains fix for calls to nvgpu_readl.

JIRA NVGPU-677

Change-Id: I432197cca67a10281dfe407aa9ce2dd8120030f0
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807528
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/bus/bus_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/common/pramin.c2
-rw-r--r--drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c6
-rw-r--r--drivers/gpu/nvgpu/gk20a/mc_gk20a.c8
-rw-r--r--drivers/gpu/nvgpu/gm20b/clk_gm20b.c26
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_gp106.c8
-rw-r--r--drivers/gpu/nvgpu/gp106/mclk_gp106.c4
-rw-r--r--drivers/gpu/nvgpu/gp106/pmu_gp106.c4
9 files changed, 31 insertions, 31 deletions
diff --git a/drivers/gpu/nvgpu/common/bus/bus_gk20a.c b/drivers/gpu/nvgpu/common/bus/bus_gk20a.c
index 5a424a50..5178dcd0 100644
--- a/drivers/gpu/nvgpu/common/bus/bus_gk20a.c
+++ b/drivers/gpu/nvgpu/common/bus/bus_gk20a.c
@@ -85,7 +85,7 @@ u32 gk20a_bus_set_bar0_window(struct gk20a *g, struct nvgpu_mem *mem,
85 85
86 if (g->mm.pramin_window != win) { 86 if (g->mm.pramin_window != win) {
87 gk20a_writel(g, bus_bar0_window_r(), win); 87 gk20a_writel(g, bus_bar0_window_r(), win);
88 gk20a_readl(g, bus_bar0_window_r()); 88 (void) gk20a_readl(g, bus_bar0_window_r());
89 g->mm.pramin_window = win; 89 g->mm.pramin_window = win;
90 } 90 }
91 91
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 1448fed1..26bf1038 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -82,7 +82,7 @@ static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
82 loop(g, start_reg, n / sizeof(u32), arg); 82 loop(g, start_reg, n / sizeof(u32), arg);
83 83
84 /* read back to synchronize accesses */ 84 /* read back to synchronize accesses */
85 gk20a_readl(g, start_reg); 85 (void) gk20a_readl(g, start_reg);
86 86
87 nvgpu_spinlock_release(&g->mm.pramin_window_lock); 87 nvgpu_spinlock_release(&g->mm.pramin_window_lock);
88 88
diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
index 24c35576..41a5391d 100644
--- a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
@@ -52,7 +52,7 @@ void gm20b_priv_ring_enable(struct gk20a *g)
52 52
53 gk20a_writel(g, pri_ringstation_sys_decode_config_r(), 53 gk20a_writel(g, pri_ringstation_sys_decode_config_r(),
54 0x2); 54 0x2);
55 gk20a_readl(g, pri_ringstation_sys_decode_config_r()); 55 (void) gk20a_readl(g, pri_ringstation_sys_decode_config_r());
56} 56}
57 57
58void gm20b_priv_ring_isr(struct gk20a *g) 58void gm20b_priv_ring_isr(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 91ffbb7e..a40d93fd 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -345,7 +345,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
345 do { 345 do {
346 /* fmodel: host gets fifo_engine_status(gr) from gr 346 /* fmodel: host gets fifo_engine_status(gr) from gr
347 only when gr_status is read */ 347 only when gr_status is read */
348 gk20a_readl(g, gr_status_r()); 348 (void) gk20a_readl(g, gr_status_r());
349 349
350 gr_enabled = gk20a_readl(g, mc_enable_r()) & 350 gr_enabled = gk20a_readl(g, mc_enable_r()) &
351 mc_enable_pgraph_enabled_f(); 351 mc_enable_pgraph_enabled_f();
@@ -1482,7 +1482,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1482 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f() | 1482 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f() |
1483 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f() | 1483 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f() |
1484 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f()); 1484 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f());
1485 gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r()); 1485 (void) gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
1486 nvgpu_udelay(10); 1486 nvgpu_udelay(10);
1487 1487
1488 gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(), 1488 gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(),
@@ -1495,7 +1495,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1495 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f() | 1495 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f() |
1496 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f() | 1496 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f() |
1497 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f()); 1497 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f());
1498 gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r()); 1498 (void) gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r());
1499 nvgpu_udelay(10); 1499 nvgpu_udelay(10);
1500 1500
1501 if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) { 1501 if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index f7631a9c..f9996e71 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -172,7 +172,7 @@ void mc_gk20a_intr_stall_pause(struct gk20a *g)
172 mc_intr_en_0_inta_disabled_f()); 172 mc_intr_en_0_inta_disabled_f());
173 173
174 /* flush previous write */ 174 /* flush previous write */
175 gk20a_readl(g, mc_intr_en_0_r()); 175 (void) gk20a_readl(g, mc_intr_en_0_r());
176} 176}
177 177
178void mc_gk20a_intr_stall_resume(struct gk20a *g) 178void mc_gk20a_intr_stall_resume(struct gk20a *g)
@@ -181,7 +181,7 @@ void mc_gk20a_intr_stall_resume(struct gk20a *g)
181 mc_intr_en_0_inta_hardware_f()); 181 mc_intr_en_0_inta_hardware_f());
182 182
183 /* flush previous write */ 183 /* flush previous write */
184 gk20a_readl(g, mc_intr_en_0_r()); 184 (void) gk20a_readl(g, mc_intr_en_0_r());
185} 185}
186 186
187void mc_gk20a_intr_nonstall_pause(struct gk20a *g) 187void mc_gk20a_intr_nonstall_pause(struct gk20a *g)
@@ -190,7 +190,7 @@ void mc_gk20a_intr_nonstall_pause(struct gk20a *g)
190 mc_intr_en_0_inta_disabled_f()); 190 mc_intr_en_0_inta_disabled_f());
191 191
192 /* flush previous write */ 192 /* flush previous write */
193 gk20a_readl(g, mc_intr_en_1_r()); 193 (void) gk20a_readl(g, mc_intr_en_1_r());
194} 194}
195 195
196void mc_gk20a_intr_nonstall_resume(struct gk20a *g) 196void mc_gk20a_intr_nonstall_resume(struct gk20a *g)
@@ -199,7 +199,7 @@ void mc_gk20a_intr_nonstall_resume(struct gk20a *g)
199 mc_intr_en_0_inta_hardware_f()); 199 mc_intr_en_0_inta_hardware_f());
200 200
201 /* flush previous write */ 201 /* flush previous write */
202 gk20a_readl(g, mc_intr_en_1_r()); 202 (void) gk20a_readl(g, mc_intr_en_1_r());
203} 203}
204 204
205u32 mc_gk20a_intr_stall(struct gk20a *g) 205u32 mc_gk20a_intr_stall(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 03fed222..d2260d9c 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -522,7 +522,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
522 data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(), 522 data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(),
523 trim_sys_gpcpll_cfg_iddq_power_on_v()); 523 trim_sys_gpcpll_cfg_iddq_power_on_v());
524 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data); 524 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data);
525 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 525 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
526 nvgpu_udelay(delay); 526 nvgpu_udelay(delay);
527 527
528 /* 528 /*
@@ -710,7 +710,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
710 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(), 710 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
711 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f()); 711 trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f());
712 gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data); 712 gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);
713 gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r()); 713 (void) gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
714 714
715 if (ramp_timeout <= 0) { 715 if (ramp_timeout <= 0) {
716 nvgpu_err(g, "gpcpll dynamic ramp timeout"); 716 nvgpu_err(g, "gpcpll dynamic ramp timeout");
@@ -782,20 +782,20 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
782 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(), 782 cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
783 trim_sys_gpcpll_cfg_iddq_power_on_v()); 783 trim_sys_gpcpll_cfg_iddq_power_on_v());
784 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 784 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
785 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 785 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
786 nvgpu_udelay(gpc_pll_params.iddq_exit_delay); 786 nvgpu_udelay(gpc_pll_params.iddq_exit_delay);
787 } else { 787 } else {
788 /* clear SYNC_MODE before disabling PLL */ 788 /* clear SYNC_MODE before disabling PLL */
789 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(), 789 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
790 trim_sys_gpcpll_cfg_sync_mode_disable_f()); 790 trim_sys_gpcpll_cfg_sync_mode_disable_f());
791 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 791 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
792 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 792 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
793 793
794 /* disable running PLL before changing coefficients */ 794 /* disable running PLL before changing coefficients */
795 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(), 795 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
796 trim_sys_gpcpll_cfg_enable_no_f()); 796 trim_sys_gpcpll_cfg_enable_no_f());
797 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 797 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
798 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 798 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
799 } 799 }
800 800
801 /* change coefficients */ 801 /* change coefficients */
@@ -826,7 +826,7 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
826 826
827 /* just delay in DVFS mode (lock cannot be used) */ 827 /* just delay in DVFS mode (lock cannot be used) */
828 if (gpll->mode == GPC_PLL_MODE_DVFS) { 828 if (gpll->mode == GPC_PLL_MODE_DVFS) {
829 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 829 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
830 nvgpu_udelay(gpc_pll_params.na_lock_delay); 830 nvgpu_udelay(gpc_pll_params.na_lock_delay);
831 gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV", 831 gk20a_dbg_clk(g, "NA config_pll under bypass: %u (%u) kHz %d mV",
832 gpll->freq, gpll->freq / 2, 832 gpll->freq, gpll->freq / 2,
@@ -869,7 +869,7 @@ pll_locked:
869 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(), 869 cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
870 trim_sys_gpcpll_cfg_sync_mode_enable_f()); 870 trim_sys_gpcpll_cfg_sync_mode_enable_f());
871 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 871 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
872 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 872 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
873 873
874 /* put PLL back on vco */ 874 /* put PLL back on vco */
875 throt = throttle_disable(g); 875 throt = throttle_disable(g);
@@ -950,7 +950,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
950 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 950 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
951 /* Intentional 2nd write to assure linear divider operation */ 951 /* Intentional 2nd write to assure linear divider operation */
952 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 952 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
953 gk20a_readl(g, trim_sys_gpc2clk_out_r()); 953 (void) gk20a_readl(g, trim_sys_gpc2clk_out_r());
954 nvgpu_udelay(2); 954 nvgpu_udelay(2);
955 } 955 }
956 956
@@ -1013,7 +1013,7 @@ set_pldiv:
1013 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 1013 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
1014 /* Intentional 2nd write to assure linear divider operation */ 1014 /* Intentional 2nd write to assure linear divider operation */
1015 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data); 1015 gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
1016 gk20a_readl(g, trim_sys_gpc2clk_out_r()); 1016 (void) gk20a_readl(g, trim_sys_gpc2clk_out_r());
1017 } 1017 }
1018 1018
1019 /* slide up to target NDIV */ 1019 /* slide up to target NDIV */
@@ -1178,7 +1178,7 @@ static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
1178 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(), 1178 cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
1179 trim_sys_gpcpll_cfg_enable_no_f()); 1179 trim_sys_gpcpll_cfg_enable_no_f());
1180 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg); 1180 gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
1181 gk20a_readl(g, trim_sys_gpcpll_cfg_r()); 1181 (void) gk20a_readl(g, trim_sys_gpcpll_cfg_r());
1182 1182
1183 clk->gpc_pll.enabled = false; 1183 clk->gpc_pll.enabled = false;
1184 clk->gpc_pll_last.enabled = false; 1184 clk->gpc_pll_last.enabled = false;
@@ -1397,7 +1397,7 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
1397 data = set_field(data, therm_clk_slowdown_idle_factor_m(), 1397 data = set_field(data, therm_clk_slowdown_idle_factor_m(),
1398 therm_clk_slowdown_idle_factor_disabled_f()); 1398 therm_clk_slowdown_idle_factor_disabled_f());
1399 gk20a_writel(g, therm_clk_slowdown_r(0), data); 1399 gk20a_writel(g, therm_clk_slowdown_r(0), data);
1400 gk20a_readl(g, therm_clk_slowdown_r(0)); 1400 (void) gk20a_readl(g, therm_clk_slowdown_r(0));
1401 1401
1402 if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) { 1402 if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) {
1403 return clk_enbale_pll_dvfs(g); 1403 return clk_enbale_pll_dvfs(g);
@@ -1565,7 +1565,7 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
1565 therm_clk_slowdown_idle_factor_m(), 1565 therm_clk_slowdown_idle_factor_m(),
1566 therm_clk_slowdown_idle_factor_disabled_f()); 1566 therm_clk_slowdown_idle_factor_disabled_f());
1567 gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown); 1567 gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown);
1568 gk20a_readl(g, therm_clk_slowdown_r(0)); 1568 (void) gk20a_readl(g, therm_clk_slowdown_r(0));
1569 1569
1570 gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0), 1570 gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
1571 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f()); 1571 trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f());
@@ -1578,7 +1578,7 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
1578 /* It should take less than 25us to finish 800 cycle of 38.4MHz. 1578 /* It should take less than 25us to finish 800 cycle of 38.4MHz.
1579 * But longer than 100us delay is required here. 1579 * But longer than 100us delay is required here.
1580 */ 1580 */
1581 gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0)); 1581 (void) gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0));
1582 nvgpu_udelay(200); 1582 nvgpu_udelay(200);
1583 1583
1584 count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0)); 1584 count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
index 13a401f0..e892ceda 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -188,7 +188,7 @@ u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c)
188 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f()); 188 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f());
189 189
190 /* Force wb() */ 190 /* Force wb() */
191 gk20a_readl(g, c->cntr.reg_ctrl_addr); 191 (void) gk20a_readl(g, c->cntr.reg_ctrl_addr);
192 192
193 /* Wait for reset to happen */ 193 /* Wait for reset to happen */
194 retries = CLK_DEFAULT_CNTRL_SETTLE_RETRIES; 194 retries = CLK_DEFAULT_CNTRL_SETTLE_RETRIES;
@@ -209,7 +209,7 @@ u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c)
209 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() | 209 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
210 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_f(XTAL_CNTR_CLKS) | 210 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_f(XTAL_CNTR_CLKS) |
211 c->cntr.reg_ctrl_idx); 211 c->cntr.reg_ctrl_idx);
212 gk20a_readl(g, c->cntr.reg_ctrl_addr); 212 (void) gk20a_readl(g, c->cntr.reg_ctrl_addr);
213 213
214 nvgpu_udelay(XTAL_CNTR_DELAY); 214 nvgpu_udelay(XTAL_CNTR_DELAY);
215 215
@@ -220,9 +220,9 @@ read_err:
220 gk20a_writel(g, c->cntr.reg_ctrl_addr, 220 gk20a_writel(g, c->cntr.reg_ctrl_addr,
221 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_asserted_f() | 221 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_asserted_f() |
222 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f()); 222 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f());
223 gk20a_readl(g, c->cntr.reg_ctrl_addr); 223 (void) gk20a_readl(g, c->cntr.reg_ctrl_addr);
224 gk20a_writel(g, c->cntr.reg_ctrl_addr, save_reg); 224 gk20a_writel(g, c->cntr.reg_ctrl_addr, save_reg);
225 gk20a_readl(g, c->cntr.reg_ctrl_addr); 225 (void) gk20a_readl(g, c->cntr.reg_ctrl_addr);
226 nvgpu_mutex_release(&clk->clk_mutex); 226 nvgpu_mutex_release(&clk->clk_mutex);
227 227
228 return cntr; 228 return cntr;
diff --git a/drivers/gpu/nvgpu/gp106/mclk_gp106.c b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
index 36092a1a..6a49e83b 100644
--- a/drivers/gpu/nvgpu/gp106/mclk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/mclk_gp106.c
@@ -3371,8 +3371,8 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
3371 if (speed == GP106_MCLK_HIGH_SPEED) { 3371 if (speed == GP106_MCLK_HIGH_SPEED) {
3372 gk20a_writel(g, 0x132000, 0x98010000); 3372 gk20a_writel(g, 0x132000, 0x98010000);
3373 /* Introduce delay */ 3373 /* Introduce delay */
3374 gk20a_readl(g, 0x132000); 3374 (void) gk20a_readl(g, 0x132000);
3375 gk20a_readl(g, 0x132000); 3375 (void) gk20a_readl(g, 0x132000);
3376 } 3376 }
3377 3377
3378 gk20a_writel(g, 0x137300, 0x20000103); 3378 gk20a_writel(g, 0x137300, 0x20000103);
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 963668c4..031ac7d8 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -71,11 +71,11 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
71 if (do_reset) { 71 if (do_reset) {
72 gk20a_writel(g, pwr_falcon_engine_r(), 72 gk20a_writel(g, pwr_falcon_engine_r(),
73 pwr_falcon_engine_reset_false_f()); 73 pwr_falcon_engine_reset_false_f());
74 gk20a_readl(g, pwr_falcon_engine_r()); 74 (void) gk20a_readl(g, pwr_falcon_engine_r());
75 } else { 75 } else {
76 gk20a_writel(g, pwr_falcon_engine_r(), 76 gk20a_writel(g, pwr_falcon_engine_r(),
77 pwr_falcon_engine_reset_true_f()); 77 pwr_falcon_engine_reset_true_f());
78 gk20a_readl(g, pwr_falcon_engine_r()); 78 (void) gk20a_readl(g, pwr_falcon_engine_r());
79 } 79 }
80 80
81 return 0; 81 return 0;