author	Vinod G <vinodg@nvidia.com>	2018-05-16 13:43:13 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-05-18 17:53:58 -0400
commit	ac687c95d383c3fb0165e6535893510409559a8e (patch)
tree	7a76099c05186ad636704c07c5409bbc8547f20f
parent	de67fb18fb639b7a605c77eeb2e1c639a8a3d67e (diff)
gpu: nvgpu: Code updates for MISRA violations
Code related to the MC module is updated to handle the following MISRA violations:

Rule 10.1: Operands shall not be of an inappropriate essential type.
Rule 10.3: The value of an expression shall not be assigned to an object with a narrower essential type.
Rule 10.4: Both operands of an operator shall have the same essential type.
Rule 14.4: The controlling expression of an if statement shall have essentially Boolean type.
Rule 15.6: The body of an if statement shall be enclosed in braces.

JIRA NVGPU-646
JIRA NVGPU-659
JIRA NVGPU-671

Change-Id: Ia7ada40068eab5c164b8bad99bf8103b37a2fbc9
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1720926
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.h	18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	14
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mc_gk20a.c	80
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mc_gk20a.h	6
-rw-r--r--	drivers/gpu/nvgpu/gp10b/mc_gp10b.c	42
-rw-r--r--	drivers/gpu/nvgpu/gv100/mc_gv100.c	12
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fb_gv11b.h	12
-rw-r--r--	drivers/gpu/nvgpu/gv11b/mc_gv11b.c	16
11 files changed, 120 insertions, 92 deletions
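
For reference, below is a minimal, self-contained sketch (not taken from the patch; the function example_nonstall_isr and the EXAMPLE_INTR_PENDING mask are hypothetical) of the MISRA patterns applied throughout the diff that follows: unsigned types and literals for register values (Rules 10.1/10.3/10.4), explicit "!= 0U" comparisons so that controlling expressions are essentially Boolean (Rule 14.4), and braces around every if/else body (Rule 15.6).

#include <stdint.h>

typedef uint32_t u32;

#define EXAMPLE_INTR_PENDING 0x1U	/* hypothetical pending bit */

/* Before this kind of cleanup the function would return int, use an
 * implicit boolean test and leave the single-statement if unbraced;
 * the form below mirrors the shape used in the diff. */
static u32 example_nonstall_isr(u32 intr_status)
{
	u32 ops = 0U;	/* unsigned literal matches the u32 essential type */

	if ((intr_status & EXAMPLE_INTR_PENDING) != 0U) {	/* essentially Boolean condition */
		ops |= EXAMPLE_INTR_PENDING;	/* braces kept even for a single statement */
	}

	return ops;
}
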
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c94fc536..7f4a0948 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2672,7 +2672,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 	return;
 }
 
-int gk20a_fifo_nonstall_isr(struct gk20a *g)
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
 {
 	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
 	u32 clear_intr = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 576a4ac8..cf3ac167 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -21,8 +21,8 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef __FIFO_GK20A_H__
-#define __FIFO_GK20A_H__
+#ifndef FIFO_GK20A_H
+#define FIFO_GK20A_H
 
 #include "channel_gk20a.h"
 #include "tsg_gk20a.h"
@@ -103,10 +103,10 @@ struct fifo_runlist_info_gk20a {
 };
 
 enum {
-	ENGINE_GR_GK20A = 0,
-	ENGINE_GRCE_GK20A = 1,
-	ENGINE_ASYNC_CE_GK20A = 2,
-	ENGINE_INVAL_GK20A
+	ENGINE_GR_GK20A = 0U,
+	ENGINE_GRCE_GK20A = 1U,
+	ENGINE_ASYNC_CE_GK20A = 2U,
+	ENGINE_INVAL_GK20A = 3U,
 };
 
 struct fifo_pbdma_exception_info_gk20a {
@@ -140,7 +140,7 @@ struct fifo_engine_info_gk20a {
 };
 
 enum {
-	PROFILE_IOCTL_ENTRY = 0,
+	PROFILE_IOCTL_ENTRY = 0U,
 	PROFILE_ENTRY,
 	PROFILE_JOB_TRACKING,
 	PROFILE_APPEND,
@@ -231,7 +231,7 @@ int gk20a_init_fifo_support(struct gk20a *g);
 int gk20a_init_fifo_setup_hw(struct gk20a *g);
 
 void gk20a_fifo_isr(struct gk20a *g);
-int gk20a_fifo_nonstall_isr(struct gk20a *g);
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
@@ -454,4 +454,4 @@ void gk20a_fifo_add_sema_cmd(struct gk20a *g,
 		struct nvgpu_semaphore *s, u64 sema_va,
 		struct priv_cmd_entry *cmd,
 		u32 off, bool acquire, bool wfi);
-#endif /*__GR_GK20A_H__*/
+#endif /* FIFO_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 076bf89f..84d3f639 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1076,7 +1076,7 @@ struct gpu_ops {
 	u32 (*intr_nonstall)(struct gk20a *g);
 	void (*intr_nonstall_pause)(struct gk20a *g);
 	void (*intr_nonstall_resume)(struct gk20a *g);
-	int (*isr_nonstall)(struct gk20a *g);
+	u32 (*isr_nonstall)(struct gk20a *g);
 	void (*enable)(struct gk20a *g, u32 units);
 	void (*disable)(struct gk20a *g, u32 units);
 	void (*reset)(struct gk20a *g, u32 units);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index c7d028b2..5fd0eb0c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -6134,18 +6134,18 @@ int gk20a_gr_isr(struct gk20a *g)
 	return 0;
 }
 
-int gk20a_gr_nonstall_isr(struct gk20a *g)
+u32 gk20a_gr_nonstall_isr(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
 
 	nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
 
-	if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
+	if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) {
 		/* Clear the interrupt */
 		gk20a_writel(g, gr_intr_nonstall_r(),
 			gr_intr_nonstall_trap_pending_f());
-		ops |= (gk20a_nonstall_ops_wakeup_semaphore |
+		ops |= (u32)(gk20a_nonstall_ops_wakeup_semaphore |
 			gk20a_nonstall_ops_post_events);
 	}
 	return ops;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index a703ef9d..8c21a714 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -24,6 +24,8 @@
 #ifndef GR_GK20A_H
 #define GR_GK20A_H
 
+#include <nvgpu/types.h>
+
 #include "gr_ctx_gk20a.h"
 #include "mm_gk20a.h"
 
@@ -566,7 +568,7 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 
 int gk20a_gr_isr(struct gk20a *g);
-int gk20a_gr_nonstall_isr(struct gk20a *g);
+u32 gk20a_gr_nonstall_isr(struct gk20a *g);
 
 /* zcull */
 u32 gr_gk20a_get_ctxsw_zcull_size(struct gk20a *g, struct gr_gk20a *gr);
@@ -603,15 +605,17 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
 #define gr_gk20a_elpg_protected_call(g, func) \
 	({ \
 		int err = 0; \
-		if (g->support_pmu && g->elpg_enabled) {\
+		if ((g->support_pmu) && (g->elpg_enabled)) {\
 			err = nvgpu_pmu_disable_elpg(g); \
-			if (err) \
+			if (err != 0) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
-		if (!err) { \
+		if (err == 0) { \
 			err = func; \
-			if (g->support_pmu && g->elpg_enabled) \
+			if ((g->support_pmu) && (g->elpg_enabled)) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
 		err; \
 	})
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
index 9473ad4f..69d165af 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.c
@@ -45,7 +45,7 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
-		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
+		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
 			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
 			/* GR Engine */
 			if (engine_enum == ENGINE_GR_GK20A) {
@@ -55,28 +55,33 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_stall){
+				(g->ops.ce2.isr_stall != NULL)) {
 				g->ops.ce2.isr_stall(g,
 					g->fifo.engine_info[active_engine_id].inst_id,
 					g->fifo.engine_info[active_engine_id].pri_base);
 			}
 		}
 	}
-	if (mc_intr_0 & mc_intr_0_pfifo_pending_f())
+	if ((mc_intr_0 & mc_intr_0_pfifo_pending_f()) != 0U) {
 		gk20a_fifo_isr(g);
-	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pmu_pending_f()) != 0U) {
 		gk20a_pmu_isr(g);
-	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_priv_ring_pending_f()) != 0U) {
 		g->ops.priv_ring.isr(g);
-	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_ltc_pending_f()) != 0U) {
 		g->ops.ltc.isr(g);
-	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pbus_pending_f()) != 0U) {
 		g->ops.bus.isr(g);
+	}
 }
 
-int mc_gk20a_isr_nonstall(struct gk20a *g)
+u32 mc_gk20a_isr_nonstall(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 mc_intr_1;
 	u32 engine_id_idx;
 	u32 active_engine_id = 0;
@@ -84,8 +89,9 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 
 	mc_intr_1 = g->ops.mc.intr_nonstall(g);
 
-	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
+	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1) != 0U) {
 		ops |= gk20a_fifo_nonstall_isr(g);
+	}
 
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
 			engine_id_idx++) {
@@ -94,19 +100,20 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 		engine_info = &g->fifo.engine_info[active_engine_id];
 
-		if (mc_intr_1 & engine_info->intr_mask) {
+		if ((mc_intr_1 & engine_info->intr_mask) != 0U) {
 			engine_enum = engine_info->engine_enum;
 			/* GR Engine */
-			if (engine_enum == ENGINE_GR_GK20A)
+			if (engine_enum == ENGINE_GR_GK20A) {
 				ops |= gk20a_gr_nonstall_isr(g);
-
+			}
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_nonstall)
+				(g->ops.ce2.isr_nonstall != NULL)) {
 				ops |= g->ops.ce2.isr_nonstall(g,
 					engine_info->inst_id,
 					engine_info->pri_base);
+			}
 		}
 	}
 
@@ -219,7 +226,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 	pmc = gk20a_readl(g, mc_enable_r());
 	pmc |= units;
 	gk20a_writel(g, mc_enable_r(), pmc);
-	gk20a_readl(g, mc_enable_r());
+	pmc = gk20a_readl(g, mc_enable_r());
 	nvgpu_spinlock_release(&g->mc_enable_lock);
 
 	nvgpu_udelay(20);
@@ -228,10 +235,11 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 void gk20a_mc_reset(struct gk20a *g, u32 units)
 {
 	g->ops.mc.disable(g, units);
-	if (units & gk20a_fifo_get_all_ce_engine_reset_mask(g))
+	if ((units & gk20a_fifo_get_all_ce_engine_reset_mask(g)) != 0U) {
 		nvgpu_udelay(500);
-	else
+	} else {
 		nvgpu_udelay(20);
+	}
 	g->ops.mc.enable(g, units);
 }
 
@@ -239,19 +247,22 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 {
 	u32 val = __nvgpu_readl(g, mc_boot_0_r());
 
-	if (val == 0xffffffff)
-		return val;
+	if (val != 0xffffffffU) {
 
-	if (arch)
+		if (arch != NULL) {
 		*arch = mc_boot_0_architecture_v(val) <<
 			NVGPU_GPU_ARCHITECTURE_SHIFT;
+		}
 
-	if (impl)
+		if (impl != NULL) {
 		*impl = mc_boot_0_implementation_v(val);
+		}
 
-	if (rev)
+		if (rev != NULL) {
 		*rev = (mc_boot_0_major_revision_v(val) << 4) |
 			mc_boot_0_minor_revision_v(val);
+		}
+	}
 
 	return val;
 }
@@ -259,7 +270,7 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 			enum nvgpu_unit unit, u32 mc_intr_1)
 {
-	u32 mask = 0;
+	u32 mask = 0U;
 	bool is_pending;
 
 	switch (unit) {
@@ -270,11 +281,11 @@ bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 		break;
 	}
 
-	if (mask == 0) {
+	if (mask == 0U) {
 		nvgpu_err(g, "unknown unit %d", unit);
 		is_pending = false;
 	} else {
-		is_pending = (mc_intr_1 & mask) ? true : false;
+		is_pending = ((mc_intr_1 & mask) != 0U) ? true : false;
 	}
 
 	return is_pending;
@@ -284,9 +295,12 @@ void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops)
 {
 	bool semaphore_wakeup, post_events;
 
-	semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
-	post_events = ops & gk20a_nonstall_ops_post_events;
+	semaphore_wakeup = (((ops & (u32)gk20a_nonstall_ops_wakeup_semaphore) != 0U) ?
+			true : false);
+	post_events = (((ops & (u32)gk20a_nonstall_ops_post_events) != 0U) ?
+			true: false);
 
-	if (semaphore_wakeup)
+	if (semaphore_wakeup) {
 		g->ops.semaphore_wakeup(g, post_events);
+	}
 }
diff --git a/drivers/gpu/nvgpu/gk20a/mc_gk20a.h b/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
index 1ce308b8..1b59d634 100644
--- a/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mc_gk20a.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ u32 mc_gk20a_intr_stall(struct gk20a *g);
 void mc_gk20a_intr_stall_pause(struct gk20a *g);
 void mc_gk20a_intr_stall_resume(struct gk20a *g);
 u32 mc_gk20a_intr_nonstall(struct gk20a *g);
-int mc_gk20a_isr_nonstall(struct gk20a *g);
+u32 mc_gk20a_isr_nonstall(struct gk20a *g);
 void mc_gk20a_intr_nonstall_pause(struct gk20a *g);
 void mc_gk20a_intr_nonstall_resume(struct gk20a *g);
 void gk20a_mc_enable(struct gk20a *g, u32 units);
@@ -42,4 +42,4 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev);
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 			enum nvgpu_unit unit, u32 mc_intr_1);
 void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops);
-#endif
+#endif /* MC_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
index 5969e45d..6fe4da15 100644
--- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c
@@ -23,7 +23,6 @@
  */
 
 #include "gk20a/gk20a.h"
-#include "gk20a/mc_gk20a.h"
 
 #include "mc_gp10b.h"
 
@@ -37,7 +36,7 @@ void mc_gp10b_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
 		mc_intr_priv_ring_pending_f() |
@@ -49,7 +48,7 @@ void mc_gp10b_intr_enable(struct gk20a *g)
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
 		mc_intr_pfifo_pending_f() |
 		eng_intr_mask;
@@ -92,7 +91,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
-		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
+		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
 			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
 			/* GR Engine */
 			if (engine_enum == ENGINE_GR_GK20A) {
@@ -102,29 +101,36 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_stall){
+				(g->ops.ce2.isr_stall != NULL)) {
 				g->ops.ce2.isr_stall(g,
 					g->fifo.engine_info[active_engine_id].inst_id,
 					g->fifo.engine_info[active_engine_id].pri_base);
 			}
 		}
 	}
-	if (g->ops.mc.is_intr_hub_pending &&
-			g->ops.mc.is_intr_hub_pending(g, mc_intr_0))
+	if ((g->ops.mc.is_intr_hub_pending != NULL) &&
+			g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
 		g->ops.fb.hub_isr(g);
-	if (mc_intr_0 & mc_intr_pfifo_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
 		gk20a_fifo_isr(g);
-	if (mc_intr_0 & mc_intr_pmu_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
 		gk20a_pmu_isr(g);
-	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
 		g->ops.priv_ring.isr(g);
-	if (mc_intr_0 & mc_intr_ltc_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
 		g->ops.ltc.isr(g);
-	if (mc_intr_0 & mc_intr_pbus_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
 		g->ops.bus.isr(g);
-	if (g->ops.mc.is_intr_nvlink_pending &&
-			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0))
+	}
+	if ((g->ops.mc.is_intr_nvlink_pending != NULL) &&
+			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
 		g->ops.nvlink.isr(g);
+	}
 
 	nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
 
@@ -137,7 +143,7 @@ u32 mc_gp10b_intr_stall(struct gk20a *g)
 
 void mc_gp10b_intr_stall_pause(struct gk20a *g)
 {
-	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffff);
+	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffffU);
 }
 
 void mc_gp10b_intr_stall_resume(struct gk20a *g)
@@ -154,7 +160,7 @@ u32 mc_gp10b_intr_nonstall(struct gk20a *g)
 void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
 {
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 }
 
 void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
@@ -177,11 +183,11 @@ bool mc_gp10b_is_intr1_pending(struct gk20a *g,
 		break;
 	}
 
-	if (mask == 0) {
+	if (mask == 0U) {
 		nvgpu_err(g, "unknown unit %d", unit);
 		is_pending = false;
 	} else {
-		is_pending = (mc_intr_1 & mask) ? true : false;
+		is_pending = ((mc_intr_1 & mask) != 0U) ? true : false;
 	}
 
 	return is_pending;
diff --git a/drivers/gpu/nvgpu/gv100/mc_gv100.c b/drivers/gpu/nvgpu/gv100/mc_gv100.c
index 5848a180..31dc97d9 100644
--- a/drivers/gpu/nvgpu/gv100/mc_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/mc_gv100.c
@@ -38,9 +38,9 @@ void mc_gv100_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
@@ -69,7 +69,7 @@ void mc_gv100_intr_enable(struct gk20a *g)
 
 bool gv100_mc_is_intr_nvlink_pending(struct gk20a *g, u32 mc_intr_0)
 {
-	return ((mc_intr_0 & mc_intr_nvlink_pending_f()) ? true : false);
+	return (((mc_intr_0 & mc_intr_nvlink_pending_f()) != 0U) ? true : false);
 }
 
 bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
@@ -78,8 +78,9 @@ bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 	u32 stall_intr, eng_intr_mask;
 
 	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
-	if (mc_intr_0 & eng_intr_mask)
+	if ((mc_intr_0 & eng_intr_mask) != 0U) {
 		return true;
+	}
 
 	stall_intr = mc_intr_pfifo_pending_f() |
 		mc_intr_hub_pending_f() |
@@ -87,8 +88,9 @@ bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 		mc_intr_pbus_pending_f() |
 		mc_intr_ltc_pending_f() |
 		mc_intr_nvlink_pending_f();
-	if (mc_intr_0 & stall_intr)
+	if ((mc_intr_0 & stall_intr) != 0U) {
 		return true;
+	}
 
 	return false;
 }
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
index d9a6fa77..0b8f9fbf 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -1,7 +1,7 @@
 /*
  * GV11B FB
  *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,11 +37,11 @@
 #define FAULT_BUF_INVALID 0
 #define FAULT_BUF_VALID 1
 
-#define HUB_INTR_TYPE_OTHER 1 /* bit 0 */
-#define HUB_INTR_TYPE_NONREPLAY 2 /* bit 1 */
-#define HUB_INTR_TYPE_REPLAY 4 /* bit 2 */
-#define HUB_INTR_TYPE_ECC_UNCORRECTED 8 /* bit 3 */
-#define HUB_INTR_TYPE_ACCESS_COUNTER 16 /* bit 4 */
+#define HUB_INTR_TYPE_OTHER 1U /* bit 0 */
+#define HUB_INTR_TYPE_NONREPLAY 2U /* bit 1 */
+#define HUB_INTR_TYPE_REPLAY 4U /* bit 2 */
+#define HUB_INTR_TYPE_ECC_UNCORRECTED 8U /* bit 3 */
+#define HUB_INTR_TYPE_ACCESS_COUNTER 16U /* bit 4 */
 #define HUB_INTR_TYPE_ALL (HUB_INTR_TYPE_OTHER | \
 		HUB_INTR_TYPE_NONREPLAY | \
 		HUB_INTR_TYPE_REPLAY | \
diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
index 74c5c4d6..6c118ceb 100644
--- a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
@@ -1,7 +1,7 @@
 /*
  * GV11B master
  *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,7 +22,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/types.h>
+#include <nvgpu/types.h>
 
 #include "gk20a/gk20a.h"
 
@@ -38,9 +38,9 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
@@ -68,7 +68,7 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 
 bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
 {
-	return ((mc_intr_0 & mc_intr_hub_pending_f()) ? true : false);
+	return (((mc_intr_0 & mc_intr_hub_pending_f()) != 0U) ? true : false);
 }
 
 bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
@@ -77,16 +77,18 @@ bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 	u32 stall_intr, eng_intr_mask;
 
 	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
-	if (mc_intr_0 & eng_intr_mask)
+	if ((mc_intr_0 & eng_intr_mask) != 0U) {
 		return true;
+	}
 
 	stall_intr = mc_intr_pfifo_pending_f() |
 		mc_intr_hub_pending_f() |
 		mc_intr_priv_ring_pending_f() |
 		mc_intr_pbus_pending_f() |
 		mc_intr_ltc_pending_f();
-	if (mc_intr_0 & stall_intr)
+	if ((mc_intr_0 & stall_intr) != 0U) {
 		return true;
+	}
 
 	return false;
 }