diff options
author | Vinod G <vinodg@nvidia.com> | 2018-05-16 13:43:13 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-05-18 17:53:58 -0400 |
commit | ac687c95d383c3fb0165e6535893510409559a8e (patch) | |
tree | 7a76099c05186ad636704c07c5409bbc8547f20f /drivers/gpu/nvgpu/gp10b/mc_gp10b.c | |
parent | de67fb18fb639b7a605c77eeb2e1c639a8a3d67e (diff) |
gpu: nvgpu: Code updates for MISRA violations
Code related to MC module is updated for handling
MISRA violations
Rule 10.1: Operands shall not be of an inappropriate
essential type.
Rule 10.3: The value of an expression shall not be assigned
to an object with a narrower essential type.
Rule 10.4: Both operands of an operator shall have
the same essential type category.
Rule 14.4: The controlling expression of an if statement
shall have essentially Boolean type.
Rule 15.6: The body of an if statement shall be enclosed in braces.
JIRA NVGPU-646
JIRA NVGPU-659
JIRA NVGPU-671
Change-Id: Ia7ada40068eab5c164b8bad99bf8103b37a2fbc9
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1720926
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mc_gp10b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 42 |
1 files changed, 24 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c index 5969e45d..6fe4da15 100644 --- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include "gk20a/gk20a.h" | 25 | #include "gk20a/gk20a.h" |
26 | #include "gk20a/mc_gk20a.h" | ||
27 | 26 | ||
28 | #include "mc_gp10b.h" | 27 | #include "mc_gp10b.h" |
29 | 28 | ||
@@ -37,7 +36,7 @@ void mc_gp10b_intr_enable(struct gk20a *g) | |||
37 | u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g); | 36 | u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g); |
38 | 37 | ||
39 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), | 38 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), |
40 | 0xffffffff); | 39 | 0xffffffffU); |
41 | g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] = | 40 | g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] = |
42 | mc_intr_pfifo_pending_f() | | 41 | mc_intr_pfifo_pending_f() | |
43 | mc_intr_priv_ring_pending_f() | | 42 | mc_intr_priv_ring_pending_f() | |
@@ -49,7 +48,7 @@ void mc_gp10b_intr_enable(struct gk20a *g) | |||
49 | g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]); | 48 | g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]); |
50 | 49 | ||
51 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING), | 50 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING), |
52 | 0xffffffff); | 51 | 0xffffffffU); |
53 | g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] = | 52 | g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] = |
54 | mc_intr_pfifo_pending_f() | | 53 | mc_intr_pfifo_pending_f() | |
55 | eng_intr_mask; | 54 | eng_intr_mask; |
@@ -92,7 +91,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) | |||
92 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { | 91 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { |
93 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; | 92 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; |
94 | 93 | ||
95 | if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) { | 94 | if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) { |
96 | engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; | 95 | engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; |
97 | /* GR Engine */ | 96 | /* GR Engine */ |
98 | if (engine_enum == ENGINE_GR_GK20A) { | 97 | if (engine_enum == ENGINE_GR_GK20A) { |
@@ -102,29 +101,36 @@ void mc_gp10b_isr_stall(struct gk20a *g) | |||
102 | /* CE Engine */ | 101 | /* CE Engine */ |
103 | if (((engine_enum == ENGINE_GRCE_GK20A) || | 102 | if (((engine_enum == ENGINE_GRCE_GK20A) || |
104 | (engine_enum == ENGINE_ASYNC_CE_GK20A)) && | 103 | (engine_enum == ENGINE_ASYNC_CE_GK20A)) && |
105 | g->ops.ce2.isr_stall){ | 104 | (g->ops.ce2.isr_stall != NULL)) { |
106 | g->ops.ce2.isr_stall(g, | 105 | g->ops.ce2.isr_stall(g, |
107 | g->fifo.engine_info[active_engine_id].inst_id, | 106 | g->fifo.engine_info[active_engine_id].inst_id, |
108 | g->fifo.engine_info[active_engine_id].pri_base); | 107 | g->fifo.engine_info[active_engine_id].pri_base); |
109 | } | 108 | } |
110 | } | 109 | } |
111 | } | 110 | } |
112 | if (g->ops.mc.is_intr_hub_pending && | 111 | if ((g->ops.mc.is_intr_hub_pending != NULL) && |
113 | g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) | 112 | g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) { |
114 | g->ops.fb.hub_isr(g); | 113 | g->ops.fb.hub_isr(g); |
115 | if (mc_intr_0 & mc_intr_pfifo_pending_f()) | 114 | } |
115 | if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) { | ||
116 | gk20a_fifo_isr(g); | 116 | gk20a_fifo_isr(g); |
117 | if (mc_intr_0 & mc_intr_pmu_pending_f()) | 117 | } |
118 | if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) { | ||
118 | gk20a_pmu_isr(g); | 119 | gk20a_pmu_isr(g); |
119 | if (mc_intr_0 & mc_intr_priv_ring_pending_f()) | 120 | } |
121 | if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) { | ||
120 | g->ops.priv_ring.isr(g); | 122 | g->ops.priv_ring.isr(g); |
121 | if (mc_intr_0 & mc_intr_ltc_pending_f()) | 123 | } |
124 | if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) { | ||
122 | g->ops.ltc.isr(g); | 125 | g->ops.ltc.isr(g); |
123 | if (mc_intr_0 & mc_intr_pbus_pending_f()) | 126 | } |
127 | if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) { | ||
124 | g->ops.bus.isr(g); | 128 | g->ops.bus.isr(g); |
125 | if (g->ops.mc.is_intr_nvlink_pending && | 129 | } |
126 | g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) | 130 | if ((g->ops.mc.is_intr_nvlink_pending != NULL) && |
131 | g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) { | ||
127 | g->ops.nvlink.isr(g); | 132 | g->ops.nvlink.isr(g); |
133 | } | ||
128 | 134 | ||
129 | nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); | 135 | nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); |
130 | 136 | ||
@@ -137,7 +143,7 @@ u32 mc_gp10b_intr_stall(struct gk20a *g) | |||
137 | 143 | ||
138 | void mc_gp10b_intr_stall_pause(struct gk20a *g) | 144 | void mc_gp10b_intr_stall_pause(struct gk20a *g) |
139 | { | 145 | { |
140 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffff); | 146 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffffU); |
141 | } | 147 | } |
142 | 148 | ||
143 | void mc_gp10b_intr_stall_resume(struct gk20a *g) | 149 | void mc_gp10b_intr_stall_resume(struct gk20a *g) |
@@ -154,7 +160,7 @@ u32 mc_gp10b_intr_nonstall(struct gk20a *g) | |||
154 | void mc_gp10b_intr_nonstall_pause(struct gk20a *g) | 160 | void mc_gp10b_intr_nonstall_pause(struct gk20a *g) |
155 | { | 161 | { |
156 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING), | 162 | gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING), |
157 | 0xffffffff); | 163 | 0xffffffffU); |
158 | } | 164 | } |
159 | 165 | ||
160 | void mc_gp10b_intr_nonstall_resume(struct gk20a *g) | 166 | void mc_gp10b_intr_nonstall_resume(struct gk20a *g) |
@@ -177,11 +183,11 @@ bool mc_gp10b_is_intr1_pending(struct gk20a *g, | |||
177 | break; | 183 | break; |
178 | } | 184 | } |
179 | 185 | ||
180 | if (mask == 0) { | 186 | if (mask == 0U) { |
181 | nvgpu_err(g, "unknown unit %d", unit); | 187 | nvgpu_err(g, "unknown unit %d", unit); |
182 | is_pending = false; | 188 | is_pending = false; |
183 | } else { | 189 | } else { |
184 | is_pending = (mc_intr_1 & mask) ? true : false; | 190 | is_pending = ((mc_intr_1 & mask) != 0U) ? true : false; |
185 | } | 191 | } |
186 | 192 | ||
187 | return is_pending; | 193 | return is_pending; |