author		Srirangan <smadhavan@nvidia.com>	2018-08-02 04:45:54 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-06 20:36:39 -0400
commit		17aeea4a2ffa23fc9dbcdc84cda747fe5a025131 (patch)
tree		d4be52f246724fb9cb99047059073b93aeb089ce
parent		6c9daf7626567fffc9d1ccd475865e81ae90a973 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
This fixes errors due to single-statement loop bodies without braces,
which violate Rule 15.6 of MISRA C:2012. This patch covers files in
gpu/nvgpu/gk20a/.

JIRA NVGPU-989

Change-Id: I2f422e9bc2b03229f4d2c3198613169ce5e7f3ee
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791019
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
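For reference, the pattern applied throughout this patch: MISRA C:2012 Rule 15.6 requires the body of every iteration statement to be a compound statement. A minimal before/after sketch (illustrative only; i, n, and buf are placeholder names, not code from this patch):

	/* Non-compliant: single-statement loop body without braces */
	for (i = 0; i < n; i++)
		buf[i] = 0U;

	/* Compliant: the loop body is enclosed in braces */
	for (i = 0; i < n; i++) {
		buf[i] = 0U;
	}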
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c		18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/flcn_gk20a.c		18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.c			3
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c	12
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c		63
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c		9
-rw-r--r--	drivers/gpu/nvgpu/gk20a/regops_gk20a.c		3
7 files changed, 84 insertions, 42 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 93ef211e..23e22c21 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -907,8 +907,9 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 	memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
 
 	/* pbdma map needs to be in place before calling engine info init */
-	for (i = 0; i < f->num_pbdma; ++i)
+	for (i = 0; i < f->num_pbdma; ++i) {
 		f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
+	}
 
 	g->ops.fifo.init_engine_info(f);
 
@@ -2496,9 +2497,10 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 			f->intr.pbdma.restartable_0) & pbdma_intr_0) {
 
 		pbdma_intr_err = (unsigned long)pbdma_intr_0;
-		for_each_set_bit(bit, &pbdma_intr_err, 32)
+		for_each_set_bit(bit, &pbdma_intr_err, 32) {
 			nvgpu_err(g, "PBDMA intr %s Error",
 				pbdma_intr_fault_type_desc[bit]);
+		}
 
 		nvgpu_err(g,
 			"pbdma_intr_0(%d):0x%08x PBH: %08x "
@@ -2851,8 +2853,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2861,8 +2864,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
@@ -2891,8 +2895,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2901,8 +2906,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index c55b90b6..98fdb8c2 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -213,14 +213,16 @@ static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,
 	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		src | falcon_falcon_dmemc_aincr_f(1));
 
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		dst_u32[i] = gk20a_readl(g,
 			base_addr + falcon_falcon_dmemd_r(port));
+	}
 
 	if (bytes > 0) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_dmemd_r(port));
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
+		}
 	}
 
 	nvgpu_mutex_release(&flcn->copy_lock);
@@ -256,14 +258,16 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,
 	gk20a_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		dst | falcon_falcon_dmemc_aincw_f(1));
 
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		gk20a_writel(g,
 			base_addr + falcon_falcon_dmemd_r(port), src_u32[i]);
+	}
 
 	if (bytes > 0) {
 		data = 0;
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			((u8 *)&data)[i] = src[(words << 2) + i];
+		}
 		gk20a_writel(g, base_addr + falcon_falcon_dmemd_r(port), data);
 	}
 
@@ -313,14 +317,16 @@ static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
 		falcon_falcon_imemc_blk_f(blk) |
 		falcon_falcon_dmemc_aincr_f(1));
 
-	for (i = 0; i < words; i++)
+	for (i = 0; i < words; i++) {
 		dst_u32[i] = gk20a_readl(g,
 			base_addr + falcon_falcon_imemd_r(port));
+	}
 
 	if (bytes > 0) {
 		data = gk20a_readl(g, base_addr + falcon_falcon_imemd_r(port));
-		for (i = 0; i < bytes; i++)
+		for (i = 0; i < bytes; i++) {
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
+		}
 	}
 
 	nvgpu_mutex_release(&flcn->copy_lock);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index c8b094cf..6a0b5bba 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -402,8 +402,9 @@ int gk20a_wait_for_idle(struct gk20a *g)
 		return -ENODEV;
 
 	while ((nvgpu_atomic_read(&g->usage_count) != target_usage_count)
-		&& (wait_length-- >= 0))
+		&& (wait_length-- >= 0)) {
 		nvgpu_msleep(20);
+	}
 
 	if (wait_length < 0) {
 		nvgpu_warn(g, "Timed out waiting for idle (%d)!\n",
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 80252aaa..ce65c777 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -154,21 +154,25 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 		goto fail;
 	}
 
-	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.inst.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_INST_FECS",
 				  i, &g->gr.ctx_vars.ucode.fecs.inst.l[i]);
+	}
 
-	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.data.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.fecs.data.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_DATA_FECS",
 				  i, &g->gr.ctx_vars.ucode.fecs.data.l[i]);
+	}
 
-	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.inst.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.inst.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_INST_GPCCS",
 				  i, &g->gr.ctx_vars.ucode.gpccs.inst.l[i]);
+	}
 
-	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.data.count; i++)
+	for (i = 0; i < g->gr.ctx_vars.ucode.gpccs.data.count; i++) {
 		g->sim->esc_readl(g, "GRCTX_UCODE_DATA_GPCCS",
 				  i, &g->gr.ctx_vars.ucode.gpccs.data.l[i]);
+	}
 
 	for (i = 0; i < g->gr.ctx_vars.sw_bundle_init.count; i++) {
 		struct av_gk20a *l = g->gr.ctx_vars.sw_bundle_init.l;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 4dfddf5d..bdcf750e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -149,9 +149,10 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 	nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x",
 		gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
 
-	for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++)
+	for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
 		nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
 			i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
+	}
 
 	nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
 		gk20a_readl(g, gr_fecs_engctl_r()));
@@ -1144,8 +1145,9 @@ static inline u32 count_bits(u32 mask)
 {
 	u32 temp = mask;
 	u32 count;
-	for (count = 0; temp != 0; count++)
+	for (count = 0; temp != 0; count++) {
 		temp &= temp - 1;
+	}
 
 	return count;
 }
@@ -1485,9 +1487,10 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		   GR_IDLE_CHECK_DEFAULT);
 
 	/* load ctx init */
-	for (i = 0; i < sw_ctx_load->count; i++)
+	for (i = 0; i < sw_ctx_load->count; i++) {
 		gk20a_writel(g, sw_ctx_load->l[i].addr,
 			     sw_ctx_load->l[i].value);
+	}
 
 	if (g->ops.gr.init_preemption_state)
 		g->ops.gr.init_preemption_state(g);
@@ -2029,8 +2032,9 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 
 	/* compute a "checksum" for the boot binary to detect its version */
 	segments->boot_signature = 0;
-	for (i = 0; i < segments->boot.size / sizeof(u32); i++)
+	for (i = 0; i < segments->boot.size / sizeof(u32); i++) {
 		segments->boot_signature += bootimage[i];
+	}
 
 	return 0;
 }
@@ -3335,33 +3339,41 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count);
 	nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count);
 
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
 			   gpc_index, gr->gpc_tpc_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
 			   gpc_index, gr->gpc_zcb_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_ppc_count[%d] : %d",
 			   gpc_index, gr->gpc_ppc_count[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		nvgpu_log_info(g, "gpc_skip_mask[%d] : %d",
 			   gpc_index, gr->gpc_skip_mask[gpc_index]);
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	}
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		for (pes_index = 0;
 		     pes_index < gr->pe_count_per_gpc;
-		     pes_index++)
+		     pes_index++) {
 			nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d",
 				   pes_index, gpc_index,
 				   gr->pes_tpc_count[pes_index][gpc_index]);
+		}
+	}
 
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		for (pes_index = 0;
 		     pes_index < gr->pe_count_per_gpc;
-		     pes_index++)
+		     pes_index++) {
 			nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d",
 				   pes_index, gpc_index,
 				   gr->pes_tpc_mask[pes_index][gpc_index]);
+		}
+	}
 
 	g->ops.gr.bundle_cb_defaults(g);
 	g->ops.gr.cb_size_default(g);
@@ -3537,9 +3549,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 		}
 	}
 
-	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-		if (gr->gpc_tpc_count[gpc_index] > max_tpc_count)
+	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
+		if (gr->gpc_tpc_count[gpc_index] > max_tpc_count) {
 			max_tpc_count = gr->gpc_tpc_count[gpc_index];
+		}
+	}
 
 	mul_factor = gr->gpc_count * max_tpc_count;
 	if (mul_factor & 0x1)
@@ -4534,9 +4548,10 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		g->ops.gr.disable_rd_coalesce(g);
 
 	/* load ctx init */
-	for (i = 0; i < sw_ctx_load->count; i++)
+	for (i = 0; i < sw_ctx_load->count; i++) {
 		gk20a_writel(g, sw_ctx_load->l[i].addr,
 			     sw_ctx_load->l[i].value);
+	}
 
 	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
 		   GR_IDLE_CHECK_DEFAULT);
@@ -4764,9 +4779,10 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
 	gk20a_writel(g, gr_intr_en_r(), ~0);
 
 	/* load non_ctx init */
-	for (i = 0; i < sw_non_ctx_load->count; i++)
+	for (i = 0; i < sw_non_ctx_load->count; i++) {
 		gk20a_writel(g, sw_non_ctx_load->l[i].addr,
 			     sw_non_ctx_load->l[i].value);
+	}
 
 	err = gr_gk20a_wait_mem_scrubbing(g);
 	if (err)
@@ -6321,9 +6337,10 @@ void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
 {
 	u32 fbpa_id;
 
-	for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++)
+	for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
 		priv_addr_table[(*t)++] = pri_fbpa_addr(g,
 				pri_fbpa_addr_mask(g, addr), fbpa_id);
+	}
 }
 
 int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
@@ -6334,9 +6351,10 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
-	for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++)
+	for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++) {
 		priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr),
 		gpc_num, ppc_num);
+	}
 
 	return 0;
 }
@@ -6396,10 +6414,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
 		if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
 			for (tpc_num = 0;
 			     tpc_num < g->gr.gpc_tpc_count[gpc_num];
-			     tpc_num++)
+			     tpc_num++) {
 				priv_addr_table[t++] =
 					pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
 						gpc_num, tpc_num);
+			}
 
 		else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
 			err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num,
@@ -6439,10 +6458,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
 			if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
 				for (tpc_num = 0;
 				     tpc_num < g->gr.gpc_tpc_count[gpc_num];
-				     tpc_num++)
+				     tpc_num++) {
 					priv_addr_table[t++] =
 						pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
 							gpc_num, tpc_num);
+				}
 			else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC)
 				err = gr_gk20a_split_ppc_broadcast_addr(g,
 					addr, gpc_num, priv_addr_table, &t);
@@ -7793,8 +7813,9 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 
 	nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
+	}
 
 	return 0;
 cleanup:
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index bf4673bf..8dbeed40 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -81,9 +81,10 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
 
 	nvgpu_err(g, "dump PMU trace buffer");
 	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
-		for (j = 0; j < 0x40; j++)
+		for (j = 0; j < 0x40; j++) {
 			if (trace1[(i / 4) + j])
 				break;
+		}
 		if (j == 0x40)
 			break;
 		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
@@ -634,13 +635,15 @@ void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	unsigned int i;
 
-	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
+	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) {
 		nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
+	}
 
-	for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
+	for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) {
 		nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
 			i, gk20a_readl(g, pwr_pmu_debug_r(i)));
+	}
 
 	i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
 	nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 232d01a7..cd3fe2f7 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -47,9 +47,10 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 static inline bool linear_search(u32 offset, const u32 *list, int size)
 {
 	int i;
-	for (i = 0; i < size; i++)
+	for (i = 0; i < size; i++) {
 		if (list[i] == offset)
 			return true;
+	}
 	return false;
 }
 