summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
diff options
context:
space:
mode:
authorSrirangan <smadhavan@nvidia.com>2018-08-02 04:45:54 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-08-06 20:36:39 -0400
commit17aeea4a2ffa23fc9dbcdc84cda747fe5a025131 (patch)
treed4be52f246724fb9cb99047059073b93aeb089ce /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent6c9daf7626567fffc9d1ccd475865e81ae90a973 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
This fixes errors due to single-statement loop bodies without braces, which is part of Rule 15.6 of MISRA. This patch covers gpu/nvgpu/gk20a/.
JIRA NVGPU-989
Change-Id: I2f422e9bc2b03229f4d2c3198613169ce5e7f3ee
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791019
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c63
1 file changed, 42 insertions, 21 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 4dfddf5d..bdcf750e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -149,9 +149,10 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
149 nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x", 149 nvgpu_err(g, "gr_fecs_ctxsw_status_1_r : 0x%x",
150 gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); 150 gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
151 151
152 for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) 152 for (i = 0; i < g->ops.gr.fecs_ctxsw_mailbox_size(); i++) {
153 nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x", 153 nvgpu_err(g, "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
154 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i))); 154 i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
155 }
155 156
156 nvgpu_err(g, "gr_fecs_engctl_r : 0x%x", 157 nvgpu_err(g, "gr_fecs_engctl_r : 0x%x",
157 gk20a_readl(g, gr_fecs_engctl_r())); 158 gk20a_readl(g, gr_fecs_engctl_r()));
@@ -1144,8 +1145,9 @@ static inline u32 count_bits(u32 mask)
1144{ 1145{
1145 u32 temp = mask; 1146 u32 temp = mask;
1146 u32 count; 1147 u32 count;
1147 for (count = 0; temp != 0; count++) 1148 for (count = 0; temp != 0; count++) {
1148 temp &= temp - 1; 1149 temp &= temp - 1;
1150 }
1149 1151
1150 return count; 1152 return count;
1151} 1153}
@@ -1485,9 +1487,10 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
1485 GR_IDLE_CHECK_DEFAULT); 1487 GR_IDLE_CHECK_DEFAULT);
1486 1488
1487 /* load ctx init */ 1489 /* load ctx init */
1488 for (i = 0; i < sw_ctx_load->count; i++) 1490 for (i = 0; i < sw_ctx_load->count; i++) {
1489 gk20a_writel(g, sw_ctx_load->l[i].addr, 1491 gk20a_writel(g, sw_ctx_load->l[i].addr,
1490 sw_ctx_load->l[i].value); 1492 sw_ctx_load->l[i].value);
1493 }
1491 1494
1492 if (g->ops.gr.init_preemption_state) 1495 if (g->ops.gr.init_preemption_state)
1493 g->ops.gr.init_preemption_state(g); 1496 g->ops.gr.init_preemption_state(g);
@@ -2029,8 +2032,9 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
2029 2032
2030 /* compute a "checksum" for the boot binary to detect its version */ 2033 /* compute a "checksum" for the boot binary to detect its version */
2031 segments->boot_signature = 0; 2034 segments->boot_signature = 0;
2032 for (i = 0; i < segments->boot.size / sizeof(u32); i++) 2035 for (i = 0; i < segments->boot.size / sizeof(u32); i++) {
2033 segments->boot_signature += bootimage[i]; 2036 segments->boot_signature += bootimage[i];
2037 }
2034 2038
2035 return 0; 2039 return 0;
2036} 2040}
@@ -3335,33 +3339,41 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
3335 nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count); 3339 nvgpu_log_info(g, "tpc_count: %d", gr->tpc_count);
3336 nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count); 3340 nvgpu_log_info(g, "ppc_count: %d", gr->ppc_count);
3337 3341
3338 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3342 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3339 nvgpu_log_info(g, "gpc_tpc_count[%d] : %d", 3343 nvgpu_log_info(g, "gpc_tpc_count[%d] : %d",
3340 gpc_index, gr->gpc_tpc_count[gpc_index]); 3344 gpc_index, gr->gpc_tpc_count[gpc_index]);
3341 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3345 }
3346 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3342 nvgpu_log_info(g, "gpc_zcb_count[%d] : %d", 3347 nvgpu_log_info(g, "gpc_zcb_count[%d] : %d",
3343 gpc_index, gr->gpc_zcb_count[gpc_index]); 3348 gpc_index, gr->gpc_zcb_count[gpc_index]);
3344 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3349 }
3350 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3345 nvgpu_log_info(g, "gpc_ppc_count[%d] : %d", 3351 nvgpu_log_info(g, "gpc_ppc_count[%d] : %d",
3346 gpc_index, gr->gpc_ppc_count[gpc_index]); 3352 gpc_index, gr->gpc_ppc_count[gpc_index]);
3347 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3353 }
3354 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3348 nvgpu_log_info(g, "gpc_skip_mask[%d] : %d", 3355 nvgpu_log_info(g, "gpc_skip_mask[%d] : %d",
3349 gpc_index, gr->gpc_skip_mask[gpc_index]); 3356 gpc_index, gr->gpc_skip_mask[gpc_index]);
3350 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3357 }
3358 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3351 for (pes_index = 0; 3359 for (pes_index = 0;
3352 pes_index < gr->pe_count_per_gpc; 3360 pes_index < gr->pe_count_per_gpc;
3353 pes_index++) 3361 pes_index++) {
3354 nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d", 3362 nvgpu_log_info(g, "pes_tpc_count[%d][%d] : %d",
3355 pes_index, gpc_index, 3363 pes_index, gpc_index,
3356 gr->pes_tpc_count[pes_index][gpc_index]); 3364 gr->pes_tpc_count[pes_index][gpc_index]);
3365 }
3366 }
3357 3367
3358 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3368 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3359 for (pes_index = 0; 3369 for (pes_index = 0;
3360 pes_index < gr->pe_count_per_gpc; 3370 pes_index < gr->pe_count_per_gpc;
3361 pes_index++) 3371 pes_index++) {
3362 nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d", 3372 nvgpu_log_info(g, "pes_tpc_mask[%d][%d] : %d",
3363 pes_index, gpc_index, 3373 pes_index, gpc_index,
3364 gr->pes_tpc_mask[pes_index][gpc_index]); 3374 gr->pes_tpc_mask[pes_index][gpc_index]);
3375 }
3376 }
3365 3377
3366 g->ops.gr.bundle_cb_defaults(g); 3378 g->ops.gr.bundle_cb_defaults(g);
3367 g->ops.gr.cb_size_default(g); 3379 g->ops.gr.cb_size_default(g);
@@ -3537,9 +3549,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
3537 } 3549 }
3538 } 3550 }
3539 3551
3540 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) 3552 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
3541 if (gr->gpc_tpc_count[gpc_index] > max_tpc_count) 3553 if (gr->gpc_tpc_count[gpc_index] > max_tpc_count) {
3542 max_tpc_count = gr->gpc_tpc_count[gpc_index]; 3554 max_tpc_count = gr->gpc_tpc_count[gpc_index];
3555 }
3556 }
3543 3557
3544 mul_factor = gr->gpc_count * max_tpc_count; 3558 mul_factor = gr->gpc_count * max_tpc_count;
3545 if (mul_factor & 0x1) 3559 if (mul_factor & 0x1)
@@ -4534,9 +4548,10 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
4534 g->ops.gr.disable_rd_coalesce(g); 4548 g->ops.gr.disable_rd_coalesce(g);
4535 4549
4536 /* load ctx init */ 4550 /* load ctx init */
4537 for (i = 0; i < sw_ctx_load->count; i++) 4551 for (i = 0; i < sw_ctx_load->count; i++) {
4538 gk20a_writel(g, sw_ctx_load->l[i].addr, 4552 gk20a_writel(g, sw_ctx_load->l[i].addr,
4539 sw_ctx_load->l[i].value); 4553 sw_ctx_load->l[i].value);
4554 }
4540 4555
4541 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), 4556 err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
4542 GR_IDLE_CHECK_DEFAULT); 4557 GR_IDLE_CHECK_DEFAULT);
@@ -4764,9 +4779,10 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
4764 gk20a_writel(g, gr_intr_en_r(), ~0); 4779 gk20a_writel(g, gr_intr_en_r(), ~0);
4765 4780
4766 /* load non_ctx init */ 4781 /* load non_ctx init */
4767 for (i = 0; i < sw_non_ctx_load->count; i++) 4782 for (i = 0; i < sw_non_ctx_load->count; i++) {
4768 gk20a_writel(g, sw_non_ctx_load->l[i].addr, 4783 gk20a_writel(g, sw_non_ctx_load->l[i].addr,
4769 sw_non_ctx_load->l[i].value); 4784 sw_non_ctx_load->l[i].value);
4785 }
4770 4786
4771 err = gr_gk20a_wait_mem_scrubbing(g); 4787 err = gr_gk20a_wait_mem_scrubbing(g);
4772 if (err) 4788 if (err)
@@ -6321,9 +6337,10 @@ void gr_gk20a_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
6321{ 6337{
6322 u32 fbpa_id; 6338 u32 fbpa_id;
6323 6339
6324 for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) 6340 for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
6325 priv_addr_table[(*t)++] = pri_fbpa_addr(g, 6341 priv_addr_table[(*t)++] = pri_fbpa_addr(g,
6326 pri_fbpa_addr_mask(g, addr), fbpa_id); 6342 pri_fbpa_addr_mask(g, addr), fbpa_id);
6343 }
6327} 6344}
6328 6345
6329int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr, 6346int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
@@ -6334,9 +6351,10 @@ int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
6334 6351
6335 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr); 6352 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
6336 6353
6337 for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++) 6354 for (ppc_num = 0; ppc_num < g->gr.gpc_ppc_count[gpc_num]; ppc_num++) {
6338 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr), 6355 priv_addr_table[(*t)++] = pri_ppc_addr(g, pri_ppccs_addr_mask(addr),
6339 gpc_num, ppc_num); 6356 gpc_num, ppc_num);
6357 }
6340 6358
6341 return 0; 6359 return 0;
6342} 6360}
@@ -6396,10 +6414,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6396 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) 6414 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
6397 for (tpc_num = 0; 6415 for (tpc_num = 0;
6398 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 6416 tpc_num < g->gr.gpc_tpc_count[gpc_num];
6399 tpc_num++) 6417 tpc_num++) {
6400 priv_addr_table[t++] = 6418 priv_addr_table[t++] =
6401 pri_tpc_addr(g, pri_tpccs_addr_mask(addr), 6419 pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
6402 gpc_num, tpc_num); 6420 gpc_num, tpc_num);
6421 }
6403 6422
6404 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { 6423 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
6405 err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, 6424 err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num,
@@ -6439,10 +6458,11 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g,
6439 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) 6458 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
6440 for (tpc_num = 0; 6459 for (tpc_num = 0;
6441 tpc_num < g->gr.gpc_tpc_count[gpc_num]; 6460 tpc_num < g->gr.gpc_tpc_count[gpc_num];
6442 tpc_num++) 6461 tpc_num++) {
6443 priv_addr_table[t++] = 6462 priv_addr_table[t++] =
6444 pri_tpc_addr(g, pri_tpccs_addr_mask(addr), 6463 pri_tpc_addr(g, pri_tpccs_addr_mask(addr),
6445 gpc_num, tpc_num); 6464 gpc_num, tpc_num);
6465 }
6446 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) 6466 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC)
6447 err = gr_gk20a_split_ppc_broadcast_addr(g, 6467 err = gr_gk20a_split_ppc_broadcast_addr(g,
6448 addr, gpc_num, priv_addr_table, &t); 6468 addr, gpc_num, priv_addr_table, &t);
@@ -7793,8 +7813,9 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7793 7813
7794 nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset"); 7814 nvgpu_log_info(g, "Reg Addr => HWPM Ctxt switch buffer offset");
7795 7815
7796 for (i = 0; i < count; i++) 7816 for (i = 0; i < count; i++) {
7797 nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset); 7817 nvgpu_log_info(g, "%08x => %08x", map[i].addr, map[i].offset);
7818 }
7798 7819
7799 return 0; 7820 return 0;
7800cleanup: 7821cleanup: