summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Srirangan <smadhavan@nvidia.com>	2018-07-28 13:39:45 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-03 01:45:12 -0400
commit	9b9a54920511d560d114881a923437fb88deffad (patch)
tree	0bca1bfc725eeeef345bc79e946cd8a72d4dda44
parent	d029ad5d8d39e7f153b43ca9d60c0ed1f23c8037 (diff)
gpu: nvgpu: gm20b: Fix MISRA 15.6 violations
This fixes errors due to single statement loop bodies without braces,
which is part of Rule 15.6 of MISRA. This patch covers gpu/nvgpu/gm20b/

JIRA NVGPU-989

Change-Id: Ia177bd990409500fc8e8a2a54ba013df84cb9822
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1788050
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gm20b/fifo_gm20b.c	3
-rw-r--r--	drivers/gpu/nvgpu/gm20b/gr_gm20b.c	12
2 files changed, 10 insertions, 5 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 96262934..73db1ae9 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -123,8 +123,9 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
123 nvgpu_err(g, "mmu fault timeout"); 123 nvgpu_err(g, "mmu fault timeout");
124 124
125 /* release mmu fault trigger */ 125 /* release mmu fault trigger */
126 for_each_set_bit(engine_id, &engine_ids, 32) 126 for_each_set_bit(engine_id, &engine_ids, 32) {
127 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0); 127 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
128 }
128} 129}
129 130
130u32 gm20b_fifo_get_num_fifos(struct gk20a *g) 131u32 gm20b_fifo_get_num_fifos(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 6ee6afdb..04ec2898 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -577,11 +577,12 @@ void gr_gm20b_load_tpc_mask(struct gk20a *g)
577 u32 gpc, pes; 577 u32 gpc, pes;
578 u32 num_tpc_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_TPC_PER_GPC); 578 u32 num_tpc_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_TPC_PER_GPC);
579 579
580 for (gpc = 0; gpc < g->gr.gpc_count; gpc++) 580 for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
581 for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) { 581 for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) {
582 pes_tpc_mask |= g->gr.pes_tpc_mask[pes][gpc] << 582 pes_tpc_mask |= g->gr.pes_tpc_mask[pes][gpc] <<
583 num_tpc_per_gpc * gpc; 583 num_tpc_per_gpc * gpc;
584 } 584 }
585 }
585 586
586 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0); 587 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
587 if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask && 588 if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask &&
@@ -647,8 +648,9 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
647 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg); 648 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
648 } 649 }
649 650
650 for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) 651 for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) {
651 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]); 652 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
653 }
652 654
653 nvgpu_kfree(g, tpc_sm_id); 655 nvgpu_kfree(g, tpc_sm_id);
654 656
@@ -1467,11 +1469,12 @@ static void gr_gm20b_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num,
1467 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); 1469 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
1468 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); 1470 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
1469 1471
1470 for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) 1472 for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) {
1471 priv_addr_table[index++] = ltc_ltc0_lts0_v() + 1473 priv_addr_table[index++] = ltc_ltc0_lts0_v() +
1472 ltc_num * ltc_stride + 1474 ltc_num * ltc_stride +
1473 lts_num * lts_stride + 1475 lts_num * lts_stride +
1474 (addr & (lts_stride - 1)); 1476 (addr & (lts_stride - 1));
1477 }
1475 1478
1476 *priv_addr_table_index = index; 1479 *priv_addr_table_index = index;
1477} 1480}
@@ -1503,9 +1506,10 @@ void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
1503 u32 num_ltc = g->ltc_count; 1506 u32 num_ltc = g->ltc_count;
1504 u32 ltc_num; 1507 u32 ltc_num;
1505 1508
1506 for (ltc_num = 0; ltc_num < num_ltc; ltc_num++) 1509 for (ltc_num = 0; ltc_num < num_ltc; ltc_num++) {
1507 gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num, 1510 gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num,
1508 priv_addr_table, priv_addr_table_index); 1511 priv_addr_table, priv_addr_table_index);
1512 }
1509} 1513}
1510 1514
1511void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, 1515void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,