summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
diff options
context:
space:
mode:
author    Srirangan <smadhavan@nvidia.com>  2018-08-23 02:37:41 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-28 09:47:28 -0400
commit    4032e8915a65aa94f8b556676c5606683ec28f52 (patch)
tree      dc16ddcc61f9fed52c1c687bb02e6ec13edd28c6 /drivers/gpu/nvgpu/gm20b/gr_gm20b.c
parent    8676b2e65b786497c4a0609f06143e7d1bb1a3c0 (diff)
gpu: nvgpu: gm20b: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement if
blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I1651ae8ee680bdeb48606569c4e8c2fc7cb87f20
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805077
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gr_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c  70
1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index fc4ab3dd..7ed36144 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -89,9 +89,10 @@ void gr_gm20b_cb_size_default(struct gk20a *g)
89{ 89{
90 struct gr_gk20a *gr = &g->gr; 90 struct gr_gk20a *gr = &g->gr;
91 91
92 if (!gr->attrib_cb_default_size) 92 if (!gr->attrib_cb_default_size) {
93 gr->attrib_cb_default_size = 93 gr->attrib_cb_default_size =
94 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(); 94 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
95 }
95 gr->alpha_cb_default_size = 96 gr->alpha_cb_default_size =
96 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(); 97 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
97} 98}
@@ -189,8 +190,9 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
189 nvgpu_log_fn(g, " "); 190 nvgpu_log_fn(g, " ");
190 191
191 tsg = tsg_gk20a_from_ch(c); 192 tsg = tsg_gk20a_from_ch(c);
192 if (!tsg) 193 if (!tsg) {
193 return -EINVAL; 194 return -EINVAL;
195 }
194 196
195 ch_ctx = &tsg->gr_ctx; 197 ch_ctx = &tsg->gr_ctx;
196 198
@@ -338,8 +340,9 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
338 /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF) 340 /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
339 return; */ 341 return; */
340 342
341 if (alpha_cb_size > gr->alpha_cb_size) 343 if (alpha_cb_size > gr->alpha_cb_size) {
342 alpha_cb_size = gr->alpha_cb_size; 344 alpha_cb_size = gr->alpha_cb_size;
345 }
343 346
344 gk20a_writel(g, gr_ds_tga_constraintlogic_r(), 347 gk20a_writel(g, gr_ds_tga_constraintlogic_r(),
345 (gk20a_readl(g, gr_ds_tga_constraintlogic_r()) & 348 (gk20a_readl(g, gr_ds_tga_constraintlogic_r()) &
@@ -385,8 +388,9 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
385 388
386 nvgpu_log_fn(g, " "); 389 nvgpu_log_fn(g, " ");
387 390
388 if (cb_size > gr->attrib_cb_size) 391 if (cb_size > gr->attrib_cb_size) {
389 cb_size = gr->attrib_cb_size; 392 cb_size = gr->attrib_cb_size;
393 }
390 394
391 gk20a_writel(g, gr_ds_tga_constraintlogic_r(), 395 gk20a_writel(g, gr_ds_tga_constraintlogic_r(),
392 (gk20a_readl(g, gr_ds_tga_constraintlogic_r()) & 396 (gk20a_readl(g, gr_ds_tga_constraintlogic_r()) &
@@ -485,18 +489,20 @@ bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num)
485 489
486bool gr_gm20b_is_valid_gfx_class(struct gk20a *g, u32 class_num) 490bool gr_gm20b_is_valid_gfx_class(struct gk20a *g, u32 class_num)
487{ 491{
488 if (class_num == MAXWELL_B) 492 if (class_num == MAXWELL_B) {
489 return true; 493 return true;
490 else 494 } else {
491 return false; 495 return false;
496 }
492} 497}
493 498
494bool gr_gm20b_is_valid_compute_class(struct gk20a *g, u32 class_num) 499bool gr_gm20b_is_valid_compute_class(struct gk20a *g, u32 class_num)
495{ 500{
496 if (class_num == MAXWELL_COMPUTE_B) 501 if (class_num == MAXWELL_COMPUTE_B) {
497 return true; 502 return true;
498 else 503 } else {
499 return false; 504 return false;
505 }
500} 506}
501 507
502 508
@@ -511,8 +517,9 @@ static u32 _sm_dsm_perf_ctrl_regs[2];
511 517
512void gr_gm20b_init_sm_dsm_reg_info(void) 518void gr_gm20b_init_sm_dsm_reg_info(void)
513{ 519{
514 if (_sm_dsm_perf_ctrl_regs[0] != 0) 520 if (_sm_dsm_perf_ctrl_regs[0] != 0) {
515 return; 521 return;
522 }
516 523
517 _sm_dsm_perf_ctrl_regs[0] = 524 _sm_dsm_perf_ctrl_regs[0] =
518 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r(); 525 gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r();
@@ -619,8 +626,9 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
619 u32 tpc_index, gpc_index; 626 u32 tpc_index, gpc_index;
620 627
621 tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32)); 628 tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
622 if (!tpc_sm_id) 629 if (!tpc_sm_id) {
623 return -ENOMEM; 630 return -ENOMEM;
631 }
624 632
625 /* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/ 633 /* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
626 for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) { 634 for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) {
@@ -632,8 +640,9 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
632 u32 sm_id = (i * 4) + j; 640 u32 sm_id = (i * 4) + j;
633 u32 bits; 641 u32 bits;
634 642
635 if (sm_id >= g->gr.tpc_count) 643 if (sm_id >= g->gr.tpc_count) {
636 break; 644 break;
645 }
637 646
638 gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index; 647 gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
639 tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index; 648 tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
@@ -663,8 +672,9 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
663 nvgpu_log_fn(g, " "); 672 nvgpu_log_fn(g, " ");
664 673
665 err = gr_gk20a_init_fs_state(g); 674 err = gr_gk20a_init_fs_state(g);
666 if (err) 675 if (err) {
667 return err; 676 return err;
677 }
668 678
669 g->ops.gr.load_tpc_mask(g); 679 g->ops.gr.load_tpc_mask(g);
670 680
@@ -731,8 +741,9 @@ u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr)
731 for (i = 0; i < num_tpcs; i++) { 741 for (i = 0; i < num_tpcs; i++) {
732 start = tpc_in_gpc_base + (i * tpc_in_gpc_stride); 742 start = tpc_in_gpc_base + (i * tpc_in_gpc_stride);
733 if ((addr >= start) && 743 if ((addr >= start) &&
734 (addr < (start + tpc_in_gpc_stride))) 744 (addr < (start + tpc_in_gpc_stride))) {
735 return i; 745 return i;
746 }
736 } 747 }
737 return 0; 748 return 0;
738} 749}
@@ -793,10 +804,12 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
793 } else { 804 } else {
794 /* bind WPR VA inst block */ 805 /* bind WPR VA inst block */
795 gr_gk20a_load_falcon_bind_instblk(g); 806 gr_gk20a_load_falcon_bind_instblk(g);
796 if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_FECS)) 807 if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_FECS)) {
797 falcon_id_mask |= (1 << LSF_FALCON_ID_FECS); 808 falcon_id_mask |= (1 << LSF_FALCON_ID_FECS);
798 if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_GPCCS)) 809 }
810 if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_GPCCS)) {
799 falcon_id_mask |= (1 << LSF_FALCON_ID_GPCCS); 811 falcon_id_mask |= (1 << LSF_FALCON_ID_GPCCS);
812 }
800 813
801 err = g->ops.pmu.load_lsfalcon_ucode(g, falcon_id_mask); 814 err = g->ops.pmu.load_lsfalcon_ucode(g, falcon_id_mask);
802 815
@@ -856,11 +869,13 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
856 nvgpu_log_fn(g, " "); 869 nvgpu_log_fn(g, " ");
857 870
858 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); 871 err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
859 if (err) 872 if (err) {
860 return err; 873 return err;
874 }
861 875
862 if (class == MAXWELL_COMPUTE_B) 876 if (class == MAXWELL_COMPUTE_B) {
863 gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA; 877 gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
878 }
864 879
865 nvgpu_log_fn(g, "done"); 880 nvgpu_log_fn(g, "done");
866 881
@@ -879,8 +894,9 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
879 nvgpu_log_fn(g, " "); 894 nvgpu_log_fn(g, " ");
880 895
881 tsg = tsg_gk20a_from_ch(c); 896 tsg = tsg_gk20a_from_ch(c);
882 if (!tsg) 897 if (!tsg) {
883 return; 898 return;
899 }
884 900
885 gr_ctx = &tsg->gr_ctx; 901 gr_ctx = &tsg->gr_ctx;
886 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { 902 if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
@@ -941,9 +957,10 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
941 gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r())); 957 gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
942 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n", 958 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
943 gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r())); 959 gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
944 if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) 960 if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
945 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n", 961 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
946 gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r())); 962 gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
963 }
947 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n", 964 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
948 gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r())); 965 gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r()));
949 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n", 966 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n",
@@ -956,9 +973,10 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
956 gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r())); 973 gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
957 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n", 974 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
958 gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r())); 975 gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
959 if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) 976 if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
960 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n", 977 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
961 gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r())); 978 gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
979 }
962 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n", 980 gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
963 gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r())); 981 gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r()));
964 gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 0x%x\n", 982 gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 0x%x\n",
@@ -1042,13 +1060,15 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
1042 nvgpu_log_fn(c->g, " "); 1060 nvgpu_log_fn(c->g, " ");
1043 1061
1044 tsg = tsg_gk20a_from_ch(c); 1062 tsg = tsg_gk20a_from_ch(c);
1045 if (!tsg) 1063 if (!tsg) {
1046 return -EINVAL; 1064 return -EINVAL;
1065 }
1047 1066
1048 gr_ctx = &tsg->gr_ctx; 1067 gr_ctx = &tsg->gr_ctx;
1049 mem = &gr_ctx->mem; 1068 mem = &gr_ctx->mem;
1050 if (!nvgpu_mem_is_valid(mem) || c->vpr) 1069 if (!nvgpu_mem_is_valid(mem) || c->vpr) {
1051 return -EINVAL; 1070 return -EINVAL;
1071 }
1052 1072
1053 1073
1054 v = nvgpu_mem_rd(c->g, mem, ctxsw_prog_main_image_pm_o()); 1074 v = nvgpu_mem_rd(c->g, mem, ctxsw_prog_main_image_pm_o());
@@ -1349,8 +1369,9 @@ int gm20b_gr_update_sm_error_state(struct gk20a *g,
1349 int err = 0; 1369 int err = 0;
1350 1370
1351 tsg = tsg_gk20a_from_ch(ch); 1371 tsg = tsg_gk20a_from_ch(ch);
1352 if (!tsg) 1372 if (!tsg) {
1353 return -EINVAL; 1373 return -EINVAL;
1374 }
1354 1375
1355 ch_ctx = &tsg->gr_ctx; 1376 ch_ctx = &tsg->gr_ctx;
1356 1377
@@ -1374,8 +1395,9 @@ int gm20b_gr_update_sm_error_state(struct gk20a *g,
1374 gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states); 1395 gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
1375 } else { 1396 } else {
1376 err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false); 1397 err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
1377 if (err) 1398 if (err) {
1378 goto enable_ctxsw; 1399 goto enable_ctxsw;
1400 }
1379 1401
1380 gr_gk20a_ctx_patch_write(g, ch_ctx, 1402 gr_gk20a_ctx_patch_write(g, ch_ctx,
1381 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset, 1403 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,