author     Srirangan <smadhavan@nvidia.com>    2018-08-31 03:50:52 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-05 07:51:32 -0400
commit     43851d41b187c92f5ea9c2f503a882277f661d7e (patch)
tree       964a76c136c8c0dc14ec95358d27f930532b7dcb /drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
parent     0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Iedac7d50aa2ebd409434eea5fda902b16d9c6fea
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797695
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
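For context only (not part of this change), a minimal sketch of the single-statement pattern MISRA Rule 15.6 flags, next to the braced form this commit adopts; the variable name err below is purely illustrative:

    /* non-compliant: single-statement if body without braces */
    if (err != 0)
            return err;

    /* compliant with MISRA Rule 15.6: body enclosed in braces */
    if (err != 0) {
            return err;
    }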
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/flcn_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/flcn_gk20a.c  38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index e6e16511..2f715ae1 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -35,10 +35,10 @@ static int gk20a_flcn_reset(struct nvgpu_falcon *flcn)
         u32 unit_status = 0;
         int status = 0;
 
-        if (flcn->flcn_engine_dep_ops.reset_eng)
+        if (flcn->flcn_engine_dep_ops.reset_eng) {
                 /* falcon & engine reset */
                 status = flcn->flcn_engine_dep_ops.reset_eng(g);
-        else {
+        } else {
                 /* do falcon CPU hard reset */
                 unit_status = gk20a_readl(g, base_addr +
                                 falcon_falcon_cpuctl_r());
@@ -62,9 +62,10 @@ static bool gk20a_flcn_clear_halt_interrupt_status(struct nvgpu_falcon *flcn)
         data = gk20a_readl(g, (base_addr + falcon_falcon_irqstat_r()));
 
         if ((data & falcon_falcon_irqstat_halt_true_f()) !=
-                falcon_falcon_irqstat_halt_true_f())
+                falcon_falcon_irqstat_halt_true_f()) {
                 /*halt irq is clear*/
                 status = true;
+        }
 
         return status;
 }
@@ -86,9 +87,10 @@ static void gk20a_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable)
                         flcn->intr_mask);
                 gk20a_writel(g, base_addr + falcon_falcon_irqdest_r(),
                         flcn->intr_dest);
-        } else
+        } else {
                 gk20a_writel(g, base_addr + falcon_falcon_irqmclr_r(),
                         0xffffffff);
+        }
 }
 
 static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
@@ -112,10 +114,11 @@ static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
                 base_addr + falcon_falcon_idlestate_r());
 
         if (falcon_falcon_idlestate_falcon_busy_v(unit_status) == 0 &&
-                falcon_falcon_idlestate_ext_busy_v(unit_status) == 0)
+                falcon_falcon_idlestate_ext_busy_v(unit_status) == 0) {
                 status = true;
-        else
+        } else {
                 status = false;
+        }
 
         return status;
 }
@@ -131,10 +134,11 @@ static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
                 base_addr + falcon_falcon_dmactl_r());
 
         if (unit_status & (falcon_falcon_dmactl_dmem_scrubbing_m() |
-                falcon_falcon_dmactl_imem_scrubbing_m()))
+                falcon_falcon_dmactl_imem_scrubbing_m())) {
                 status = false;
-        else
+        } else {
                 status = true;
+        }
 
         return status;
 }
@@ -147,12 +151,13 @@ static u32 gk20a_falcon_get_mem_size(struct nvgpu_falcon *flcn,
         u32 hw_cfg_reg = gk20a_readl(g,
                 flcn->flcn_base + falcon_falcon_hwcfg_r());
 
-        if (mem_type == MEM_DMEM)
+        if (mem_type == MEM_DMEM) {
                 mem_size = falcon_falcon_hwcfg_dmem_size_v(hw_cfg_reg)
                         << GK20A_PMU_DMEM_BLKSIZE2;
-        else
+        } else {
                 mem_size = falcon_falcon_hwcfg_imem_size_v(hw_cfg_reg)
                         << GK20A_PMU_DMEM_BLKSIZE2;
+        }
 
         return mem_size;
 }
@@ -416,12 +421,13 @@ static u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
         struct gk20a *g = flcn->g;
         u32 data = 0;
 
-        if (mailbox_index < FALCON_MAILBOX_COUNT)
+        if (mailbox_index < FALCON_MAILBOX_COUNT) {
                 data = gk20a_readl(g, flcn->flcn_base + (mailbox_index ?
                         falcon_falcon_mailbox1_r() :
                         falcon_falcon_mailbox0_r()));
-        else
+        } else {
                 nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
+        }
 
         return data;
 }
@@ -431,13 +437,14 @@ static void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
 {
         struct gk20a *g = flcn->g;
 
-        if (mailbox_index < FALCON_MAILBOX_COUNT)
+        if (mailbox_index < FALCON_MAILBOX_COUNT) {
                 gk20a_writel(g, flcn->flcn_base + (mailbox_index ?
                         falcon_falcon_mailbox1_r() :
                         falcon_falcon_mailbox0_r()),
                         data);
-        else
+        } else {
                 nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
+        }
 }
 
 static int gk20a_falcon_bl_bootstrap(struct nvgpu_falcon *flcn,
@@ -739,7 +746,8 @@ void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         if (flcn->is_falcon_supported) {
                 nvgpu_mutex_init(&flcn->copy_lock);
                 gk20a_falcon_ops(flcn);
-        } else
+        } else {
                 nvgpu_log_info(g, "falcon 0x%x not supported on %s",
                         flcn->flcn_id, g->name);
+        }
 }