diff options
author | Srirangan <smadhavan@nvidia.com> | 2018-08-20 06:39:12 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-28 09:46:59 -0400 |
commit | 07d3387ceb10cdc4d4413d04b1223cbd5181438b (patch) | |
tree | c86a661e6bff08c43f45fdb2b79be9ba1a6531b1 /drivers/gpu/nvgpu/gv11b/mm_gv11b.c | |
parent | 3e5e4804f9c2bf5b914012852b56dbbbc00f8253 (diff) |
gpu: nvgpu: gv11b: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement
if blocks without braces by introducing the braces.
JIRA NVGPU-671
Change-Id: I1562bd1b109a100af29bd147ed8b56463b6a8e63
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1796674
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Scott Long <scottl@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/mm_gv11b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gv11b/mm_gv11b.c | 29 |
1 file changed, 19 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c index b8272a92..73b7dae7 100644 --- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c | |||
@@ -56,8 +56,9 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block, | |||
56 | 56 | ||
57 | g->ops.mm.init_pdb(g, inst_block, vm); | 57 | g->ops.mm.init_pdb(g, inst_block, vm); |
58 | 58 | ||
59 | if (big_page_size && g->ops.mm.set_big_page_size) | 59 | if (big_page_size && g->ops.mm.set_big_page_size) { |
60 | g->ops.mm.set_big_page_size(g, inst_block, big_page_size); | 60 | g->ops.mm.set_big_page_size(g, inst_block, big_page_size); |
61 | } | ||
61 | 62 | ||
62 | gv11b_init_subcontext_pdb(vm, inst_block, false); | 63 | gv11b_init_subcontext_pdb(vm, inst_block, false); |
63 | } | 64 | } |
@@ -97,12 +98,14 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g) | |||
97 | nvgpu_mutex_acquire(&g->mm.hub_isr_mutex); | 98 | nvgpu_mutex_acquire(&g->mm.hub_isr_mutex); |
98 | 99 | ||
99 | if (nvgpu_mem_is_valid( | 100 | if (nvgpu_mem_is_valid( |
100 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) | 101 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) { |
101 | nvgpu_dma_unmap_free(vm, | 102 | nvgpu_dma_unmap_free(vm, |
102 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]); | 103 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]); |
103 | if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) | 104 | } |
105 | if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) { | ||
104 | nvgpu_dma_unmap_free(vm, | 106 | nvgpu_dma_unmap_free(vm, |
105 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]); | 107 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]); |
108 | } | ||
106 | 109 | ||
107 | nvgpu_mutex_release(&g->mm.hub_isr_mutex); | 110 | nvgpu_mutex_release(&g->mm.hub_isr_mutex); |
108 | nvgpu_mutex_destroy(&g->mm.hub_isr_mutex); | 111 | nvgpu_mutex_destroy(&g->mm.hub_isr_mutex); |
@@ -152,12 +155,14 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g) | |||
152 | static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g) | 155 | static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g) |
153 | { | 156 | { |
154 | if (nvgpu_mem_is_valid( | 157 | if (nvgpu_mem_is_valid( |
155 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) | 158 | &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) { |
156 | g->ops.fb.fault_buf_configure_hw(g, | 159 | g->ops.fb.fault_buf_configure_hw(g, |
157 | NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX); | 160 | NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX); |
158 | if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) | 161 | } |
162 | if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) { | ||
159 | g->ops.fb.fault_buf_configure_hw(g, | 163 | g->ops.fb.fault_buf_configure_hw(g, |
160 | NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX); | 164 | NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX); |
165 | } | ||
161 | } | 166 | } |
162 | 167 | ||
163 | static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) | 168 | static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) |
@@ -170,8 +175,9 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) | |||
170 | 175 | ||
171 | err = gv11b_mm_mmu_fault_info_buf_init(g); | 176 | err = gv11b_mm_mmu_fault_info_buf_init(g); |
172 | 177 | ||
173 | if (!err) | 178 | if (!err) { |
174 | gv11b_mm_mmu_hw_fault_buf_init(g); | 179 | gv11b_mm_mmu_hw_fault_buf_init(g); |
180 | } | ||
175 | 181 | ||
176 | return err; | 182 | return err; |
177 | } | 183 | } |
@@ -185,8 +191,9 @@ int gv11b_init_mm_setup_hw(struct gk20a *g) | |||
185 | err = gk20a_init_mm_setup_hw(g); | 191 | err = gk20a_init_mm_setup_hw(g); |
186 | 192 | ||
187 | err = gv11b_mm_mmu_fault_setup_sw(g); | 193 | err = gv11b_mm_mmu_fault_setup_sw(g); |
188 | if (!err) | 194 | if (!err) { |
189 | gv11b_mm_mmu_fault_setup_hw(g); | 195 | gv11b_mm_mmu_fault_setup_hw(g); |
196 | } | ||
190 | 197 | ||
191 | nvgpu_log_fn(g, "end"); | 198 | nvgpu_log_fn(g, "end"); |
192 | 199 | ||
@@ -199,11 +206,12 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
199 | 206 | ||
200 | g->ops.mm.fb_flush(g); | 207 | g->ops.mm.fb_flush(g); |
201 | gk20a_mm_l2_flush(g, invalidate); | 208 | gk20a_mm_l2_flush(g, invalidate); |
202 | if (g->ops.bus.bar1_bind) | 209 | if (g->ops.bus.bar1_bind) { |
203 | g->ops.fb.tlb_invalidate(g, | 210 | g->ops.fb.tlb_invalidate(g, |
204 | g->mm.bar1.vm->pdb.mem); | 211 | g->mm.bar1.vm->pdb.mem); |
205 | else | 212 | } else { |
206 | g->ops.mm.fb_flush(g); | 213 | g->ops.mm.fb_flush(g); |
214 | } | ||
207 | } | 215 | } |
208 | 216 | ||
209 | /* | 217 | /* |
@@ -214,8 +222,9 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
214 | u64 gv11b_gpu_phys_addr(struct gk20a *g, | 222 | u64 gv11b_gpu_phys_addr(struct gk20a *g, |
215 | struct nvgpu_gmmu_attrs *attrs, u64 phys) | 223 | struct nvgpu_gmmu_attrs *attrs, u64 phys) |
216 | { | 224 | { |
217 | if (attrs && attrs->l3_alloc) | 225 | if (attrs && attrs->l3_alloc) { |
218 | return phys | NVGPU_L3_ALLOC_BIT; | 226 | return phys | NVGPU_L3_ALLOC_BIT; |
227 | } | ||
219 | 228 | ||
220 | return phys; | 229 | return phys; |
221 | } | 230 | } |