author		Srirangan <smadhavan@nvidia.com>	2018-08-23 03:27:45 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-27 10:52:18 -0400
commit		5c9bedf6f6e3213cd830d045d70f61de49f6e42b (patch)
tree		b5ae6359eb15494766d7c1245304837042c0ca5d /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parent		14949fbad615ef55adf08c39fd7614d1cbd4109e (diff)
gpu: nvgpu: gp10b: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that the body of every if/else be enclosed in braces,
including single-statement bodies. Fix the violations caused by single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Ib5961506b0f95867a57f8c0d7024568785fe7b93
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797332
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
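For context, here is a minimal standalone C sketch of the pattern this change enforces (the function and variable names are illustrative only, not taken from the driver): MISRA C:2012 Rule 15.6 requires the body of every if/else to be a compound statement, so single-statement bodies gain braces with no change in behavior.

#include <stdio.h>

/*
 * Non-compliant form before such a fix (body is a bare statement):
 *
 *     if (err)
 *         return err;
 *
 * Compliant form after the fix (body is a compound statement):
 */
static int check_mapping(int err, int sparse)
{
	if (err) {
		return err;
	}

	if (sparse) {
		printf("sparse mapping\n");
	} else {
		printf("normal mapping\n");
	}

	return 0;
}

int main(void)
{
	return check_mapping(0, 1);
}

The braces guard against dangling-else ambiguity and against later additions silently falling outside the intended body, which is the usual rationale for Rule 15.6.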
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--	drivers/gpu/nvgpu/gp10b/mm_gp10b.c	37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 5a24adc0..342dc486 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -59,13 +59,15 @@ int gp10b_init_bar2_vm(struct gk20a *g)
 	mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
 		mm->bar2.aperture_size - SZ_4K,
 		mm->bar2.aperture_size, false, false, "bar2");
-	if (!mm->bar2.vm)
+	if (!mm->bar2.vm) {
 		return -ENOMEM;
+	}
 
 	/* allocate instance mem for bar2 */
 	err = g->ops.mm.alloc_inst_block(g, inst_block);
-	if (err)
+	if (err) {
 		goto clean_up_va;
+	}
 
 	g->ops.mm.init_inst_block(inst_block, mm->bar2.vm, big_page_size);
 
@@ -129,11 +131,13 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
 	big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
-	if (small_valid)
+	if (small_valid) {
 		small_addr = phys_addr >> gmmu_new_dual_pde_address_shift_v();
+	}
 
-	if (big_valid)
+	if (big_valid) {
 		big_addr = phys_addr >> gmmu_new_dual_pde_address_big_shift_v();
+	}
 
 	if (small_valid) {
 		pde_v[2] |=
@@ -195,24 +199,28 @@ static void __update_pte(struct vm_gk20a *vm,
 
 	pte_w[0] = pte_valid | pte_addr | pte_tgt;
 
-	if (attrs->priv)
+	if (attrs->priv) {
 		pte_w[0] |= gmmu_new_pte_privilege_true_f();
+	}
 
 	pte_w[1] = phys_addr >> (24 + gmmu_new_pte_address_shift_v()) |
 		gmmu_new_pte_kind_f(attrs->kind_v) |
 		gmmu_new_pte_comptagline_f((u32)(attrs->ctag /
 			ctag_granularity));
 
-	if (attrs->rw_flag == gk20a_mem_flag_read_only)
+	if (attrs->rw_flag == gk20a_mem_flag_read_only) {
 		pte_w[0] |= gmmu_new_pte_read_only_true_f();
+	}
 
-	if (!attrs->valid && !attrs->cacheable)
+	if (!attrs->valid && !attrs->cacheable) {
 		pte_w[0] |= gmmu_new_pte_read_only_true_f();
-	else if (!attrs->cacheable)
+	} else if (!attrs->cacheable) {
 		pte_w[0] |= gmmu_new_pte_vol_true_f();
+	}
 
-	if (attrs->ctag)
+	if (attrs->ctag) {
 		attrs->ctag += page_size;
+	}
 
 }
 
@@ -235,10 +243,11 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pte_w[2] = {0, 0};
 
-	if (phys_addr)
+	if (phys_addr) {
 		__update_pte(vm, pte_w, phys_addr, attrs);
-	else if (attrs->sparse)
+	} else if (attrs->sparse) {
 		__update_pte_sparse(pte_w);
+	}
 
 	pte_dbg(g, attrs,
 		"vm=%s "
@@ -283,8 +292,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
 	u32 i;
 	u32 pgsz = GMMU_NR_PAGE_SIZES;
 
-	if (!pd->mem)
+	if (!pd->mem) {
 		return pgsz;
+	}
 
 	for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++) {
 		pde_v[i] = nvgpu_mem_rd32(g, pd->mem, pde_offset + i);
@@ -300,8 +310,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
 			gmmu_new_dual_pde_address_small_sys_f(~0))) <<
 			gmmu_new_dual_pde_address_shift_v();
 
-		if (addr)
+		if (addr) {
 			pgsz = GMMU_PAGE_SIZE_SMALL;
+		}
 	}
 
 	if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |