summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b
diff options
context:
space:
mode:
authorAmulya <Amurthyreddy@nvidia.com>2018-08-09 01:10:08 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-08-22 20:31:33 -0400
commit1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/gp10b
parentf3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and changed all the instances
of it. The enum gmmu_pgsz_gk20a was being used in for loops, where it was
compared with an integer. This violates MISRA rule 10.4, which only allows
arithmetic operations on operands of the same essential type category.
Changing this enum into macros will fix this violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r--drivers/gpu/nvgpu/gp10b/mm_gp10b.c19
1 file changed, 9 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 75ae3d04..5a24adc0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -126,8 +126,8 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pde_v[4] = {0, 0, 0, 0};
 
-	small_valid = attrs->pgsz == gmmu_page_size_small;
-	big_valid = attrs->pgsz == gmmu_page_size_big;
+	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
+	big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
 	if (small_valid)
 		small_addr = phys_addr >> gmmu_new_dual_pde_address_shift_v();
@@ -274,15 +274,14 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
  * level having a different number of entries depending on whether it holds
  * big pages or small pages.
  */
-static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
-		const struct gk20a_mmu_level *l,
-		struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	u32 pde_base = pd->mem_offs / sizeof(u32);
 	u32 pde_offset = pde_base + pd_offset_from_index(l, pd_idx);
 	u32 pde_v[GP10B_PDE0_ENTRY_SIZE >> 2];
 	u32 i;
-	enum gmmu_pgsz_gk20a pgsz = gmmu_nr_page_sizes;
+	u32 pgsz = GMMU_NR_PAGE_SIZES;
 
 	if (!pd->mem)
 		return pgsz;
@@ -302,7 +301,7 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 			gmmu_new_dual_pde_address_shift_v();
 
 		if (addr)
-			pgsz = gmmu_page_size_small;
+			pgsz = GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
@@ -318,12 +317,12 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 		 * both small and big to be set, the PDE is not valid
 		 * and may be corrupted
 		 */
-		if (pgsz == gmmu_page_size_small) {
+		if (pgsz == GMMU_PAGE_SIZE_SMALL) {
 			nvgpu_err(g,
 				"both small and big apertures enabled");
-			return gmmu_nr_page_sizes;
+			return GMMU_NR_PAGE_SIZES;
 		}
-		pgsz = gmmu_page_size_big;
+		pgsz = GMMU_PAGE_SIZE_BIG;
 	}
 }
 