From 1c13da1d29c344cb60953eabeca56b601446c64a Mon Sep 17 00:00:00 2001
From: Amulya
Date: Thu, 9 Aug 2018 10:40:08 +0530
Subject: gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros

Changed the enum gmmu_pgsz_gk20a into macros and updated all instances
of it. The enum gmmu_pgsz_gk20a was being used in for loops, where it
was compared with an integer. This violates MISRA rule 10.4, which only
allows arithmetic operations on operands of the same essential type
category. Changing this enum into macros fixes the violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

(limited to 'drivers/gpu/nvgpu/gp10b')

diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 75ae3d04..5a24adc0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -126,8 +126,8 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pde_v[4] = {0, 0, 0, 0};

-	small_valid = attrs->pgsz == gmmu_page_size_small;
-	big_valid = attrs->pgsz == gmmu_page_size_big;
+	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
+	big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG;

 	if (small_valid)
 		small_addr = phys_addr >> gmmu_new_dual_pde_address_shift_v();
@@ -274,15 +274,14 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
  * level having a different number of entries depending on whether it holds
  * big pages or small pages.
  */
-static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
-				const struct gk20a_mmu_level *l,
-				struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+				struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	u32 pde_base = pd->mem_offs / sizeof(u32);
 	u32 pde_offset = pde_base + pd_offset_from_index(l, pd_idx);
 	u32 pde_v[GP10B_PDE0_ENTRY_SIZE >> 2];
 	u32 i;
-	enum gmmu_pgsz_gk20a pgsz = gmmu_nr_page_sizes;
+	u32 pgsz = GMMU_NR_PAGE_SIZES;

 	if (!pd->mem)
 		return pgsz;
@@ -302,7 +301,7 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 			gmmu_new_dual_pde_address_shift_v();

 		if (addr)
-			pgsz = gmmu_page_size_small;
+			pgsz = GMMU_PAGE_SIZE_SMALL;
 	}

 	if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
@@ -318,12 +317,12 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 			 * both small and big to be set, the PDE is not valid
 			 * and may be corrupted
 			 */
-			if (pgsz == gmmu_page_size_small) {
+			if (pgsz == GMMU_PAGE_SIZE_SMALL) {
 				nvgpu_err(g,
 					"both small and big apertures enabled");
-				return gmmu_nr_page_sizes;
+				return GMMU_NR_PAGE_SIZES;
 			}

-			pgsz = gmmu_page_size_big;
+			pgsz = GMMU_PAGE_SIZE_BIG;
 		}
 	}
--
cgit v1.2.2
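
Editorial note: for readers unfamiliar with MISRA C:2012 rule 10.4, the sketch
below illustrates the pattern this patch removes. It is not nvgpu code and all
names in it are illustrative; it only shows, under those assumptions, why a
named enum used against a u32 loop counter violates the rule while plain
unsigned macros do not.

/*
 * Editorial sketch, not nvgpu code: all identifiers are made up.
 * Shows the MISRA C:2012 rule 10.4 pattern addressed by the patch.
 */
#include <stdint.h>

/* Before: page-size indices as a named enum. */
enum pgsz_example { PGSZ_SMALL, PGSZ_BIG, PGSZ_KERNEL, PGSZ_NR };

static uint32_t count_before(void)
{
	uint32_t n = 0;
	uint32_t i;

	/*
	 * "i < PGSZ_NR" compares an essentially unsigned operand with an
	 * essentially enum operand: different essential type categories,
	 * which rule 10.4 forbids.
	 */
	for (i = 0; i < PGSZ_NR; i++)
		n++;

	return n;
}

/* After: plain unsigned macros, analogous to the GMMU_PAGE_SIZE_* style. */
#define PGSZ_SMALL_IDX	0U
#define PGSZ_BIG_IDX	1U
#define PGSZ_KERNEL_IDX	2U
#define PGSZ_NR_IDX	3U

static uint32_t count_after(void)
{
	uint32_t n = 0;
	uint32_t i;

	/* Both operands are essentially unsigned: compliant with rule 10.4. */
	for (i = 0; i < PGSZ_NR_IDX; i++)
		n++;

	return n;
}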