From 1c13da1d29c344cb60953eabeca56b601446c64a Mon Sep 17 00:00:00 2001
From: Amulya
Date: Thu, 9 Aug 2018 10:40:08 +0530
Subject: gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros

Changed the enum gmmu_pgsz_gk20a into macros and changed all the
instances of it.

The enum gmmu_pgsz_gk20a was being used in for loops, where it was
compared with an integer. This violates MISRA rule 10.4, which only
allows arithmetic operations on operands of the same essential type
category. Changing this enum into macros will fix this violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/mm.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

(limited to 'drivers/gpu/nvgpu/common/mm/mm.c')

diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 2e46e211..6be619ed 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -34,14 +34,14 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 			     u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
 	vm_area = nvgpu_vm_area_find(vm, base);
 	if (!vm_area) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	return vm_area->pgsz_idx;
@@ -50,19 +50,19 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
+static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 					u64 base, u64 size)
 {
 	if (!base) {
-		if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			return gmmu_page_size_big;
+		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			return GMMU_PAGE_SIZE_BIG;
 		}
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	} else {
 		if (base < __nv_gmmu_va_small_page_limit()) {
-			return gmmu_page_size_small;
+			return GMMU_PAGE_SIZE_SMALL;
 		} else {
-			return gmmu_page_size_big;
+			return GMMU_PAGE_SIZE_BIG;
 		}
 	}
 }
@@ -88,12 +88,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
  * - Regardless of buffer size use small pages since we have no
  * - guarantee of contiguity.
  */
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (!vm->big_pages) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
@@ -104,11 +104,11 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 		return __get_pte_size_fixed_map(vm, base, size);
 	}
 
-	if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
+	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
 	    nvgpu_iommuable(g)) {
-		return gmmu_page_size_big;
+		return GMMU_PAGE_SIZE_BIG;
 	}
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
 int nvgpu_mm_suspend(struct gk20a *g)
--
cgit v1.2.2
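
Note: the macro definitions themselves live in the GMMU headers, not in
mm.c, so they do not appear in this diff. Below is a minimal, hypothetical
sketch of the before/after shape of the conversion. Only GMMU_PAGE_SIZE_SMALL
and GMMU_PAGE_SIZE_BIG are visible in the hunks above; the kernel page size,
the size count, the 0/1/2 values, and the demo loop are assumptions added
here for illustration, not taken from this commit.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;	/* kernel-style alias, for a standalone sketch */

/* Before (assumed shape): enumerators carry an enum essential type, so
 * using them as bounds for a u32 loop counter mixes essential type
 * categories and violates MISRA C:2012 rule 10.4. */
enum gmmu_pgsz_gk20a {
	gmmu_page_size_small  = 0,
	gmmu_page_size_big    = 1,
	gmmu_page_size_kernel = 2,	/* assumed by analogy */
	gmmu_nr_page_sizes    = 3,	/* assumed by analogy */
};

/* After (assumed shape): plain unsigned constants share the "unsigned"
 * essential type category with u32, so the comparisons below are
 * rule-10.4 clean. */
#define GMMU_PAGE_SIZE_SMALL	0U
#define GMMU_PAGE_SIZE_BIG	1U
#define GMMU_PAGE_SIZE_KERNEL	2U	/* assumed by analogy */
#define GMMU_NR_PAGE_SIZES	3U	/* assumed by analogy */

int main(void)
{
	u32 pgsz_idx;

	/* The for-loop pattern the commit message refers to: with the
	 * macros, both operands of "<" are essentially unsigned. */
	for (pgsz_idx = GMMU_PAGE_SIZE_SMALL;
	     pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
		printf("page size index %u\n", pgsz_idx);
	}

	return 0;
}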