From 1c13da1d29c344cb60953eabeca56b601446c64a Mon Sep 17 00:00:00 2001
From: Amulya
Date: Thu, 9 Aug 2018 10:40:08 +0530
Subject: gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros

Changed the enum gmmu_pgsz_gk20a into macros and updated all of its
instances. The enum gmmu_pgsz_gk20a was used in for loops, where it was
compared against an integer. This violates MISRA rule 10.4, which only
allows arithmetic operations on operands of the same essential type
category. Changing the enum into macros fixes this violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
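
Note: the header that defines the replacement macros is outside the
'common' directory this diff is filtered to, so the definitions do not
appear below. Given the names used in the diff, and the low-level MMU
code's assumption that 0 is small and 1 is big pages, they presumably
look like the following sketch; the exact values and the unsigned 'U'
suffix are assumptions, not taken from this diff:

/* Sketch of the presumed header change (replaces enum gmmu_pgsz_gk20a). */
#define GMMU_PAGE_SIZE_SMALL    0U
#define GMMU_PAGE_SIZE_BIG      1U
#define GMMU_PAGE_SIZE_KERNEL   2U
#define GMMU_NR_PAGE_SIZES      3U

With unsigned macros, a u32 loop counter (as in nvgpu_vm_area_alloc()
below) is compared against an operand of the same essential type
category, which is what MISRA rule 10.4 requires.
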
 drivers/gpu/nvgpu/common/mm/buddy_allocator.c | 12 +++++-----
 drivers/gpu/nvgpu/common/mm/gmmu.c            | 14 ++++++------
 drivers/gpu/nvgpu/common/mm/mm.c              | 26 +++++++++++-----------
 drivers/gpu/nvgpu/common/mm/vm.c              | 32 +++++++++++++--------------
 drivers/gpu/nvgpu/common/mm/vm_area.c         | 10 ++++-----
 5 files changed, 46 insertions(+), 48 deletions(-)

(limited to 'drivers/gpu/nvgpu/common')

diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 365f3b7b..f8c97839 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -142,7 +142,7 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
 	 * without cycling through the entire list.
 	 */
 	if (a->flags & GPU_ALLOC_GVA_SPACE &&
-	    b->pte_size == gmmu_page_size_big) {
+	    b->pte_size == GMMU_PAGE_SIZE_BIG) {
 		nvgpu_list_add_tail(&b->buddy_entry, list);
 	} else {
 		nvgpu_list_add(&b->buddy_entry, list);
@@ -487,7 +487,7 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
 	}
 
 	if (a->flags & GPU_ALLOC_GVA_SPACE &&
-	    pte_size == gmmu_page_size_big) {
+	    pte_size == GMMU_PAGE_SIZE_BIG) {
 		bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
 					    nvgpu_buddy, buddy_entry);
 	} else {
@@ -844,8 +844,8 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
 		alloc_dbg(balloc_owner(a),
 			  "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s",
 			  addr, order, len,
-			  pte_size == gmmu_page_size_big ? "big" :
-			  pte_size == gmmu_page_size_small ? "small" :
+			  pte_size == GMMU_PAGE_SIZE_BIG ? "big" :
+			  pte_size == GMMU_PAGE_SIZE_SMALL ? "small" :
 			  "NA/any");
 	} else {
 		alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
@@ -882,9 +882,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 	/* Check that the page size is valid. */
 	if (a->flags & GPU_ALLOC_GVA_SPACE && a->vm->big_pages) {
 		if (page_size == a->vm->big_page_size) {
-			pte_size = gmmu_page_size_big;
+			pte_size = GMMU_PAGE_SIZE_BIG;
 		} else if (page_size == SZ_4K) {
-			pte_size = gmmu_page_size_small;
+			pte_size = GMMU_PAGE_SIZE_SMALL;
 		} else {
 			goto fail;
 		}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 73a37b57..02273393 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -109,7 +109,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 				   sgt,    /* sg list */
 				   0,      /* sg offset */
 				   size,
-				   gmmu_page_size_kernel,
+				   GMMU_PAGE_SIZE_KERNEL,
 				   0,      /* kind */
 				   0,      /* ctag_offset */
 				   flags, rw_flag,
@@ -169,7 +169,7 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
 	g->ops.mm.gmmu_unmap(vm,
 			     gpu_va,
 			     mem->size,
-			     gmmu_page_size_kernel,
+			     GMMU_PAGE_SIZE_KERNEL,
 			     mem->free_gpu_va,
 			     gk20a_mem_flag_none,
 			     false,
@@ -609,8 +609,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 	/* note: here we need to map kernel to small, since the
 	 * low-level mmu code assumes 0 is small and 1 is big pages */
-	if (attrs->pgsz == gmmu_page_size_kernel) {
-		attrs->pgsz = gmmu_page_size_small;
+	if (attrs->pgsz == GMMU_PAGE_SIZE_KERNEL) {
+		attrs->pgsz = GMMU_PAGE_SIZE_SMALL;
 	}
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
@@ -676,7 +676,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			  struct nvgpu_sgt *sgt,
 			  u64 buffer_offset,
 			  u64 size,
-			  int pgsz_idx,
+			  u32 pgsz_idx,
 			  u8 kind_v,
 			  u32 ctag_offset,
 			  u32 flags,
@@ -764,7 +764,7 @@ fail_alloc:
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 			     u64 vaddr,
 			     u64 size,
-			     int pgsz_idx,
+			     u32 pgsz_idx,
 			     bool va_allocated,
 			     enum gk20a_mem_rw_flag rw_flag,
 			     bool sparse,
@@ -865,7 +865,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
 	attrs->pgsz = l->get_pgsz(g, l, pd, pd_idx);
 
-	if (attrs->pgsz >= gmmu_nr_page_sizes) {
+	if (attrs->pgsz >= GMMU_NR_PAGE_SIZES) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 2e46e211..6be619ed 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -34,14 +34,14 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 		u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
 	vm_area = nvgpu_vm_area_find(vm, base);
 	if (!vm_area) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	return vm_area->pgsz_idx;
@@ -50,19 +50,19 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
+static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 		u64 base, u64 size)
 {
 	if (!base) {
-		if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			return gmmu_page_size_big;
+		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			return GMMU_PAGE_SIZE_BIG;
 		}
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	} else {
 		if (base < __nv_gmmu_va_small_page_limit()) {
-			return gmmu_page_size_small;
+			return GMMU_PAGE_SIZE_SMALL;
 		} else {
-			return gmmu_page_size_big;
+			return GMMU_PAGE_SIZE_BIG;
 		}
 	}
 }
@@ -88,12 +88,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
  *   - Regardless of buffer size use small pages since we have no
  *   - guarantee of contiguity.
  */
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (!vm->big_pages) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
@@ -104,11 +104,11 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 		return __get_pte_size_fixed_map(vm, base, size);
 	}
 
-	if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
+	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
 	    nvgpu_iommuable(g)) {
-		return gmmu_page_size_big;
+		return GMMU_PAGE_SIZE_BIG;
 	}
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
 int nvgpu_mm_suspend(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7d97b7b7..bd6c1e87 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -41,7 +41,7 @@
 struct nvgpu_ctag_buffer_info {
 	u64 size;
-	enum gmmu_pgsz_gk20a pgsz_idx;
+	u32 pgsz_idx;
 	u32 flags;
 
 	s16 compr_kind;
@@ -123,8 +123,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	pdb->entries = NULL;
 }
 
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-			enum gmmu_pgsz_gk20a pgsz_idx)
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 {
 	struct gk20a *g = vm->mm->g;
@@ -139,12 +138,12 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 		return 0;
 	}
 
-	if (pgsz_idx >= gmmu_nr_page_sizes) {
+	if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
 	}
 
-	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+	if ((pgsz_idx == GMMU_PAGE_SIZE_BIG) && !vm->big_pages) {
 		nvgpu_err(g, "(%s) unsupportd page size requested", vma->name);
 		return 0;
 	}
@@ -161,8 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 	return addr;
 }
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-		       enum gmmu_pgsz_gk20a pgsz_idx)
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
@@ -264,7 +262,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
 	if (err) {
 		nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
-		nvgpu_free(vm->vma[gmmu_page_size_small],
+		nvgpu_free(vm->vma[GMMU_PAGE_SIZE_SMALL],
 			   vm->sema_pool->gpu_va);
 		return err;
 	}
@@ -308,22 +306,22 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 
 	vm->mm = mm;
 
-	vm->gmmu_page_sizes[gmmu_page_size_small]  = SZ_4K;
-	vm->gmmu_page_sizes[gmmu_page_size_big]    = big_page_size;
-	vm->gmmu_page_sizes[gmmu_page_size_kernel] = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL]  = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]    = big_page_size;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_KERNEL] = SZ_4K;
 
 	/* Set up vma pointers. */
-	vm->vma[gmmu_page_size_small]  = &vm->user;
-	vm->vma[gmmu_page_size_big]    = &vm->user;
-	vm->vma[gmmu_page_size_kernel] = &vm->kernel;
+	vm->vma[GMMU_PAGE_SIZE_SMALL]  = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_BIG]    = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-		vm->vma[gmmu_page_size_big] = &vm->user_lp;
+		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}
 
 	vm->va_start  = low_hole;
 	vm->va_limit  = aperture_size;
 
-	vm->big_page_size     = vm->gmmu_page_sizes[gmmu_page_size_big];
+	vm->big_page_size     = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
 	vm->userspace_managed = userspace_managed;
 	vm->mmu_levels        = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
@@ -876,7 +874,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	align = nvgpu_sgt_alignment(g, sgt);
 	if (g->mm.disable_bigpage) {
-		binfo.pgsz_idx = gmmu_page_size_small;
+		binfo.pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 	} else {
 		binfo.pgsz_idx = __get_pte_size(vm, map_addr,
 						min_t(u64, binfo.size, align));
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index b8fecbfc..7e2b5c34 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -43,7 +43,7 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 }
 
 int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
-				  u64 map_addr, u64 map_size, int pgsz_idx,
+				  u64 map_addr, u64 map_size, u32 pgsz_idx,
 				  struct nvgpu_vm_area **pvm_area)
 {
 	struct gk20a *g = vm->mm->g;
@@ -99,19 +99,19 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	struct nvgpu_allocator *vma;
 	struct nvgpu_vm_area *vm_area;
 	u64 vaddr_start = 0;
-	int pgsz_idx = gmmu_page_size_small;
+	u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 
 	nvgpu_log(g, gpu_dbg_map,
 		  "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x",
 		  page_size, pages, *addr, flags);
 
-	for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
+	for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
 		if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
 			break;
 		}
 	}
 
-	if (pgsz_idx > gmmu_page_size_big) {
+	if (pgsz_idx > GMMU_PAGE_SIZE_BIG) {
 		return -EINVAL;
 	}
@@ -122,7 +122,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	 */
 	nvgpu_speculation_barrier();
 
-	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) {
+	if (!vm->big_pages && pgsz_idx == GMMU_PAGE_SIZE_BIG) {
 		return -EINVAL;
 	}
-- 
cgit v1.2.2