From 2c95becc9edf5e9ebfa392c4b6c3fbd0b9580f8d Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Wed, 5 Sep 2018 16:09:43 -0700
Subject: gpu: nvgpu: Fix MISRA 21.2 violations (nvgpu_mem.c, mm.c)

MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. Handle the 21.2 fixes for nvgpu_mem.c and
mm.c; this deletes the '__' prefixes and slightly renames the
__nvgpu_aperture_mask() function since there's a coherent version and
a general version.

Change-Id: Iee871ad90db3f2622f9099bd9992eb994e0fbf34
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1813623
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/mm.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/gpu/nvgpu/common/mm/mm.c')

diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 988b1e5c..f97d9ebd 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -36,8 +36,8 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
-			     u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
+					   u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
@@ -52,8 +52,8 @@ u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
-				     u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm,
+					    u64 base, u64 size)
 {
 	if (!base) {
 		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
@@ -61,7 +61,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 		}
 		return GMMU_PAGE_SIZE_SMALL;
 	} else {
-		if (base < __nv_gmmu_va_small_page_limit()) {
+		if (base < nvgpu_gmmu_va_small_page_limit()) {
 			return GMMU_PAGE_SIZE_SMALL;
 		} else {
 			return GMMU_PAGE_SIZE_BIG;
@@ -90,7 +90,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
  *   - Regardless of buffer size use small pages since we have no
  *   - guarantee of contiguity.
  */
-u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
@@ -99,11 +99,11 @@ u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-		return __get_pte_size_split_addr(vm, base, size);
+		return nvgpu_vm_get_pte_size_split_addr(vm, base, size);
 	}
 
 	if (base) {
-		return __get_pte_size_fixed_map(vm, base, size);
+		return nvgpu_vm_get_pte_size_fixed_map(vm, base, size);
 	}
 
 	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
-- 
cgit v1.2.2
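
Background on the rule being fixed: C11 7.1.3 reserves identifiers that begin
with an underscore followed by another underscore (or an uppercase letter) for
the implementation, and MISRA C:2012 Rule 21.2 forbids declaring such reserved
identifiers in application code. The short C sketch below only illustrates the
rename pattern the patch applies; the example_ names in it are hypothetical and
are not part of the nvgpu tree.

#include <stdio.h>

/*
 * Non-compliant spelling, kept only as a comment -- a '__' prefix
 * encroaches on the namespace reserved for the implementation:
 *
 *     static unsigned int __get_pte_size(unsigned long size);
 */

/* Compliant: the reserved prefix is replaced by a subsystem prefix. */
static unsigned int example_get_pte_size(unsigned long size)
{
	/* Pick the "big" page index only for buffers of at least 64 KiB. */
	return (size >= 64UL * 1024UL) ? 1U : 0U;
}

int main(void)
{
	printf("pte size index: %u\n", example_get_pte_size(128UL * 1024UL));
	return 0;
}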