From 7405f69ae2cb60a90885064533d8c9e95dd2de4d Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Wed, 5 Sep 2018 16:30:08 -0700
Subject: gpu: nvgpu: Fix MISRA 21.2 violations (pd_cache.c)

MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. Fixes for all the pd_cache functions that
use '__' prefixes. This was trivial: the '__' prefix was simply
deleted.

JIRA NVGPU-1029

Change-Id: Ia91dabe3ef97fb17a2a85105935fb3a72d7c2c5e
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1813643
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu.c     |  4 ++--
 drivers/gpu/nvgpu/common/mm/pd_cache.c | 20 ++++++++++----------
 drivers/gpu/nvgpu/common/mm/vm.c       |  6 +++---
 drivers/gpu/nvgpu/include/nvgpu/gmmu.h |  8 ++++----
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index dbcc8ac2..e21ffd8d 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -200,7 +200,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 */
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
-	err = __nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
+	err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
 	if (WARN_ON(err)) {
 		return err;
 	}
@@ -277,7 +277,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		return 0;
 	}
 
-	err = __nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
+	err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
 	if (err) {
 		nvgpu_info(vm->mm->g, "error allocating page directory!");
 		return err;
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 77e20c38..dae6d34e 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -64,10 +64,10 @@
  * lists. For a 4Kb page NVGPU_PD_CACHE_COUNT is 4. This is enough space for
  * 256, 512, 1024, and 2048 byte PDs.
  *
- * __nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
+ * nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
  * size is page size or larger and choose the correct allocation scheme - either
- * from the PD cache or directly. Similarly __nvgpu_pd_free() will free a PD
- * allocated by __nvgpu_pd_alloc().
+ * from the PD cache or directly. Similarly nvgpu_pd_free() will free a PD
+ * allocated by nvgpu_pd_alloc().
  *
  * Since the top level PD (the PDB) is a page aligned pointer but less than a
  * page size the direct functions must be used for allocating PDBs. Otherwise
@@ -150,8 +150,8 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
  * Note: this does not need the cache lock since it does not modify any of the
  * PD cache data structures.
  */
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
-				  struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+				struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
 	unsigned long flags = 0;
@@ -339,7 +339,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
  * cache logistics. Since on Parker and later GPUs some of the page directories
  * are smaller than a page packing these PDs together saves a lot of memory.
  */
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err;
@@ -349,7 +349,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	 * alloc.
 	 */
 	if (bytes >= PAGE_SIZE) {
-		err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+		err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
 		if (err) {
 			return err;
 		}
@@ -368,7 +368,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	return err;
 }
 
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 {
 	pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
 
@@ -448,7 +448,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
 	nvgpu_pd_cache_do_free(g, cache, pentry, pd);
 }
 
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
@@ -456,7 +456,7 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 	 * Simple case: just DMA free.
 	 */
 	if (!pd->cached) {
-		return __nvgpu_pd_cache_free_direct(g, pd);
+		return nvgpu_pd_cache_free_direct(g, pd);
 	}
 
 	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 27667f34..17e49969 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -90,7 +90,7 @@ static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	int i;
 
 	if (pd->mem) {
-		__nvgpu_pd_free(vm, pd);
+		nvgpu_pd_free(vm, pd);
 		pd->mem = NULL;
 	}
 
@@ -110,7 +110,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	struct gk20a *g = vm->mm->g;
 	int i;
 
-	__nvgpu_pd_cache_free_direct(g, pdb);
+	nvgpu_pd_cache_free_direct(g, pdb);
 
 	if (!pdb->entries) {
 		return;
@@ -522,7 +522,7 @@ clean_up_allocators:
 	}
 clean_up_page_tables:
 	/* Cleans up nvgpu_gmmu_init_page_table() */
-	__nvgpu_pd_cache_free_direct(g, &vm->pdb);
+	nvgpu_pd_cache_free_direct(g, &vm->pdb);
 clean_up_vgpu_vm:
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	if (g->is_virtual)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index da27e29c..aa110f18 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -251,11 +251,11 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm,
 		      struct nvgpu_mem *mem,
 		      u64 gpu_va);
 
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 				struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
 int nvgpu_pd_cache_init(struct gk20a *g);
 void nvgpu_pd_cache_fini(struct gk20a *g);
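
A note on the rule behind the rename: MISRA C:2012 Rule 21.2 forbids
declaring reserved identifiers, and the C standard (C99 7.1.3) reserves
every identifier beginning with a double underscore, or an underscore
followed by an uppercase letter, for the implementation. A minimal
standalone sketch of the pattern this patch applies; the name
'pd_demo_alloc' is hypothetical, invented only for illustration:

    #include <stdio.h>

    /*
     * Non-compliant (MISRA 21.2): a '__' prefix puts the name in the
     * namespace reserved for the compiler and libc:
     *
     *     int __pd_demo_alloc(unsigned int bytes);
     *
     * Compliant: the same interface with the '__' prefix deleted.
     */
    static int pd_demo_alloc(unsigned int bytes)
    {
    	/* Return 0 on success, mirroring the nvgpu alloc convention. */
    	return (bytes > 0U) ? 0 : -1;
    }

    int main(void)
    {
    	printf("status: %d\n", pd_demo_alloc(256U));
    	return 0;
    }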
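
The doc-comment hunk above says nvgpu_pd_alloc() chooses an allocation
scheme by PD size. Pieced together from the visible hunks, here is a
minimal sketch of that dispatch; the cached branch (the shape of the
nvgpu_pd_cache_alloc() call and the nvgpu_mutex_release() counterpart)
is an assumption based on the partial context lines, not code this
diff shows in full:

    #include <nvgpu/gmmu.h>	/* in-tree header touched by this patch */

    int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
    {
    	struct gk20a *g = gk20a_from_vm(vm);
    	int err;

    	/*
    	 * Page-sized or larger PDs (such as a PDB) cannot share a page
    	 * with other PDs, so DMA-allocate them directly.
    	 */
    	if (bytes >= PAGE_SIZE) {
    		err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
    		if (err) {
    			return err;
    		}
    		return 0;
    	}

    	/*
    	 * Sub-page PDs (256, 512, 1024, 2048 bytes with 4Kb pages) are
    	 * packed together by the PD cache under its lock. Assumed call
    	 * shape; only the lock acquire is visible in the hunks above.
    	 */
    	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
    	err = nvgpu_pd_cache_alloc(g, g->mm.pd_cache, pd, bytes);
    	nvgpu_mutex_release(&g->mm.pd_cache->lock);

    	return err;
    }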