author    Alex Waterman <alexw@nvidia.com>                      2018-09-05 19:30:08 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>   2018-09-12 20:48:28 -0400
commit    7405f69ae2cb60a90885064533d8c9e95dd2de4d
tree      24b474ad5cc8750537b90270134b05c66e32bb7c
parent    2c95becc9edf5e9ebfa392c4b6c3fbd0b9580f8d
gpu: nvgpu: Fix MISRA 21.2 violations (pd_cache.c)

MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. Fixes for all the pd_cache functions that use
'__' prefixes. This was trivial: the '__' prefix was simply deleted.

JIRA NVGPU-1029

Change-Id: Ia91dabe3ef97fb17a2a85105935fb3a72d7c2c5e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813643
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
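For context, MISRA C:2012 Rule 21.2 concerns declaring identifiers that the C implementation reserves for itself. A minimal sketch of the naming hazard the commit removes; the identifiers below are hypothetical and not taken from the nvgpu tree:

/*
 * Illustrative only. Identifiers beginning with an underscore are reserved
 * for the implementation (libc/compiler), so defining our own '__'-prefixed
 * symbols risks collisions and violates MISRA 21.2.
 */
int __pd_helper(int x);	/* non-compliant: '__' prefix is reserved */
int pd_helper(int x);	/* compliant: same symbol, no reserved prefix */

The diff below applies exactly this kind of rename to the pd_cache entry points.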
 drivers/gpu/nvgpu/common/mm/gmmu.c     |  4 ++--
 drivers/gpu/nvgpu/common/mm/pd_cache.c | 20 ++++++++++----------
 drivers/gpu/nvgpu/common/mm/vm.c       |  6 +++---
 drivers/gpu/nvgpu/include/nvgpu/gmmu.h |  8 ++++----
 4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index dbcc8ac2..e21ffd8d 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -200,7 +200,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	 */
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
-	err = __nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
+	err = nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
 	if (WARN_ON(err)) {
 		return err;
 	}
@@ -277,7 +277,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 		return 0;
 	}
 
-	err = __nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
+	err = nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
 	if (err) {
 		nvgpu_info(vm->mm->g, "error allocating page directory!");
 		return err;
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 77e20c38..dae6d34e 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -64,10 +64,10 @@
  * lists. For a 4Kb page NVGPU_PD_CACHE_COUNT is 4. This is enough space for
  * 256, 512, 1024, and 2048 byte PDs.
  *
- * __nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
+ * nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
  * size is page size or larger and choose the correct allocation scheme - either
- * from the PD cache or directly. Similarly __nvgpu_pd_free() will free a PD
- * allocated by __nvgpu_pd_alloc().
+ * from the PD cache or directly. Similarly nvgpu_pd_free() will free a PD
+ * allocated by nvgpu_pd_alloc().
  *
  * Since the top level PD (the PDB) is a page aligned pointer but less than a
  * page size the direct functions must be used for allocating PDBs. Otherwise
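The comment block above describes a size-based dispatch: a PD of a page or more is allocated directly, while smaller PDs come from the cache, and the page-aligned PDB always takes the direct path. A minimal standalone sketch of that decision, assuming a 4 KiB page size and a hypothetical helper name:

#include <stdbool.h>

#define SKETCH_PAGE_SIZE 4096u	/* assumed 4 KiB page, matching the comment above */

/*
 * Hypothetical helper, not driver code: returns true when a PD of 'bytes'
 * should bypass the cache and be allocated directly.
 */
static bool pd_use_direct_alloc(unsigned int bytes)
{
	return bytes >= SKETCH_PAGE_SIZE;
}

The hunks that follow show the same size check inside nvgpu_pd_alloc() itself.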
@@ -150,8 +150,8 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
  * Note: this does not need the cache lock since it does not modify any of the
  * PD cache data structures.
  */
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
-				  struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+				struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
 	unsigned long flags = 0;
@@ -339,7 +339,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
  * cache logistics. Since on Parker and later GPUs some of the page directories
  * are smaller than a page packing these PDs together saves a lot of memory.
  */
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	int err;
@@ -349,7 +349,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	 * alloc.
 	 */
 	if (bytes >= PAGE_SIZE) {
-		err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+		err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
 		if (err) {
 			return err;
 		}
@@ -368,7 +368,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 	return err;
 }
 
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 {
 	pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
 
@@ -448,7 +448,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
 	nvgpu_pd_cache_do_free(g, cache, pentry, pd);
 }
 
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
@@ -456,7 +456,7 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 	 * Simple case: just DMA free.
 	 */
 	if (!pd->cached) {
-		return __nvgpu_pd_cache_free_direct(g, pd);
+		return nvgpu_pd_cache_free_direct(g, pd);
 	}
 
 	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 27667f34..17e49969 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -90,7 +90,7 @@ static void __nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	int i;
 
 	if (pd->mem) {
-		__nvgpu_pd_free(vm, pd);
+		nvgpu_pd_free(vm, pd);
 		pd->mem = NULL;
 	}
 
@@ -110,7 +110,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	struct gk20a *g = vm->mm->g;
 	int i;
 
-	__nvgpu_pd_cache_free_direct(g, pdb);
+	nvgpu_pd_cache_free_direct(g, pdb);
 
 	if (!pdb->entries) {
 		return;
@@ -522,7 +522,7 @@ clean_up_allocators:
 	}
 clean_up_page_tables:
 	/* Cleans up nvgpu_gmmu_init_page_table() */
-	__nvgpu_pd_cache_free_direct(g, &vm->pdb);
+	nvgpu_pd_cache_free_direct(g, &vm->pdb);
 clean_up_vgpu_vm:
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	if (g->is_virtual)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index da27e29c..aa110f18 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -251,11 +251,11 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm,
 			struct nvgpu_mem *mem,
 			u64 gpu_va);
 
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes);
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd);
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 		struct nvgpu_gmmu_pd *pd, u32 bytes);
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd);
 int nvgpu_pd_cache_init(struct gk20a *g);
 void nvgpu_pd_cache_fini(struct gk20a *g);
 
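As a usage note on the renamed interface declared above, here is a hedged caller sketch (the function name and the 2 KiB size are hypothetical, and error handling is abbreviated) pairing nvgpu_pd_alloc() with nvgpu_pd_free(); the _direct variants are the ones used for the page-aligned PDB:

#include <nvgpu/gmmu.h>

/* Hypothetical caller: allocate a sub-page PD, use it, then release it. */
static int pd_roundtrip_example(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
{
	int err = nvgpu_pd_alloc(vm, pd, 2048U);	/* small PD from the cache */

	if (err != 0) {
		return err;
	}

	/* ... program directory entries through pd->mem ... */

	nvgpu_pd_free(vm, pd);
	return 0;
}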