author     Alex Waterman <alexw@nvidia.com>                      2018-09-05 19:09:43 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-09-12 20:48:24 -0400
commit     2c95becc9edf5e9ebfa392c4b6c3fbd0b9580f8d (patch)
tree       acacafd7aef3db7b98245f144817895eb8b0ff09   /drivers/gpu/nvgpu/common/mm/mm.c
parent     ba2a632f039af2d13db9a0e4df9e34206116aef0 (diff)
gpu: nvgpu: Fix MISRA 21.2 violations (nvgpu_mem.c, mm.c)
MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. Handle the 21.2 fixes for nvgpu_mem.c and
mm.c; this deletes the '__' prefixes and slightly renames the
__nvgpu_aperture_mask() function since there's a coherent version and
a general version.

Change-Id: Iee871ad90db3f2622f9099bd9992eb994e0fbf34
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1813623
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
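The rename pattern the message describes is easy to show in isolation. Below is a minimal, self-contained sketch of the same MISRA C:2012 rule 21.2 fix, using hypothetical helper names (none of them are nvgpu functions): the first spelling uses the reserved '__' prefix, the second moves the same helper behind a subsystem prefix.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical example of a MISRA C:2012 rule 21.2 fix; these names are
 * illustrative and do not come from nvgpu.
 */

/* Non-compliant: the '__' prefix lands in the namespace reserved for libc. */
static uint32_t __pick_page_size(uint64_t size)
{
	return (size >= (64U * 1024U)) ? 65536U : 4096U;
}

/* Compliant: the same helper behind a subsystem prefix. */
static uint32_t demo_pick_page_size(uint64_t size)
{
	return (size >= (64U * 1024U)) ? 65536U : 4096U;
}

int main(void)
{
	printf("%u %u\n", __pick_page_size(4096U), demo_pick_page_size(1U << 20));
	return 0;
}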
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/mm.c')
-rw-r--r--   drivers/gpu/nvgpu/common/mm/mm.c | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 988b1e5c..f97d9ebd 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -36,8 +36,8 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
-			     u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_fixed_map(struct vm_gk20a *vm,
+					   u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
@@ -52,8 +52,8 @@ u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
-				     u64 base, u64 size)
+static u32 nvgpu_vm_get_pte_size_split_addr(struct vm_gk20a *vm,
+					    u64 base, u64 size)
 {
 	if (!base) {
 		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
@@ -61,7 +61,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 		}
 		return GMMU_PAGE_SIZE_SMALL;
 	} else {
-		if (base < __nv_gmmu_va_small_page_limit()) {
+		if (base < nvgpu_gmmu_va_small_page_limit()) {
 			return GMMU_PAGE_SIZE_SMALL;
 		} else {
 			return GMMU_PAGE_SIZE_BIG;
@@ -90,7 +90,7 @@ static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
  * - Regardless of buffer size use small pages since we have no
  * - guarantee of contiguity.
  */
-u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
@@ -99,11 +99,11 @@ u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-		return __get_pte_size_split_addr(vm, base, size);
+		return nvgpu_vm_get_pte_size_split_addr(vm, base, size);
 	}
 
 	if (base) {
-		return __get_pte_size_fixed_map(vm, base, size);
+		return nvgpu_vm_get_pte_size_fixed_map(vm, base, size);
 	}
 
 	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
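
Taken together, the renamed helpers form a small decision tree for picking a PTE page size: with split address spaces the VA range decides, otherwise a reserved (fixed-map) area or the buffer size decides. The standalone sketch below mirrors only that order of checks; the demo_* names, the simplified struct, and the 4 GiB small-page VA cutoff are assumptions for illustration, not nvgpu code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the nvgpu types and constants; not the real ones. */
enum { DEMO_PAGE_SIZE_SMALL = 0, DEMO_PAGE_SIZE_BIG = 1 };

struct demo_vm {
	bool unified_va;              /* stands in for NVGPU_MM_UNIFY_ADDRESS_SPACES */
	uint64_t big_page_size;       /* stands in for gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] */
	uint64_t small_page_va_limit; /* stands in for nvgpu_gmmu_va_small_page_limit() */
};

/* Split address space: the VA range alone picks the page size. */
static int demo_pte_size_split_addr(const struct demo_vm *vm, uint64_t base, uint64_t size)
{
	if (base == 0ULL)
		return (size >= vm->big_page_size) ? DEMO_PAGE_SIZE_BIG : DEMO_PAGE_SIZE_SMALL;
	return (base < vm->small_page_va_limit) ? DEMO_PAGE_SIZE_SMALL : DEMO_PAGE_SIZE_BIG;
}

/* Same order of checks as nvgpu_vm_get_pte_size(); the fixed-map lookup is elided. */
static int demo_pte_size(const struct demo_vm *vm, uint64_t base, uint64_t size)
{
	if (!vm->unified_va)
		return demo_pte_size_split_addr(vm, base, size);
	/* A reserved-area (fixed-map) lookup would run here when base != 0. */
	return (size >= vm->big_page_size) ? DEMO_PAGE_SIZE_BIG : DEMO_PAGE_SIZE_SMALL;
}

int main(void)
{
	struct demo_vm vm = { true, 64ULL * 1024ULL, 1ULL << 32 };

	printf("1 MiB -> %s pages\n",
	       demo_pte_size(&vm, 0, 1ULL << 20) == DEMO_PAGE_SIZE_BIG ? "big" : "small");
	printf("4 KiB -> %s pages\n",
	       demo_pte_size(&vm, 0, 4096) == DEMO_PAGE_SIZE_BIG ? "big" : "small");
	return 0;
}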