path: root/drivers/gpu/nvgpu/common/mm/vm.c
author	Amulya <Amurthyreddy@nvidia.com>	2018-08-09 01:10:08 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:31:33 -0400
commit	1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree	145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/common/mm/vm.c
parent	f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and changed all the
instances of it. The enum gmmu_pgsz_gk20a was being used in for loops,
where it was compared with an integer. This violates MISRA rule 10.4,
which only allows arithmetic operations on operands of the same
essential type category. Changing this enum into macros fixes the
violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
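For reference, a minimal sketch of the before/after pattern the commit message describes. The macro names match the ones introduced in the diff below; the numeric values, the u32 typedef stand-in, and the loop body are assumptions for illustration, not the actual nvgpu header contents.

/*
 * Illustrative sketch only. The real definitions live in the nvgpu GMMU
 * headers; the numeric values below are assumptions for this example.
 */
typedef unsigned int u32;	/* stand-in for the kernel/nvgpu u32 typedef */

#define GMMU_PAGE_SIZE_SMALL	0U
#define GMMU_PAGE_SIZE_BIG	1U
#define GMMU_PAGE_SIZE_KERNEL	2U
#define GMMU_NR_PAGE_SIZES	3U

/*
 * Before the change, pgsz_idx was an enum gmmu_pgsz_gk20a, so comparing it
 * against an integer loop bound mixed essential type categories (MISRA 10.4).
 * With plain u32 macros, both operands are essentially unsigned.
 */
static void walk_page_sizes(void)
{
	u32 pgsz_idx;

	for (pgsz_idx = 0U; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
		/* per-page-size work would go here */
	}
}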
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	32
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7d97b7b7..bd6c1e87 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -41,7 +41,7 @@
 
 struct nvgpu_ctag_buffer_info {
 	u64 size;
-	enum gmmu_pgsz_gk20a pgsz_idx;
+	u32 pgsz_idx;
 	u32 flags;
 
 	s16 compr_kind;
@@ -123,8 +123,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	pdb->entries = NULL;
 }
 
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-		enum gmmu_pgsz_gk20a pgsz_idx)
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 
 {
 	struct gk20a *g = vm->mm->g;
@@ -139,12 +138,12 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 		return 0;
 	}
 
-	if (pgsz_idx >= gmmu_nr_page_sizes) {
+	if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
 	}
 
-	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+	if ((pgsz_idx == GMMU_PAGE_SIZE_BIG) && !vm->big_pages) {
 		nvgpu_err(g, "(%s) unsupportd page size requested", vma->name);
 		return 0;
 	}
@@ -161,8 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 	return addr;
 }
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-		enum gmmu_pgsz_gk20a pgsz_idx)
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
@@ -264,7 +262,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
 	if (err) {
 		nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
-		nvgpu_free(vm->vma[gmmu_page_size_small],
+		nvgpu_free(vm->vma[GMMU_PAGE_SIZE_SMALL],
 			   vm->sema_pool->gpu_va);
 		return err;
 	}
@@ -308,22 +306,22 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 
 	vm->mm = mm;
 
-	vm->gmmu_page_sizes[gmmu_page_size_small] = SZ_4K;
-	vm->gmmu_page_sizes[gmmu_page_size_big] = big_page_size;
-	vm->gmmu_page_sizes[gmmu_page_size_kernel] = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL] = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] = big_page_size;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_KERNEL] = SZ_4K;
 
 	/* Set up vma pointers. */
-	vm->vma[gmmu_page_size_small] = &vm->user;
-	vm->vma[gmmu_page_size_big] = &vm->user;
-	vm->vma[gmmu_page_size_kernel] = &vm->kernel;
+	vm->vma[GMMU_PAGE_SIZE_SMALL] = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-		vm->vma[gmmu_page_size_big] = &vm->user_lp;
+		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}
 
 	vm->va_start = low_hole;
 	vm->va_limit = aperture_size;
 
-	vm->big_page_size = vm->gmmu_page_sizes[gmmu_page_size_big];
+	vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
 	vm->userspace_managed = userspace_managed;
 	vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
 
@@ -876,7 +874,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	align = nvgpu_sgt_alignment(g, sgt);
 	if (g->mm.disable_bigpage) {
-		binfo.pgsz_idx = gmmu_page_size_small;
+		binfo.pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 	} else {
 		binfo.pgsz_idx = __get_pte_size(vm, map_addr,
 				min_t(u64, binfo.size, align));