author	Amulya <Amurthyreddy@nvidia.com>	2018-08-09 01:10:08 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:31:33 -0400
commit	1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree	145a1a133b2d85592e0ddd1a25b12fc48e879829
parent	f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and changed all the
instances of it. The enum gmmu_pgsz_gk20a was being used in for loops,
where it was compared with an integer. This violates MISRA rule 10.4,
which only allows arithmetic operations on operands of the same
essential type category. Changing this enum into macros fixes this
violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
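The pattern behind the violation shows up in the nvgpu_vm_area_alloc()
hunk below, where a signed loop counter was compared against the enum's
bounds. A minimal before/after sketch of that pattern (illustrative
only, not copied verbatim from the tree):

	/* Before: an 'int' counter compared against essentially-enum
	 * bounds mixes essential type categories, which MISRA C:2012
	 * rule 10.4 forbids. */
	int pgsz_idx;
	for (pgsz_idx = gmmu_page_size_small;
	     pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
		/* probe vm->gmmu_page_sizes[pgsz_idx] ... */
	}

	/* After: the bounds are unsigned macros (0U..3U) and the counter
	 * is u32, so every operand is essentially unsigned and the
	 * comparison satisfies rule 10.4. */
	u32 pgsz_idx;
	for (pgsz_idx = GMMU_PAGE_SIZE_SMALL;
	     pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
		/* probe vm->gmmu_page_sizes[pgsz_idx] ... */
	}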
-rw-r--r--	drivers/gpu/nvgpu/common/mm/buddy_allocator.c	12
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c	14
-rw-r--r--	drivers/gpu/nvgpu/common/mm/mm.c	26
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	32
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm_area.c	10
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	18
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	14
-rw-r--r--	drivers/gpu/nvgpu/gp10b/mm_gp10b.c	19
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/gmmu.h	15
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/mm.h	4
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/vm.h	8
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/vm_area.h	2
-rw-r--r--	drivers/gpu/nvgpu/os/linux/ioctl_as.c	2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	12
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h	2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	30
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c	10
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c	6
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.h	2
22 files changed, 118 insertions(+), 128 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 365f3b7b..f8c97839 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -142,7 +142,7 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
 	 * without cycling through the entire list.
 	 */
 	if (a->flags & GPU_ALLOC_GVA_SPACE &&
-	    b->pte_size == gmmu_page_size_big) {
+	    b->pte_size == GMMU_PAGE_SIZE_BIG) {
 		nvgpu_list_add_tail(&b->buddy_entry, list);
 	} else {
 		nvgpu_list_add(&b->buddy_entry, list);
@@ -487,7 +487,7 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
 	}
 
 	if (a->flags & GPU_ALLOC_GVA_SPACE &&
-	    pte_size == gmmu_page_size_big) {
+	    pte_size == GMMU_PAGE_SIZE_BIG) {
 		bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
 					    nvgpu_buddy, buddy_entry);
 	} else {
@@ -844,8 +844,8 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
 		alloc_dbg(balloc_owner(a),
 			  "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s",
 			  addr, order, len,
-			  pte_size == gmmu_page_size_big ? "big" :
-			  pte_size == gmmu_page_size_small ? "small" :
+			  pte_size == GMMU_PAGE_SIZE_BIG ? "big" :
+			  pte_size == GMMU_PAGE_SIZE_SMALL ? "small" :
 			  "NA/any");
 	} else {
 		alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
@@ -882,9 +882,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 	/* Check that the page size is valid. */
 	if (a->flags & GPU_ALLOC_GVA_SPACE && a->vm->big_pages) {
 		if (page_size == a->vm->big_page_size) {
-			pte_size = gmmu_page_size_big;
+			pte_size = GMMU_PAGE_SIZE_BIG;
 		} else if (page_size == SZ_4K) {
-			pte_size = gmmu_page_size_small;
+			pte_size = GMMU_PAGE_SIZE_SMALL;
 		} else {
 			goto fail;
 		}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 73a37b57..02273393 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -109,7 +109,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 			       sgt,    /* sg list */
 			       0,      /* sg offset */
 			       size,
-			       gmmu_page_size_kernel,
+			       GMMU_PAGE_SIZE_KERNEL,
 			       0,      /* kind */
 			       0,      /* ctag_offset */
 			       flags, rw_flag,
@@ -169,7 +169,7 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
 	g->ops.mm.gmmu_unmap(vm,
 			     gpu_va,
 			     mem->size,
-			     gmmu_page_size_kernel,
+			     GMMU_PAGE_SIZE_KERNEL,
 			     mem->free_gpu_va,
 			     gk20a_mem_flag_none,
 			     false,
@@ -609,8 +609,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	/* note: here we need to map kernel to small, since the
 	 * low-level mmu code assumes 0 is small and 1 is big pages */
-	if (attrs->pgsz == gmmu_page_size_kernel) {
-		attrs->pgsz = gmmu_page_size_small;
+	if (attrs->pgsz == GMMU_PAGE_SIZE_KERNEL) {
+		attrs->pgsz = GMMU_PAGE_SIZE_SMALL;
 	}
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
@@ -676,7 +676,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			struct nvgpu_sgt *sgt,
 			u64 buffer_offset,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			u8 kind_v,
 			u32 ctag_offset,
 			u32 flags,
@@ -764,7 +764,7 @@ fail_alloc:
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			bool va_allocated,
 			enum gk20a_mem_rw_flag rw_flag,
 			bool sparse,
@@ -865,7 +865,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
 	attrs->pgsz = l->get_pgsz(g, l, pd, pd_idx);
 
-	if (attrs->pgsz >= gmmu_nr_page_sizes) {
+	if (attrs->pgsz >= GMMU_NR_PAGE_SIZES) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 2e46e211..6be619ed 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -34,14 +34,14 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 			     u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
 	vm_area = nvgpu_vm_area_find(vm, base);
 	if (!vm_area) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	return vm_area->pgsz_idx;
@@ -50,19 +50,19 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
+static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 					u64 base, u64 size)
 {
 	if (!base) {
-		if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			return gmmu_page_size_big;
+		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			return GMMU_PAGE_SIZE_BIG;
 		}
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	} else {
 		if (base < __nv_gmmu_va_small_page_limit()) {
-			return gmmu_page_size_small;
+			return GMMU_PAGE_SIZE_SMALL;
 		} else {
-			return gmmu_page_size_big;
+			return GMMU_PAGE_SIZE_BIG;
 		}
 	}
 }
@@ -88,12 +88,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
  * - Regardless of buffer size use small pages since we have no
  * - guarantee of contiguity.
  */
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (!vm->big_pages) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
@@ -104,11 +104,11 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 		return __get_pte_size_fixed_map(vm, base, size);
 	}
 
-	if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
+	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
 	    nvgpu_iommuable(g)) {
-		return gmmu_page_size_big;
+		return GMMU_PAGE_SIZE_BIG;
 	}
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
 int nvgpu_mm_suspend(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7d97b7b7..bd6c1e87 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -41,7 +41,7 @@
 
 struct nvgpu_ctag_buffer_info {
 	u64 size;
-	enum gmmu_pgsz_gk20a pgsz_idx;
+	u32 pgsz_idx;
 	u32 flags;
 
 	s16 compr_kind;
@@ -123,8 +123,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	pdb->entries = NULL;
 }
 
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-			enum gmmu_pgsz_gk20a pgsz_idx)
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 
 {
 	struct gk20a *g = vm->mm->g;
@@ -139,12 +138,12 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 		return 0;
 	}
 
-	if (pgsz_idx >= gmmu_nr_page_sizes) {
+	if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
 	}
 
-	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+	if ((pgsz_idx == GMMU_PAGE_SIZE_BIG) && !vm->big_pages) {
 		nvgpu_err(g, "(%s) unsupportd page size requested", vma->name);
 		return 0;
 	}
@@ -161,8 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 	return addr;
 }
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-		       enum gmmu_pgsz_gk20a pgsz_idx)
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
@@ -264,7 +262,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
 	if (err) {
 		nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
-		nvgpu_free(vm->vma[gmmu_page_size_small],
+		nvgpu_free(vm->vma[GMMU_PAGE_SIZE_SMALL],
 			   vm->sema_pool->gpu_va);
 		return err;
 	}
@@ -308,22 +306,22 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 
 	vm->mm = mm;
 
-	vm->gmmu_page_sizes[gmmu_page_size_small]  = SZ_4K;
-	vm->gmmu_page_sizes[gmmu_page_size_big]    = big_page_size;
-	vm->gmmu_page_sizes[gmmu_page_size_kernel] = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL]  = SZ_4K;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]    = big_page_size;
+	vm->gmmu_page_sizes[GMMU_PAGE_SIZE_KERNEL] = SZ_4K;
 
 	/* Set up vma pointers. */
-	vm->vma[gmmu_page_size_small]  = &vm->user;
-	vm->vma[gmmu_page_size_big]    = &vm->user;
-	vm->vma[gmmu_page_size_kernel] = &vm->kernel;
+	vm->vma[GMMU_PAGE_SIZE_SMALL]  = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_BIG]    = &vm->user;
+	vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-		vm->vma[gmmu_page_size_big] = &vm->user_lp;
+		vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
 	}
 
 	vm->va_start  = low_hole;
 	vm->va_limit  = aperture_size;
 
-	vm->big_page_size = vm->gmmu_page_sizes[gmmu_page_size_big];
+	vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
 	vm->userspace_managed = userspace_managed;
 	vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
 
@@ -876,7 +874,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	align = nvgpu_sgt_alignment(g, sgt);
 	if (g->mm.disable_bigpage) {
-		binfo.pgsz_idx = gmmu_page_size_small;
+		binfo.pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 	} else {
 		binfo.pgsz_idx = __get_pte_size(vm, map_addr,
 						min_t(u64, binfo.size, align));
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index b8fecbfc..7e2b5c34 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -43,7 +43,7 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 }
 
 int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
-				  u64 map_addr, u64 map_size, int pgsz_idx,
+				  u64 map_addr, u64 map_size, u32 pgsz_idx,
 				  struct nvgpu_vm_area **pvm_area)
 {
 	struct gk20a *g = vm->mm->g;
@@ -99,19 +99,19 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	struct nvgpu_allocator *vma;
 	struct nvgpu_vm_area *vm_area;
 	u64 vaddr_start = 0;
-	int pgsz_idx = gmmu_page_size_small;
+	u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 
 	nvgpu_log(g, gpu_dbg_map,
 		  "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x",
 		  page_size, pages, *addr, flags);
 
-	for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
+	for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
 		if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
 			break;
 		}
 	}
 
-	if (pgsz_idx > gmmu_page_size_big) {
+	if (pgsz_idx > GMMU_PAGE_SIZE_BIG) {
 		return -EINVAL;
 	}
 
117 117
@@ -122,7 +122,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
122 */ 122 */
123 nvgpu_speculation_barrier(); 123 nvgpu_speculation_barrier();
124 124
125 if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) { 125 if (!vm->big_pages && pgsz_idx == GMMU_PAGE_SIZE_BIG) {
126 return -EINVAL; 126 return -EINVAL;
127 } 127 }
128 128
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 8d6c3b70..77458917 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -120,7 +120,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
 	nvgpu_log_fn(g, " ");
 
 	g->ops.mm.init_inst_block(&c->inst_block, c->vm,
-				  c->vm->gmmu_page_sizes[gmmu_page_size_big]);
+				  c->vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]);
 
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 262dbb2c..5300f7dd 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -921,7 +921,7 @@ struct gpu_ops {
 				struct nvgpu_sgt *sgt,
 				u64 buffer_offset,
 				u64 size,
-				int pgsz_idx,
+				u32 pgsz_idx,
 				u8 kind_v,
 				u32 ctag_offset,
 				u32 flags,
@@ -934,7 +934,7 @@ struct gpu_ops {
 		void (*gmmu_unmap)(struct vm_gk20a *vm,
 				u64 vaddr,
 				u64 size,
-				int pgsz_idx,
+				u32 pgsz_idx,
 				bool va_allocated,
 				enum gk20a_mem_rw_flag rw_flag,
 				bool sparse,
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index ee63489e..b5626035 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -158,8 +158,8 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pde_v[2] = {0, 0};
 
-	small_valid = attrs->pgsz == gmmu_page_size_small;
-	big_valid   = attrs->pgsz == gmmu_page_size_big;
+	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
+	big_valid   = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
 	pde_v[0] = gmmu_pde_size_full_f();
 	pde_v[0] |= big_valid ?
@@ -283,24 +283,22 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 	pd_write(g, pd, pd_offset + 1, pte_w[1]);
 }
 
-enum gmmu_pgsz_gk20a gk20a_get_pde_pgsz(struct gk20a *g,
-					const struct gk20a_mmu_level *l,
-					struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	/*
 	 * big and small page sizes are the same
 	 */
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
-enum gmmu_pgsz_gk20a gk20a_get_pte_pgsz(struct gk20a *g,
-					const struct gk20a_mmu_level *l,
-					struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	/*
 	 * return invalid
 	 */
-	return gmmu_nr_page_sizes;
+	return GMMU_NR_PAGE_SIZES;
 }
 
 const struct gk20a_mmu_level gk20a_mm_levels_64k[] = {
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index b99603bb..0827d355 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -139,7 +139,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			struct nvgpu_sgt *sgt,
 			u64 buffer_offset,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			u8 kind_v,
 			u32 ctag_offset,
 			u32 flags,
@@ -153,7 +153,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			bool va_allocated,
 			enum gk20a_mem_rw_flag rw_flag,
 			bool sparse,
@@ -178,10 +178,8 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem,
 extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
 extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
 
-enum gmmu_pgsz_gk20a gk20a_get_pde_pgsz(struct gk20a *g,
-					const struct gk20a_mmu_level *l,
-					struct nvgpu_gmmu_pd *pd, u32 pd_idx);
-enum gmmu_pgsz_gk20a gk20a_get_pte_pgsz(struct gk20a *g,
-					const struct gk20a_mmu_level *l,
-					struct nvgpu_gmmu_pd *pd, u32 pd_idx);
+u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx);
+u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx);
 #endif /* MM_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 75ae3d04..5a24adc0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -126,8 +126,8 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pde_v[4] = {0, 0, 0, 0};
 
-	small_valid = attrs->pgsz == gmmu_page_size_small;
-	big_valid   = attrs->pgsz == gmmu_page_size_big;
+	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
+	big_valid   = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
 	if (small_valid)
 		small_addr = phys_addr >> gmmu_new_dual_pde_address_shift_v();
@@ -274,15 +274,14 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
  * level having a different number of entries depending on whether it holds
  * big pages or small pages.
  */
-static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
-		const struct gk20a_mmu_level *l,
-		struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	u32 pde_base = pd->mem_offs / sizeof(u32);
 	u32 pde_offset = pde_base + pd_offset_from_index(l, pd_idx);
 	u32 pde_v[GP10B_PDE0_ENTRY_SIZE >> 2];
 	u32 i;
-	enum gmmu_pgsz_gk20a pgsz = gmmu_nr_page_sizes;
+	u32 pgsz = GMMU_NR_PAGE_SIZES;
 
 	if (!pd->mem)
 		return pgsz;
@@ -302,7 +301,7 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 				gmmu_new_dual_pde_address_shift_v();
 
 		if (addr)
-			pgsz = gmmu_page_size_small;
+			pgsz = GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
@@ -318,12 +317,12 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 		 * both small and big to be set, the PDE is not valid
 		 * and may be corrupted
 		 */
-		if (pgsz == gmmu_page_size_small) {
+		if (pgsz == GMMU_PAGE_SIZE_SMALL) {
 			nvgpu_err(g,
 				  "both small and big apertures enabled");
-			return gmmu_nr_page_sizes;
+			return GMMU_NR_PAGE_SIZES;
 		}
-		pgsz = gmmu_page_size_big;
+		pgsz = GMMU_PAGE_SIZE_BIG;
 		}
 	}
 
329 328
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index 886a79da..a83b0dd8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -38,12 +38,10 @@
 struct vm_gk20a;
 struct nvgpu_mem;
 
-enum gmmu_pgsz_gk20a {
-	gmmu_page_size_small  = 0,
-	gmmu_page_size_big    = 1,
-	gmmu_page_size_kernel = 2,
-	gmmu_nr_page_sizes    = 3,
-};
+#define GMMU_PAGE_SIZE_SMALL	0U
+#define GMMU_PAGE_SIZE_BIG	1U
+#define GMMU_PAGE_SIZE_KERNEL	2U
+#define GMMU_NR_PAGE_SIZES	3U
 
 enum gk20a_mem_rw_flag {
 	gk20a_mem_flag_none = 0,	/* RW */
@@ -197,9 +195,8 @@ struct gk20a_mmu_level {
 	/*
 	 * Get pde page size
 	 */
-	enum gmmu_pgsz_gk20a (*get_pgsz)(struct gk20a *g,
-					 const struct gk20a_mmu_level *l,
-					 struct nvgpu_gmmu_pd *pd, u32 pd_idx);
+	u32 (*get_pgsz)(struct gk20a *g, const struct gk20a_mmu_level *l,
+			struct nvgpu_gmmu_pd *pd, u32 pd_idx);
 };
 
 static inline const char *nvgpu_gmmu_perm_str(enum gk20a_mem_rw_flag p)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h
index 3a1b2a6c..e627296d 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h
@@ -204,9 +204,9 @@ static inline u64 __nv_gmmu_va_small_page_limit(void)
 	return ((u64)SZ_1G * 56);
 }
 
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 			     u64 base, u64 size);
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
 
 void nvgpu_init_mm_ce_context(struct gk20a *g);
 int nvgpu_init_mm_support(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 30a2d71d..ad8c7cca 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -172,7 +172,7 @@ struct vm_gk20a {
 	 * not. vma[] allows the code to be agnostic to this by always using
 	 * address spaces through this pointer array.
 	 */
-	struct nvgpu_allocator *vma[gmmu_nr_page_sizes];
+	struct nvgpu_allocator *vma[GMMU_NR_PAGE_SIZES];
 	struct nvgpu_allocator kernel;
 	struct nvgpu_allocator user;
 	struct nvgpu_allocator user_lp;
@@ -184,7 +184,7 @@ struct vm_gk20a {
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	u64 handle;
 #endif
-	u32 gmmu_page_sizes[gmmu_nr_page_sizes];
+	u32 gmmu_page_sizes[GMMU_NR_PAGE_SIZES];
 
 	/* if non-NULL, kref_put will use this batch when
 	   unmapping. Must hold vm->update_gmmu_lock. */
@@ -322,8 +322,8 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
  * will be used by the vgpu code.
  */
 u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-			enum gmmu_pgsz_gk20a pgsz_idx);
+			u32 pgsz_idx);
 int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-		       enum gmmu_pgsz_gk20a pgsz_idx);
+		       u32 pgsz_idx);
 
 #endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
index 92852633..a055ada3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
@@ -69,7 +69,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr);
 
 struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr);
 int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
-				  u64 map_offset, u64 map_size, int pgsz_idx,
+				  u64 map_offset, u64 map_size, u32 pgsz_idx,
 				  struct nvgpu_vm_area **pvm_area);
 
 #endif
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_as.c b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
index 5eb9802f..7d1d618e 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
@@ -230,7 +230,7 @@ static int gk20a_as_ioctl_get_va_regions(
 	struct nvgpu_as_va_region __user *user_region_ptr;
 	struct vm_gk20a *vm = as_share->vm;
 	struct gk20a *g = gk20a_from_vm(vm);
-	unsigned int page_sizes = gmmu_page_size_kernel;
+	unsigned int page_sizes = GMMU_PAGE_SIZE_KERNEL;
 
 	nvgpu_log_fn(g, " ");
 
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index e7bd0a49..6017046f 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -53,7 +53,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 				struct nvgpu_sgt *sgt,
 				u64 buffer_offset,
 				u64 size,
-				int pgsz_idx,
+				u32 pgsz_idx,
 				u8 kind_v,
 				u32 ctag_offset,
 				u32 flags,
@@ -147,12 +147,12 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	else
 		prot = TEGRA_VGPU_MAP_PROT_NONE;
 
-	if (pgsz_idx == gmmu_page_size_kernel) {
-		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
-			pgsz_idx = gmmu_page_size_small;
+	if (pgsz_idx == GMMU_PAGE_SIZE_KERNEL) {
+		if (page_size == vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL]) {
+			pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 		} else if (page_size ==
-				vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			pgsz_idx = gmmu_page_size_big;
+				vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			pgsz_idx = GMMU_PAGE_SIZE_BIG;
 		} else {
 			nvgpu_err(g, "invalid kernel page size %d",
 				  page_size);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
index 9435b75f..704c400e 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.h
@@ -30,7 +30,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 				struct nvgpu_sgt *sgt,
 				u64 buffer_offset,
 				u64 size,
-				int pgsz_idx,
+				u32 pgsz_idx,
 				u8 kind_v,
 				u32 ctag_offset,
 				u32 flags,
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 0077c537..fa64cb82 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -192,7 +192,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Circular Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -202,7 +202,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Attribute Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -212,7 +212,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Page Pool */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -221,7 +221,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Priv register Access Map */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
@@ -232,7 +232,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -262,7 +262,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -285,7 +285,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -317,7 +317,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 					gr->ctx_vars.buffer_total_size,
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gr_ctx->mem.gpu_va)
 		return -ENOMEM;
@@ -336,7 +336,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
 		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		gr_ctx->mem.aperture = APERTURE_INVALID;
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -365,7 +365,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	patch_ctx->mem.size = 128 * sizeof(u32);
 	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 					patch_ctx->mem.size,
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 	if (!patch_ctx->mem.gpu_va)
 		return -ENOMEM;
 
@@ -376,7 +376,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
 		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		err = -ENOMEM;
 	}
 
@@ -394,7 +394,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 		/* server will free on channel close */
 
 		__nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -414,7 +414,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 	/* server will free on channel close */
 
 	__nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	pm_ctx->mem.gpu_va = 0;
 }
 
@@ -437,7 +437,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 	WARN_ON(err || msg.ret);
 
 	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	tsg = &g->fifo.tsg[gr_ctx->tsgid];
 	vgpu_gr_unmap_global_ctx_buffers(tsg);
@@ -1120,7 +1120,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 		if (pm_ctx->mem.gpu_va == 0) {
 			pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 
 			if (!pm_ctx->mem.gpu_va)
 				return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
index e718a30d..43cff1c0 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -43,7 +43,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 
 	vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
 			g->syncpt_unit_size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!vm->syncpt_ro_map_gpu_va) {
 		nvgpu_err(g, "allocating read-only va space failed");
 		return -ENOMEM;
@@ -63,7 +63,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
63 "mapping read-only va space failed err %d", 63 "mapping read-only va space failed err %d",
64 err); 64 err);
65 __nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va, 65 __nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
66 gmmu_page_size_kernel); 66 GMMU_PAGE_SIZE_KERNEL);
67 vm->syncpt_ro_map_gpu_va = 0; 67 vm->syncpt_ro_map_gpu_va = 0;
68 return err; 68 return err;
69 } 69 }
@@ -91,7 +91,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 		return err;
 
 	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!syncpt_buf->gpu_va) {
 		nvgpu_err(g, "allocating syncpt va space failed");
 		return -ENOMEM;
@@ -110,7 +110,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 	if (err) {
 		nvgpu_err(g, "mapping syncpt va space failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
 
@@ -121,7 +121,7 @@ void vgpu_gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
 		struct nvgpu_mem *syncpt_buf)
 {
 	nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
-	__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, gmmu_page_size_kernel);
+	__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
 	nvgpu_dma_free(c->g, syncpt_buf);
 }
 
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
index 2372b9c4..b536d15e 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
@@ -40,7 +40,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	p->ch_handle = c->virt_ctx;
 	p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
 			ctxsw_prog_fecs_header_v(),
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!p->ctx_header_va) {
 		nvgpu_err(c->g, "alloc va failed for ctx_header");
 		return -ENOMEM;
@@ -50,7 +50,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	if (unlikely(err)) {
 		nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, p->ctx_header_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
 	ctx->mem.gpu_va = p->ctx_header_va;
@@ -75,7 +75,7 @@ void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
 		if (unlikely(err))
 			nvgpu_err(c->g, "free ctx_header failed err %d", err);
 		__nvgpu_vm_free_va(c->vm, ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		ctx->mem.gpu_va = 0;
 	}
 }
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 54b1e7c2..229a9767 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -84,7 +84,7 @@ int vgpu_init_mm_support(struct gk20a *g)
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			bool va_allocated,
 			enum gk20a_mem_rw_flag rw_flag,
 			bool sparse,
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h b/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
index e8f40d5c..41bae96d 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.h
@@ -33,7 +33,7 @@ enum gk20a_mem_rw_flag;
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
-			int pgsz_idx,
+			u32 pgsz_idx,
 			bool va_allocated,
 			enum gk20a_mem_rw_flag rw_flag,
 			bool sparse,