Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c  12
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c             14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/mm.c               26
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c               32
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c          10
5 files changed, 46 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 365f3b7b..f8c97839 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -142,7 +142,7 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
          * without cycling through the entire list.
          */
         if (a->flags & GPU_ALLOC_GVA_SPACE &&
-            b->pte_size == gmmu_page_size_big) {
+            b->pte_size == GMMU_PAGE_SIZE_BIG) {
                 nvgpu_list_add_tail(&b->buddy_entry, list);
         } else {
                 nvgpu_list_add(&b->buddy_entry, list);
@@ -487,7 +487,7 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
         }
 
         if (a->flags & GPU_ALLOC_GVA_SPACE &&
-            pte_size == gmmu_page_size_big) {
+            pte_size == GMMU_PAGE_SIZE_BIG) {
                 bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
                                             nvgpu_buddy, buddy_entry);
         } else {
@@ -844,8 +844,8 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
                 alloc_dbg(balloc_owner(a),
                           "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s",
                           addr, order, len,
-                          pte_size == gmmu_page_size_big ? "big" :
-                          pte_size == gmmu_page_size_small ? "small" :
+                          pte_size == GMMU_PAGE_SIZE_BIG ? "big" :
+                          pte_size == GMMU_PAGE_SIZE_SMALL ? "small" :
                           "NA/any");
         } else {
                 alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
@@ -882,9 +882,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
         /* Check that the page size is valid. */
         if (a->flags & GPU_ALLOC_GVA_SPACE && a->vm->big_pages) {
                 if (page_size == a->vm->big_page_size) {
-                        pte_size = gmmu_page_size_big;
+                        pte_size = GMMU_PAGE_SIZE_BIG;
                 } else if (page_size == SZ_4K) {
-                        pte_size = gmmu_page_size_small;
+                        pte_size = GMMU_PAGE_SIZE_SMALL;
                 } else {
                         goto fail;
                 }
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 73a37b57..02273393 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -109,7 +109,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
                            sgt,    /* sg list */
                            0,      /* sg offset */
                            size,
-                           gmmu_page_size_kernel,
+                           GMMU_PAGE_SIZE_KERNEL,
                            0,      /* kind */
                            0,      /* ctag_offset */
                            flags, rw_flag,
@@ -169,7 +169,7 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
         g->ops.mm.gmmu_unmap(vm,
                              gpu_va,
                              mem->size,
-                             gmmu_page_size_kernel,
+                             GMMU_PAGE_SIZE_KERNEL,
                              mem->free_gpu_va,
                              gk20a_mem_flag_none,
                              false,
@@ -609,8 +609,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
         /* note: here we need to map kernel to small, since the
          * low-level mmu code assumes 0 is small and 1 is big pages */
-        if (attrs->pgsz == gmmu_page_size_kernel) {
-                attrs->pgsz = gmmu_page_size_small;
+        if (attrs->pgsz == GMMU_PAGE_SIZE_KERNEL) {
+                attrs->pgsz = GMMU_PAGE_SIZE_SMALL;
         }
 
         page_size = vm->gmmu_page_sizes[attrs->pgsz];
@@ -676,7 +676,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                         struct nvgpu_sgt *sgt,
                         u64 buffer_offset,
                         u64 size,
-                        int pgsz_idx,
+                        u32 pgsz_idx,
                         u8 kind_v,
                         u32 ctag_offset,
                         u32 flags,
@@ -764,7 +764,7 @@ fail_alloc:
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
                              u64 vaddr,
                              u64 size,
-                             int pgsz_idx,
+                             u32 pgsz_idx,
                              bool va_allocated,
                              enum gk20a_mem_rw_flag rw_flag,
                              bool sparse,
@@ -865,7 +865,7 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 
         attrs->pgsz = l->get_pgsz(g, l, pd, pd_idx);
 
-        if (attrs->pgsz >= gmmu_nr_page_sizes) {
+        if (attrs->pgsz >= GMMU_NR_PAGE_SIZES) {
                 return -EINVAL;
         }
 
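
Note: the GMMU_PAGE_SIZE_* and GMMU_NR_PAGE_SIZES names used throughout this diff replace the old enum gmmu_pgsz_gk20a values with plain u32 indices. Their definitions are not part of this diff; the sketch below is an assumption about what they look like, with the 0/1 ordering taken from the comment in __nvgpu_gmmu_update_page_table() above (the low-level MMU code assumes 0 is small pages and 1 is big pages).

/* Assumed definitions; the exact header and values are not shown in this diff. */
#define GMMU_PAGE_SIZE_SMALL  0U   /* low-level MMU code assumes 0 = small pages */
#define GMMU_PAGE_SIZE_BIG    1U   /* ... and 1 = big pages */
#define GMMU_PAGE_SIZE_KERNEL 2U
#define GMMU_NR_PAGE_SIZES    3U   /* one past the last valid index */
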
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 2e46e211..6be619ed 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -34,14 +34,14 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
                         u64 base, u64 size)
 {
         struct nvgpu_vm_area *vm_area;
 
         vm_area = nvgpu_vm_area_find(vm, base);
         if (!vm_area) {
-                return gmmu_page_size_small;
+                return GMMU_PAGE_SIZE_SMALL;
         }
 
         return vm_area->pgsz_idx;
@@ -50,19 +50,19 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
+static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
                         u64 base, u64 size)
 {
         if (!base) {
-                if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) {
-                        return gmmu_page_size_big;
+                if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+                        return GMMU_PAGE_SIZE_BIG;
                 }
-                return gmmu_page_size_small;
+                return GMMU_PAGE_SIZE_SMALL;
         } else {
                 if (base < __nv_gmmu_va_small_page_limit()) {
-                        return gmmu_page_size_small;
+                        return GMMU_PAGE_SIZE_SMALL;
                 } else {
-                        return gmmu_page_size_big;
+                        return GMMU_PAGE_SIZE_BIG;
                 }
         }
 }
@@ -88,12 +88,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
  * - Regardless of buffer size use small pages since we have no
  * - guarantee of contiguity.
  */
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
         struct gk20a *g = gk20a_from_vm(vm);
 
         if (!vm->big_pages) {
-                return gmmu_page_size_small;
+                return GMMU_PAGE_SIZE_SMALL;
         }
 
         if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
@@ -104,11 +104,11 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
                 return __get_pte_size_fixed_map(vm, base, size);
         }
 
-        if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
+        if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
             nvgpu_iommuable(g)) {
-                return gmmu_page_size_big;
+                return GMMU_PAGE_SIZE_BIG;
         }
-        return gmmu_page_size_small;
+        return GMMU_PAGE_SIZE_SMALL;
 }
 
 int nvgpu_mm_suspend(struct gk20a *g)
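
Note: the mm.c hunks above change only the return type and the constant names of the PTE-size selection helpers; the decision logic is unchanged. The standalone sketch below is an illustration of the unified-address-space branch only, not nvgpu code; pick_pte_size and the 64 KiB big page size are assumptions made for the example, and the fixed-map and split-address cases are omitted.

#include <stdint.h>
#include <stdio.h>

#define GMMU_PAGE_SIZE_SMALL 0U
#define GMMU_PAGE_SIZE_BIG   1U

/* Mirror of the __get_pte_size() unified-address-space branch shown above. */
static uint32_t pick_pte_size(int big_pages, int iommuable,
                              uint64_t big_page_size, uint64_t size)
{
        if (!big_pages)
                return GMMU_PAGE_SIZE_SMALL;
        if (size >= big_page_size && iommuable)
                return GMMU_PAGE_SIZE_BIG;
        return GMMU_PAGE_SIZE_SMALL;
}

int main(void)
{
        /* 128 KiB buffer, IOMMU-backed GPU, 64 KiB big pages: big pages win. */
        printf("%u\n", pick_pte_size(1, 1, 64 * 1024, 128 * 1024)); /* prints 1 */
        /* 16 KiB buffer is smaller than one big page: fall back to small. */
        printf("%u\n", pick_pte_size(1, 1, 64 * 1024, 16 * 1024));  /* prints 0 */
        return 0;
}
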
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 7d97b7b7..bd6c1e87 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -41,7 +41,7 @@
 
 struct nvgpu_ctag_buffer_info {
         u64 size;
-        enum gmmu_pgsz_gk20a pgsz_idx;
+        u32 pgsz_idx;
         u32 flags;
 
         s16 compr_kind;
@@ -123,8 +123,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
         pdb->entries = NULL;
 }
 
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-                        enum gmmu_pgsz_gk20a pgsz_idx)
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 
 {
         struct gk20a *g = vm->mm->g;
@@ -139,12 +138,12 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
                 return 0;
         }
 
-        if (pgsz_idx >= gmmu_nr_page_sizes) {
+        if (pgsz_idx >= GMMU_NR_PAGE_SIZES) {
                 nvgpu_err(g, "(%s) invalid page size requested", vma->name);
                 return 0;
         }
 
-        if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+        if ((pgsz_idx == GMMU_PAGE_SIZE_BIG) && !vm->big_pages) {
                 nvgpu_err(g, "(%s) unsupportd page size requested", vma->name);
                 return 0;
         }
@@ -161,8 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
         return addr;
 }
 
-int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-                       enum gmmu_pgsz_gk20a pgsz_idx)
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
         struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
@@ -264,7 +262,7 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
         err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
         if (err) {
                 nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
-                nvgpu_free(vm->vma[gmmu_page_size_small],
+                nvgpu_free(vm->vma[GMMU_PAGE_SIZE_SMALL],
                            vm->sema_pool->gpu_va);
                 return err;
         }
@@ -308,22 +306,22 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
 
         vm->mm = mm;
 
-        vm->gmmu_page_sizes[gmmu_page_size_small] = SZ_4K;
-        vm->gmmu_page_sizes[gmmu_page_size_big] = big_page_size;
-        vm->gmmu_page_sizes[gmmu_page_size_kernel] = SZ_4K;
+        vm->gmmu_page_sizes[GMMU_PAGE_SIZE_SMALL] = SZ_4K;
+        vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] = big_page_size;
+        vm->gmmu_page_sizes[GMMU_PAGE_SIZE_KERNEL] = SZ_4K;
 
         /* Set up vma pointers. */
-        vm->vma[gmmu_page_size_small] = &vm->user;
-        vm->vma[gmmu_page_size_big] = &vm->user;
-        vm->vma[gmmu_page_size_kernel] = &vm->kernel;
+        vm->vma[GMMU_PAGE_SIZE_SMALL] = &vm->user;
+        vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user;
+        vm->vma[GMMU_PAGE_SIZE_KERNEL] = &vm->kernel;
         if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
-                vm->vma[gmmu_page_size_big] = &vm->user_lp;
+                vm->vma[GMMU_PAGE_SIZE_BIG] = &vm->user_lp;
         }
 
         vm->va_start = low_hole;
         vm->va_limit = aperture_size;
 
-        vm->big_page_size = vm->gmmu_page_sizes[gmmu_page_size_big];
+        vm->big_page_size = vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG];
         vm->userspace_managed = userspace_managed;
         vm->mmu_levels = g->ops.mm.get_mmu_levels(g, vm->big_page_size);
@@ -876,7 +874,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
         align = nvgpu_sgt_alignment(g, sgt);
         if (g->mm.disable_bigpage) {
-                binfo.pgsz_idx = gmmu_page_size_small;
+                binfo.pgsz_idx = GMMU_PAGE_SIZE_SMALL;
         } else {
                 binfo.pgsz_idx = __get_pte_size(vm, map_addr,
                                                 min_t(u64, binfo.size, align));
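
Note: with the signature changes above, callers pass a plain u32 page-size index to the VA helpers. A hypothetical usage sketch follows (not from this change; error handling, locking and the actual GMMU programming are omitted, vm is assumed to be an initialized struct vm_gk20a pointer, and SZ_1M is just an example size):

        /* Reserve 1 MiB of kernel GPU VA, then release it again. */
        u64 gpu_va = __nvgpu_vm_alloc_va(vm, SZ_1M, GMMU_PAGE_SIZE_KERNEL);

        if (gpu_va != 0ULL) {
                /* ... map something at [gpu_va, gpu_va + SZ_1M) ... */
                __nvgpu_vm_free_va(vm, gpu_va, GMMU_PAGE_SIZE_KERNEL);
        }
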
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index b8fecbfc..7e2b5c34 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -43,7 +43,7 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 }
 
 int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
-                u64 map_addr, u64 map_size, int pgsz_idx,
+                u64 map_addr, u64 map_size, u32 pgsz_idx,
                 struct nvgpu_vm_area **pvm_area)
 {
         struct gk20a *g = vm->mm->g;
@@ -99,19 +99,19 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
         struct nvgpu_allocator *vma;
         struct nvgpu_vm_area *vm_area;
         u64 vaddr_start = 0;
-        int pgsz_idx = gmmu_page_size_small;
+        u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
 
         nvgpu_log(g, gpu_dbg_map,
                   "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x",
                   page_size, pages, *addr, flags);
 
-        for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
+        for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
                 if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
                         break;
                 }
         }
 
-        if (pgsz_idx > gmmu_page_size_big) {
+        if (pgsz_idx > GMMU_PAGE_SIZE_BIG) {
                 return -EINVAL;
         }
 
@@ -122,7 +122,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
          */
         nvgpu_speculation_barrier();
 
-        if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) {
+        if (!vm->big_pages && pgsz_idx == GMMU_PAGE_SIZE_BIG) {
                 return -EINVAL;
         }
 
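
Note: the page-size index lookup in nvgpu_vm_area_alloc() above walks gmmu_page_sizes[] from GMMU_PAGE_SIZE_SMALL upward. The standalone sketch below illustrates that walk; it is not nvgpu code, and the table values are assumptions that mirror the small/big/kernel setup in __nvgpu_vm_init() with a 64 KiB big page size.

#include <stdint.h>

#define GMMU_PAGE_SIZE_SMALL 0U
#define GMMU_PAGE_SIZE_BIG   1U
#define GMMU_NR_PAGE_SIZES   3U

static const uint64_t gmmu_page_sizes[GMMU_NR_PAGE_SIZES] = {
        4096,           /* small  */
        64 * 1024,      /* big    */
        4096,           /* kernel */
};

/* Returns the first matching index, or GMMU_NR_PAGE_SIZES if nothing matches;
 * nvgpu_vm_area_alloc() rejects any result above GMMU_PAGE_SIZE_BIG. */
static uint32_t page_size_to_idx(uint64_t page_size)
{
        uint32_t pgsz_idx;

        for (pgsz_idx = GMMU_PAGE_SIZE_SMALL;
             pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
                if (gmmu_page_sizes[pgsz_idx] == page_size)
                        break;
        }
        return pgsz_idx;
}

With this table, 4 KiB maps to GMMU_PAGE_SIZE_SMALL, 64 KiB maps to GMMU_PAGE_SIZE_BIG, and any other size runs off the end of the table and is rejected with -EINVAL by the caller.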