Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 36 +++---------------------------------
1 file changed, 3 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 88622eca..3aeba500 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -36,7 +36,7 @@ int vm_aspace_id(struct vm_gk20a *vm)
 }
 
 static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
-				  struct gk20a_mm_entry *parent,
+				  struct nvgpu_gmmu_pd *parent,
 				  int level)
 {
 	int i;
@@ -75,8 +75,6 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 
 	/* Be certain we round up to page_size if needed */
 	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
-	nvgpu_log(g, gpu_dbg_map, "size=0x%llx @ pgsz=%dKB", size,
-		  vm->gmmu_page_sizes[pgsz_idx] >> 10);
 
 	addr = nvgpu_alloc(vma, size);
 	if (!addr) {
@@ -84,17 +82,14 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 		return 0;
 	}
 
-	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
 	return addr;
 }
 
 int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
 		       enum gmmu_pgsz_gk20a pgsz_idx)
 {
-	struct gk20a *g = vm->mm->g;
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
-	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
 	nvgpu_free(vma, addr);
 
 	return 0;
@@ -127,32 +122,6 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-static int nvgpu_vm_init_page_tables(struct vm_gk20a *vm)
-{
-	u32 pde_lo, pde_hi;
-	int err;
-
-	pde_range_from_vaddr_range(vm,
-				   0, vm->va_limit-1,
-				   &pde_lo, &pde_hi);
-	vm->pdb.entries = nvgpu_vzalloc(vm->mm->g,
-					sizeof(struct gk20a_mm_entry) *
-					(pde_hi + 1));
-	vm->pdb.num_entries = pde_hi + 1;
-
-	if (!vm->pdb.entries)
-		return -ENOMEM;
-
-	err = nvgpu_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0],
-					   &vm->pdb, NULL);
-	if (err) {
-		nvgpu_vfree(vm->mm->g, vm->pdb.entries);
-		return err;
-	}
-
-	return 0;
-}
-
 /*
  * Determine if the passed address space can support big pages or not.
  */
@@ -280,7 +249,8 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,
 #endif
 
 	/* Initialize the page table data structures. */
-	err = nvgpu_vm_init_page_tables(vm);
+	strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
+	err = nvgpu_gmmu_init_page_table(vm);
 	if (err)
 		goto clean_up_vgpu_vm;
 
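
Note on the rounding in __nvgpu_vm_alloc_va(): the surviving line
`size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);` rounds the
requested size up to the next multiple of page_size, and the mask trick is
only valid because the GMMU page sizes are powers of two. A minimal
standalone sketch of the same idiom (round_up_pow2 is an illustrative name,
not an nvgpu function):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round size up to the next multiple of align. Only valid when align is
 * a power of two, which is what lets the add-then-mask replace a divide. */
static uint64_t round_up_pow2(uint64_t size, uint64_t align)
{
	assert(align != 0 && (align & (align - 1)) == 0);
	return (size + (align - 1)) & ~(align - 1);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)round_up_pow2(1, 4096));    /* 4096 */
	printf("%llu\n", (unsigned long long)round_up_pow2(4096, 4096)); /* 4096 */
	printf("%llu\n", (unsigned long long)round_up_pow2(4097, 4096)); /* 8192 */
	return 0;
}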
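
Note on the added strncpy(): with a count of min(strlen(name), sizeof(vm->name)),
the source is never shorter than the count, so this strncpy() call never writes
a terminating NUL itself; vm->name ends up terminated only if the vm structure
was zero-initialized by its allocator (plausible here, but an assumption). A
self-terminating sketch of the same copy (vm_name_demo, its 20-byte buffer, and
vm_set_name are illustrative, not nvgpu definitions):

#include <stdio.h>
#include <string.h>

struct vm_name_demo {
	char name[20];	/* illustrative size */
};

static void vm_set_name(struct vm_name_demo *vm, const char *name)
{
	/* Leave room for the terminator, then always write it, so the
	 * result is a valid C string even for over-long names. */
	strncpy(vm->name, name, sizeof(vm->name) - 1);
	vm->name[sizeof(vm->name) - 1] = '\0';
}

int main(void)
{
	struct vm_name_demo vm;

	vm_set_name(&vm, "a_very_long_vm_name_that_gets_truncated");
	puts(vm.name);	/* prints the first 19 characters, NUL-terminated */
	return 0;
}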