author		Alex Waterman <alexw@nvidia.com>	2017-05-11 16:59:22 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-07-06 17:44:15 -0400
commit		c1393d5b68e63c992f4c689cb788139fdf8c2f1a (patch)
tree		00a588d35342d75c05fed7733e91da753ba640fb	/drivers/gpu/nvgpu/common/mm/vm.c
parent		84f712dee8b582dd7d2a19345c621a2ae3bd6292 (diff)
gpu: nvgpu: gmmu programming rewrite
Update the high-level mapping logic. Instead of iterating over the GPU VA, iterate over the scatter-gather table chunks. As a result, each GMMU page table update call is simplified dramatically.

This also modifies the chip-level code to no longer require an SGL as an argument. Each call to the chip-level code is guaranteed to be contiguous, so it only has to worry about making a mapping from virt -> phys. This removes the dependency on Linux that the chip code currently has. With this patch the core GMMU code still uses the Linux SGL, but the logic is highly transferable to a different, nvgpu-specific scatter-gather list format in the near future.

The last major update is to push most of the page table attribute arguments into a struct. That struct is passed down through the various mapping levels. This makes the function calls simpler and easier to follow.

JIRA NVGPU-30

Change-Id: Ibb6b11755f99818fe642622ca0bd4cbed054f602
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master/r/1484104
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	36
1 file changed, 3 insertions, 33 deletions
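Before the diff itself, the core idea the commit message describes is easier to see in isolation: walk the scatter-gather list chunk by chunk (each chunk is physically contiguous) and carry the page table attributes in one struct instead of a long argument list. The sketch below only illustrates that shape; the struct fields, the attrs name, and update_page_tables() are illustrative assumptions, not the definitions this series actually adds to nvgpu.

#include <linux/scatterlist.h>
#include <linux/types.h>

struct vm_gk20a;	/* opaque here; defined in the nvgpu headers */

/* Illustrative attribute bundle; field names are assumptions. */
struct gmmu_map_attrs {
	u32	pgsz_idx;	/* page size index for the mapping */
	u32	kind;		/* kind/compression attribute */
	bool	cacheable;	/* GPU-cacheable bit */
	bool	priv;		/* privileged mapping */
	bool	sparse;		/* sparse (valid-but-unbacked) mapping */
};

/* Hypothetical per-chunk updater: programs PTEs for one physically
 * contiguous virt -> phys run. Stands in for the chip-level code. */
int update_page_tables(struct vm_gk20a *vm, u64 virt, u64 phys, u64 len,
		       struct gmmu_map_attrs *attrs);

/*
 * Walk the Linux SGL chunk by chunk instead of walking the GPU VA page
 * by page. Each chunk is contiguous, so the chip-level hook only ever
 * sees simple virt -> phys runs and never touches the SGL itself.
 */
static int map_sgt_by_chunks(struct vm_gk20a *vm, struct sg_table *sgt,
			     u64 virt_addr, struct gmmu_map_attrs *attrs)
{
	struct scatterlist *sgl;
	unsigned int i;
	u64 virt = virt_addr;
	int err;

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		u64 phys = sg_phys(sgl);
		u64 len = sgl->length;

		err = update_page_tables(vm, virt, phys, len, attrs);
		if (err)
			return err;

		virt += len;
	}

	return 0;
}

Because only the outer loop knows about the Linux SGL, swapping in an nvgpu-specific scatter-gather format later changes that loop alone, which is the portability point the commit message makes.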
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 88622eca..3aeba500 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -36,7 +36,7 @@ int vm_aspace_id(struct vm_gk20a *vm)
 }
 
 static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
-                                  struct gk20a_mm_entry *parent,
+                                  struct nvgpu_gmmu_pd *parent,
                                   int level)
 {
         int i;
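This hunk only changes the type of the parent pointer: the recursive teardown now walks nvgpu_gmmu_pd nodes rather than gk20a_mm_entry ones. The removed init code further down shows the relevant shape, an entries array plus num_entries per directory. A minimal illustration of that parent/child layout follows; the node and field names are assumptions, not the exact nvgpu_gmmu_pd definition, and the walk is only in the spirit of what a free-entries pass over such nodes looks like.

/* Illustration only: a page-directory node with the two fields a
 * recursive teardown relies on, the children and their count. */
struct pd_node {
	struct pd_node	*entries;	/* one child per PDE, or NULL for a leaf */
	int		num_entries;
};

static void pd_free_recursive(struct pd_node *pd)
{
	int i;

	/* Free children depth-first before releasing this directory. */
	for (i = 0; i < pd->num_entries; i++)
		if (pd->entries[i].entries)
			pd_free_recursive(&pd->entries[i]);

	/* release pd's backing memory and entries array here */
}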
@@ -75,8 +75,6 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 
         /* Be certain we round up to page_size if needed */
         size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
-        nvgpu_log(g, gpu_dbg_map, "size=0x%llx @ pgsz=%dKB", size,
-                  vm->gmmu_page_sizes[pgsz_idx] >> 10);
 
         addr = nvgpu_alloc(vma, size);
         if (!addr) {
@@ -84,17 +82,14 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
                 return 0;
         }
 
-        nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
         return addr;
 }
 
 int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
                        enum gmmu_pgsz_gk20a pgsz_idx)
 {
-        struct gk20a *g = vm->mm->g;
         struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
 
-        nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
         nvgpu_free(vma, addr);
 
         return 0;
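The two hunks above strip the map-level logging (and with it the gk20a pointer) out of the VA allocate/free helpers, leaving thin wrappers around the VMA allocator. Reconstructed only from the context lines visible here, and omitting anything elsewhere in the file that the hunks do not show, the allocate path ends up roughly like this:

u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
			enum gmmu_pgsz_gk20a pgsz_idx)
{
	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
	u64 page_size = vm->gmmu_page_sizes[pgsz_idx];
	u64 addr;

	/* Be certain we round up to page_size if needed */
	size = (size + (page_size - 1)) & ~(page_size - 1);

	addr = nvgpu_alloc(vma, size);
	if (!addr) {
		/* error reporting elided in the hunk */
		return 0;
	}

	return addr;
}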
@@ -127,32 +122,6 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
         nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-static int nvgpu_vm_init_page_tables(struct vm_gk20a *vm)
-{
-        u32 pde_lo, pde_hi;
-        int err;
-
-        pde_range_from_vaddr_range(vm,
-                                   0, vm->va_limit-1,
-                                   &pde_lo, &pde_hi);
-        vm->pdb.entries = nvgpu_vzalloc(vm->mm->g,
-                                        sizeof(struct gk20a_mm_entry) *
-                                        (pde_hi + 1));
-        vm->pdb.num_entries = pde_hi + 1;
-
-        if (!vm->pdb.entries)
-                return -ENOMEM;
-
-        err = nvgpu_zalloc_gmmu_page_table(vm, 0, &vm->mmu_levels[0],
-                                           &vm->pdb, NULL);
-        if (err) {
-                nvgpu_vfree(vm->mm->g, vm->pdb.entries);
-                return err;
-        }
-
-        return 0;
-}
-
 /*
  * Determine if the passed address space can support big pages or not.
  */
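The removed helper sized the page directory from the PDE range covering the whole VA space, one gk20a_mm_entry per PDE from 0 through the PDE holding the last mappable byte, then allocated the level-0 table and unwound the vzalloc on failure. The sizing step boils down to the arithmetic below; pde_shift is a stand-in, since the driver derives the actual value from vm->mmu_levels via pde_range_from_vaddr_range().

/* Number of PDB entries needed so that PDEs 0..pde_hi cover the VA
 * range [0, va_limit). pde_shift is hypothetical here. */
static u32 pdb_entries_needed(u64 va_limit, u32 pde_shift)
{
	u64 pde_hi = (va_limit - 1) >> pde_shift;

	return (u32)(pde_hi + 1);
}

With this patch that sizing and allocation moves into the common GMMU code behind nvgpu_gmmu_init_page_table(), which the final hunk calls.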
@@ -280,7 +249,8 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,
 #endif
 
         /* Initialize the page table data structures. */
-        err = nvgpu_vm_init_page_tables(vm);
+        strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
+        err = nvgpu_gmmu_init_page_table(vm);
 if (err)
                 goto clean_up_vgpu_vm;
 
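One detail worth noting in the final hunk: strncpy() with a length of min(strlen(name), sizeof(vm->name)) never writes a NUL terminator itself, so the copy relies on vm->name having been zero-initialized and on the name being shorter than the buffer. A termination-safe alternative, shown only as an illustration and not what the patch does, is the kernel's strlcpy():

	/* Alternative: strlcpy() always NUL-terminates and truncates safely. */
	strlcpy(vm->name, name, sizeof(vm->name));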