Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c |  2 +-
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c    | 48 ++++++++++++++++++++++++
2 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 5470d9ee..9238a9df 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -394,7 +394,7 @@ clean_up:
 	}
 	nvgpu_kfree(g, mapped_buffer);
 	if (va_allocated)
-		gk20a_vm_free_va(vm, map_offset, bfr.size, bfr.pgsz_idx);
+		__nvgpu_vm_free_va(vm, map_offset, bfr.pgsz_idx);
 	if (!IS_ERR(bfr.sgt))
 		gk20a_mm_unpin(g->dev, dmabuf, bfr.sgt);
 
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3bdc905e..3b3b7a10 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -29,6 +29,54 @@ int vm_aspace_id(struct vm_gk20a *vm)
 	return vm->as_share ? vm->as_share->id : -1;
 }
 
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
+			enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma;
+	u64 addr;
+	u64 page_size;
+
+	if (pgsz_idx >= gmmu_nr_page_sizes) {
+		nvgpu_err(g, "invalid page size %d requested", pgsz_idx);
+		return 0;
+	}
+
+	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+		nvgpu_err(g, "unsupported page size %d requested", pgsz_idx);
+		return 0;
+	}
+
+	vma = vm->vma[pgsz_idx];
+	page_size = vm->gmmu_page_sizes[pgsz_idx];
+
+	/* Be certain we round up to page_size if needed */
+	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	nvgpu_log(g, gpu_dbg_map, "size=0x%llx @ pgsz=%dKB", size,
+		  vm->gmmu_page_sizes[pgsz_idx] >> 10);
+
+	addr = nvgpu_alloc(vma, size);
+	if (!addr) {
+		nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size);
+		return 0;
+	}
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	return addr;
+}
+
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
+		       enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	nvgpu_free(vma, addr);
+
+	return 0;
+}
+
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
 	memset(mapping_batch, 0, sizeof(*mapping_batch));
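
For context, a minimal sketch (not part of this commit) of how a caller might
pair the two new helpers, mirroring the clean_up path in linux/vm.c above.
example_map() and do_map() are hypothetical names, and gmmu_page_size_small is
assumed to be an enumerator of enum gmmu_pgsz_gk20a:

	/* Hypothetical: stands in for whatever programs the GMMU page tables. */
	static int do_map(struct vm_gk20a *vm, u64 gpu_va, u64 size);

	static int example_map(struct vm_gk20a *vm, u64 size)
	{
		enum gmmu_pgsz_gk20a pgsz_idx = gmmu_page_size_small;	/* assumed */
		u64 map_offset;
		int err;

		/* Reserve a GPU virtual address range; 0 signals failure. */
		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
		if (!map_offset)
			return -ENOMEM;

		err = do_map(vm, map_offset, size);
		if (err) {
			/* Release the reserved range on the error path. */
			__nvgpu_vm_free_va(vm, map_offset, pgsz_idx);
			return err;
		}

		return 0;
	}

Note that __nvgpu_vm_free_va() no longer takes the size argument that
gk20a_vm_free_va() did, which suggests the underlying nvgpu_allocator tracks
each allocation's size from its base address.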