author     Alex Waterman <alexw@nvidia.com>                          2017-04-26 17:27:02 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>      2017-05-24 15:14:13 -0400
commit     b70bad4b9f40e94f731fd9d509e1f3f6617f0b05 (patch)
tree       21bfaf082aeb7662eb194f72c5f33a36c7cb7bdc /drivers/gpu/nvgpu/common/mm/vm.c
parent     92fe030e5250409ecd500dcf719547f3fb0f1873 (diff)
gpu: nvgpu: Refactor gk20a_vm_alloc_va()
gk20a_vm_alloc_va() is an internal VM manager function that allocates virtual
memory space from the GVA allocator. Unfortunately it is also used by the vGPU
code. In any event, this patch cleans up these functions and moves their
implementation into the VM common code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I24a3d29b5fcb12615df27d2ac82891d1bacfe541
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477745
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
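As context for the diff below, here is a rough sketch of how a caller might use the
two helpers this patch introduces. The signatures come from the patch itself, but the
caller example_reserve_va(), the helper example_program_mapping(), the use of the
gmmu_page_size_small index, and the error-handling shape are illustrative assumptions,
not part of this change.

        /*
         * Hypothetical caller (not part of this patch): reserve GVA space
         * for a mapping and give it back if a later step fails.
         */
        static int example_reserve_va(struct vm_gk20a *vm, u64 size)
        {
                u64 va;
                int err;

                /* Rounds size up to the page size for this index; 0 means failure. */
                va = __nvgpu_vm_alloc_va(vm, size, gmmu_page_size_small);
                if (!va)
                        return -ENOMEM;

                /* Illustrative stand-in for programming the GMMU mapping. */
                err = example_program_mapping(vm, va, size);
                if (err) {
                        /* Return the VA range to the GVA allocator. */
                        __nvgpu_vm_free_va(vm, va, gmmu_page_size_small);
                        return err;
                }

                return 0;
        }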
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3bdc905e..3b3b7a10 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -29,6 +29,54 @@ int vm_aspace_id(struct vm_gk20a *vm)
 	return vm->as_share ? vm->as_share->id : -1;
 }
 
+u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
+			enum gmmu_pgsz_gk20a pgsz_idx)
+
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma = NULL;
+	u64 addr;
+	u64 page_size = vm->gmmu_page_sizes[pgsz_idx];
+
+	vma = vm->vma[pgsz_idx];
+
+	if (pgsz_idx >= gmmu_nr_page_sizes) {
+		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
+		return 0;
+	}
+
+	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
+		nvgpu_err(g, "(%s) unsupported page size requested", vma->name);
+		return 0;
+	}
+
+	/* Be certain we round up to page_size if needed */
+	size = (size + ((u64)page_size - 1)) & ~((u64)page_size - 1);
+	nvgpu_log(g, gpu_dbg_map, "size=0x%llx @ pgsz=%dKB", size,
+		  vm->gmmu_page_sizes[pgsz_idx] >> 10);
+
+	addr = nvgpu_alloc(vma, size);
+	if (!addr) {
+		nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size);
+		return 0;
+	}
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	return addr;
+}
+
+int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
+		       enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
+
+	nvgpu_log(g, gpu_dbg_map, "(%s) addr: 0x%llx", vma->name, addr);
+	nvgpu_free(vma, addr);
+
+	return 0;
+}
+
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
 	memset(mapping_batch, 0, sizeof(*mapping_batch));
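A quick note on the size round-up inside __nvgpu_vm_alloc_va() above: the expression
(size + page_size - 1) & ~(page_size - 1) rounds size up to the next page_size
boundary, and relies on page_size being a power of two. A small worked example,
assuming a 64KB big-page size (0x10000) purely for illustration:

        /*
         * size       = 0x12345
         * + 0xffff   = 0x22344
         * & ~0xffff  = 0x20000   (next 64KB-aligned size above 0x12345)
         */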