path: root/drivers/gpu/nvgpu/common/mm/vm.c
author	Sourab Gupta <sourabg@nvidia.com>	2017-11-23 09:27:18 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-01-04 03:36:08 -0500
commit	fcdde6ad8ae0aa22b8edeb6a9462e7f2a1a213ce (patch)
tree	655df3a8ffec6d8864da86423448db05519167bf	/drivers/gpu/nvgpu/common/mm/vm.c
parent	7240b3c2515413583722e35f9ef2a7745961531d (diff)
gpu: nvgpu: add guest_managed field in vm_gk20a
Add a guest_managed field to vm_gk20a to identify guest-managed VMs, with corresponding checks to ensure that a guest-managed VM has no kernel section. Also make __nvgpu_vm_init available globally, so that a VM can be allocated elsewhere, have the requisite fields set, and then be passed to the function for initialization.

Change-Id: Iad841d1b8ff9c894fe9d350dc43d74247e9c5512
Signed-off-by: Sourab Gupta <sourabg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1617171
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
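The exported initializer is intended to be called on a vm_gk20a that the caller has already allocated and partially populated. Below is a minimal, hedged sketch of that flow for a guest-managed VM; the example_guest_vm_create() wrapper is hypothetical, and the nvgpu_kzalloc()/nvgpu_kfree() helpers plus the trailing parameters of __nvgpu_vm_init() beyond those visible in this diff are assumptions for illustration, not taken from this change.

/*
 * Sketch only: allocate a VM, mark it guest managed, and hand it to the
 * newly exported __nvgpu_vm_init(). Parameter names past low_hole are
 * assumed; consult the actual prototype in the nvgpu headers.
 */
static struct vm_gk20a *example_guest_vm_create(struct gk20a *g,
						u32 big_page_size,
						u64 low_hole,
						u64 aperture_size)
{
	struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
	int err;

	if (!vm)
		return NULL;

	/* Requisite field set before init: this VM is guest managed. */
	vm->guest_managed = true;

	/*
	 * kernel_reserved must be 0 here; the new WARN_ON() in
	 * __nvgpu_vm_init() returns -EINVAL for a guest-managed VM that
	 * asks for a kernel section.
	 */
	err = __nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
			      0ULL /* kernel_reserved */, aperture_size,
			      false /* big_pages (assumed) */,
			      false /* userspace_managed (assumed) */,
			      "guest" /* name (assumed) */);
	if (err != 0) {
		nvgpu_kfree(g, vm);
		return NULL;
	}

	return vm;
}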
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index ee9d2e0b..e90437a3 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -109,6 +109,11 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
 
 	vma = vm->vma[pgsz_idx];
 
+	if (vm->guest_managed) {
+		nvgpu_err(g, "Illegal GPU allocation on behalf of guest OS");
+		return 0;
+	}
+
 	if (pgsz_idx >= gmmu_nr_page_sizes) {
 		nvgpu_err(g, "(%s) invalid page size requested", vma->name);
 		return 0;
@@ -237,7 +242,10 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
 	return 0;
 }
 
-static int __nvgpu_vm_init(struct mm_gk20a *mm,
+/*
+ * Initialize a preallocated vm
+ */
+int __nvgpu_vm_init(struct mm_gk20a *mm,
 			   struct vm_gk20a *vm,
 			   u32 big_page_size,
 			   u64 low_hole,
@@ -258,6 +266,9 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,
 	if (WARN_ON(kernel_reserved + low_hole > aperture_size))
 		return -ENOMEM;
 
+	if (WARN_ON(vm->guest_managed && kernel_reserved != 0))
+		return -EINVAL;
+
 	nvgpu_log_info(g, "Init space for %s: valimit=0x%llx, "
 		"LP size=0x%x lowhole=0x%llx",
 		name, aperture_size,
@@ -337,7 +348,7 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm,
 
 	if (WARN_ON(user_vma_start > user_vma_limit) ||
 	    WARN_ON(user_lp_vma_start > user_lp_vma_limit) ||
-	    WARN_ON(kernel_vma_start >= kernel_vma_limit)) {
+	    WARN_ON(!vm->guest_managed && kernel_vma_start >= kernel_vma_limit)) {
 		err = -EINVAL;
 		goto clean_up_page_tables;
 	}