author    Alex Waterman <alexw@nvidia.com>  2017-04-26 17:27:02 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-24 15:14:13 -0400
commit    b70bad4b9f40e94f731fd9d509e1f3f6617f0b05 (patch)
tree      21bfaf082aeb7662eb194f72c5f33a36c7cb7bdc /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent    92fe030e5250409ecd500dcf719547f3fb0f1873 (diff)
gpu: nvgpu: Refactor gk20a_vm_alloc_va()
This function is an internal function to the VM manager that allocates
virtual memory space in the GVA allocator. It is unfortunately used in
the vGPU code, though. In any event, this patch cleans up and moves the
implementation of these functions into the VM common code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I24a3d29b5fcb12615df27d2ac82891d1bacfe541
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477745
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  |  57
1 file changed, 3 insertions(+), 54 deletions(-)
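
For reference, below is a minimal sketch of what the relocated helpers could look like in the VM common code. The bodies are lifted from the gk20a implementation removed in this patch; the __nvgpu_vm_alloc_va()/__nvgpu_vm_free_va() names and the dropped size argument on the free path come from the updated call sites in the hunks further down, while the destination file (e.g. common/mm/vm.c) and the exact final form are assumptions, not something shown in this diff.

/*
 * Sketch only: reconstructed from the gk20a_vm_alloc_va()/gk20a_vm_free_va()
 * bodies removed below. The real common-code implementation may differ.
 */
u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
			enum gmmu_pgsz_gk20a pgsz_idx)
{
	struct gk20a *g = vm->mm->g;
	struct nvgpu_allocator *vma;
	u64 page_size;
	u64 addr;

	/* Bounds-check pgsz_idx before indexing vm->vma[]. */
	if (pgsz_idx >= gmmu_nr_page_sizes) {
		nvgpu_warn(g, "invalid page size requested");
		return 0;
	}

	if ((pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
		nvgpu_warn(g, "unsupported page size requested");
		return 0;
	}

	vma = vm->vma[pgsz_idx];
	page_size = vm->gmmu_page_sizes[pgsz_idx];

	/* Round the request up to the GMMU page size for this VMA. */
	size = (size + page_size - 1) & ~(page_size - 1);

	/* Carve the range out of the GVA allocator backing this VMA. */
	addr = nvgpu_alloc(vma, size);
	if (!addr) {
		nvgpu_err(g, "%s oom: sz=0x%llx", vma->name, size);
		return 0;
	}

	return addr;
}

int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
		       enum gmmu_pgsz_gk20a pgsz_idx)
{
	/* The allocator tracks the size, so only the address is needed. */
	nvgpu_free(vm->vma[pgsz_idx], addr);

	return 0;
}

The updated call sites in gk20a_locked_gmmu_map() and gk20a_locked_gmmu_unmap() in the hunks below show the intended usage.
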
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 5051f028..2642a0b1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1192,57 +1192,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
-		      u64 size,
-		      enum gmmu_pgsz_gk20a gmmu_pgsz_idx)
-
-{
-	struct nvgpu_allocator *vma = vm->vma[gmmu_pgsz_idx];
-	u64 offset;
-	u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
-	struct gk20a *g = vm->mm->g;
-
-	if (gmmu_pgsz_idx >= gmmu_nr_page_sizes) {
-		nvgpu_warn(g,
-			   "invalid page size requested in gk20a vm alloc");
-		return 0;
-	}
-
-	if ((gmmu_pgsz_idx == gmmu_page_size_big) && !vm->big_pages) {
-		nvgpu_warn(g, "unsupportd page size requested");
-		return 0;
-
-	}
-
-	/* Be certain we round up to gmmu_page_size if needed */
-	size = (size + ((u64)gmmu_page_size - 1)) & ~((u64)gmmu_page_size - 1);
-	gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
-		       vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10);
-
-	offset = nvgpu_alloc(vma, size);
-	if (!offset) {
-		nvgpu_err(vm->mm->g,
-			  "%s oom: sz=0x%llx", vma->name, size);
-		return 0;
-	}
-
-	gk20a_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
-	return offset;
-}
-
-int gk20a_vm_free_va(struct vm_gk20a *vm,
-		     u64 offset, u64 size,
-		     enum gmmu_pgsz_gk20a pgsz_idx)
-{
-	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];
-
-	gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
-		       vma->name, offset, size);
-	nvgpu_free(vma, offset);
-
-	return 0;
-}
-
 int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 				      u32 flags,
 				      struct buffer_attrs *bfr,
@@ -1313,7 +1262,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = gk20a_vm_alloc_va(vm, size,
+		map_offset = __nvgpu_vm_alloc_va(vm, size,
 					       pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space");
@@ -1364,7 +1313,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	return map_offset;
 fail_validate:
 	if (allocated)
-		gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
+		__nvgpu_vm_free_va(vm, map_offset, pgsz_idx);
 fail_alloc:
 	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
@@ -1383,7 +1332,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (va_allocated) {
-		err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
+		err = __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 		if (err) {
 			nvgpu_err(g, "failed to free va");
 			return;