diff options
Diffstat (limited to 'drivers/gpu/nvgpu'):

 drivers/gpu/nvgpu/gk20a/as_gk20a.c |  4
 drivers/gpu/nvgpu/gk20a/gk20a.h    |  8
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 42
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h |  3
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c   |  2
 5 files changed, 4 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index adf0297b..c8b3b02f 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -279,9 +279,7 @@ static int gk20a_as_ioctl_get_va_regions(
 
 	for (i = 0; i < write_entries; ++i) {
 		struct nvgpu_as_va_region region;
-		struct nvgpu_allocator *vma =
-			nvgpu_alloc_initialized(&vm->fixed) ?
-			&vm->fixed : vm->vma[i];
+		struct nvgpu_allocator *vma = vm->vma[i];
 
 		memset(&region, 0, sizeof(struct nvgpu_as_va_region));
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index f7ceaced..2f6e83a4 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -932,14 +932,6 @@ struct gk20a {
 	struct nvgpu_dbg_gpu_reg_op *dbg_regops_tmp_buf;
 	u32 dbg_regops_tmp_buf_ops;
 
-	/*
-	 * When set subsequent VMAs will separate fixed and non-fixed
-	 * allocations. This avoids conflicts with fixed and non-fixed allocs
-	 * for some tests. The value in separate_fixed_allocs is used to
-	 * determine the split boundary.
-	 */
-	u64 separate_fixed_allocs;
-
 	void (*remove_support)(struct device *);
 
 	u64 pg_ingating_time_us;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index bf73e79f..b04a7e87 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -4437,31 +4437,6 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 		goto clean_up_page_tables;
 	}
 
-	/*
-	 * Attempt to make a separate VM for fixed allocations.
-	 */
-	if (g->separate_fixed_allocs &&
-	    user_vma_start < user_vma_limit) {
-		if (g->separate_fixed_allocs >= user_vma_limit)
-			goto clean_up_page_tables;
-
-		snprintf(alloc_name, sizeof(alloc_name),
-			 "gk20a_%s-fixed", name);
-
-		err = __nvgpu_buddy_allocator_init(g, &vm->fixed,
-						   vm, alloc_name,
-						   user_vma_start,
-						   g->separate_fixed_allocs,
-						   SZ_4K,
-						   GPU_BALLOC_MAX_ORDER,
-						   GPU_ALLOC_GVA_SPACE);
-		if (err)
-			goto clean_up_page_tables;
-
-		/* Make sure to update the user vma size. */
-		user_vma_start = g->separate_fixed_allocs;
-	}
-
 	if (user_vma_start < user_vma_limit) {
 		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s", name);
 		if (!gk20a_big_pages_possible(vm, user_vma_start,
@@ -4631,18 +4606,15 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	}
 
 	vma = vm->vma[pgsz_idx];
-	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) {
-		if (nvgpu_alloc_initialized(&vm->fixed))
-			vma = &vm->fixed;
+	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
 		vaddr_start = nvgpu_alloc_fixed(vma, args->o_a.offset,
 						(u64)args->pages *
 						(u64)args->page_size,
 						args->page_size);
-	} else {
+	else
 		vaddr_start = nvgpu_alloc(vma,
 					  (u64)args->pages *
 					  (u64)args->page_size);
-	}
 
 	if (!vaddr_start) {
 		kfree(va_node);
@@ -4710,10 +4682,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	pgsz_idx = __get_pte_size(vm, args->offset,
 				  args->page_size * args->pages);
 
-	if (nvgpu_alloc_initialized(&vm->fixed))
-		vma = &vm->fixed;
-	else
-		vma = vm->vma[pgsz_idx];
+	vma = vm->vma[pgsz_idx];
 	nvgpu_free(vma, args->offset);
 
 	mutex_lock(&vm->update_gmmu_lock);
@@ -4902,8 +4871,6 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
 		nvgpu_alloc_destroy(&vm->kernel);
 	if (nvgpu_alloc_initialized(&vm->user))
 		nvgpu_alloc_destroy(&vm->user);
-	if (nvgpu_alloc_initialized(&vm->fixed))
-		nvgpu_alloc_destroy(&vm->fixed);
 
 	gk20a_vm_free_entries(vm, &vm->pdb, 0);
 }
@@ -5467,9 +5434,6 @@ void gk20a_mm_debugfs_init(struct device *dev)
 	struct dentry *gpu_root = platform->debugfs;
 	struct gk20a *g = gk20a_get_platform(dev)->g;
 
-	debugfs_create_x64("separate_fixed_allocs", 0664, gpu_root,
-			   &g->separate_fixed_allocs);
-
 	debugfs_create_bool("force_pramin", 0664, gpu_root,
 			    &g->mm.force_pramin);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 394d1d25..f362e27c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -270,9 +270,6 @@ struct vm_gk20a {
 
 	struct gk20a_mm_entry pdb;
 
-	/* If necessary, split fixed from non-fixed. */
-	struct nvgpu_allocator fixed;
-
 	struct nvgpu_allocator *vma[gmmu_nr_page_sizes];
 	struct nvgpu_allocator kernel;
 	struct nvgpu_allocator user;
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index a21a020d..76631b96 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -231,8 +231,6 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 		nvgpu_alloc_destroy(&vm->kernel);
 	if (nvgpu_alloc_initialized(&vm->user))
 		nvgpu_alloc_destroy(&vm->user);
-	if (nvgpu_alloc_initialized(&vm->fixed))
-		nvgpu_alloc_destroy(&vm->fixed);
 
 	mutex_unlock(&vm->update_gmmu_lock);
 