Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c  |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h          |  1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c       | 28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h       |  4
4 files changed, 13 insertions, 24 deletions
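
Taken together, the change renames nvgpu_vm_remove() to nvgpu_vm_remove_inst() and nvgpu_vm_remove_support_nofree() to __nvgpu_vm_remove(), drops the vm_remove hook from struct gpu_ops (along with its gk20a wiring), deletes the gk20a copy of nvgpu_deinit_vm(), and exports the previously static gk20a_vm_free_entries() from mm_gk20a.h.
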
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 82bf7b3e..1f9b856d 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -1924,7 +1924,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
 err_unmap:
 	nvgpu_vm_unmap_buffer(vm, args->offset, NULL);
 err_remove_vm:
-	nvgpu_vm_remove(vm, &mm->perfbuf.inst_block);
+	nvgpu_vm_remove_inst(vm, &mm->perfbuf.inst_block);
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
@@ -1962,7 +1962,7 @@ static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
 	err = gk20a_perfbuf_disable_locked(g);
 
 	nvgpu_vm_unmap_buffer(vm, offset, NULL);
-	nvgpu_vm_remove(vm, &mm->perfbuf.inst_block);
+	nvgpu_vm_remove_inst(vm, &mm->perfbuf.inst_block);
 
 	g->perfbuf.owner = NULL;
 	g->perfbuf.offset = 0;
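
Note on the rename above: both perfbuf paths keep the same two arguments, the VM and the perfbuf instance block, and only the helper's name changes from nvgpu_vm_remove() to nvgpu_vm_remove_inst(). A minimal sketch of what the renamed helper presumably looks like, assuming it simply pairs instance-block teardown with the plain VM removal (__nvgpu_vm_remove()) seen elsewhere in this diff; gk20a_from_vm() is used for illustration only and is not part of this change:

	/* Sketch only, not taken from this change: remove a VM together with
	 * the instance block that was bound to it. */
	void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
	{
		struct gk20a *g = gk20a_from_vm(vm);	/* assumed accessor */

		/* Free the hardware instance block first... */
		gk20a_free_inst_block(g, inst_block);

		/* ...then tear down the VM itself. */
		__nvgpu_vm_remove(vm);
	}
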
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index a8837433..ceadbae2 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -651,7 +651,6 @@ struct gpu_ops {
 			int rw_flag,
 			bool sparse,
 			struct vm_gk20a_mapping_batch *batch);
-	void (*vm_remove)(struct vm_gk20a *vm);
 	int (*vm_alloc_share)(struct gk20a_as_share *as_share,
 			 u32 big_page_size, u32 flags);
 	int (*vm_bind_channel)(struct gk20a_as_share *as_share,
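
The deleted vm_remove hook was only ever wired to the common implementation; the gk20a_init_mm() hunk further down drops the matching assignment to nvgpu_vm_remove_support, so no per-chip behavior is lost. Callers that previously went through the HAL table would presumably now reach the common VM teardown code directly; which common helper they end up calling is not shown in this diff.

	/* Before this change, teardown went through the per-chip table: */
	g->ops.mm.vm_remove(vm);	/* pointed at nvgpu_vm_remove_support on gk20a */
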
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 17f1622f..183a540a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -464,7 +464,7 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm)
 
 	mm->vidmem.ce_ctx_id = (u32)~0;
 
-	nvgpu_vm_remove_support_nofree(&mm->ce.vm);
+	__nvgpu_vm_remove(&mm->ce.vm);
 
 }
 
@@ -476,11 +476,11 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 		g->ops.mm.remove_bar2_vm(g);
 
 	if (g->ops.mm.is_bar1_supported(g))
-		nvgpu_vm_remove(&mm->bar1.vm, &mm->bar1.inst_block);
+		nvgpu_vm_remove_inst(&mm->bar1.vm, &mm->bar1.inst_block);
 
-	nvgpu_vm_remove(&mm->pmu.vm, &mm->pmu.inst_block);
+	nvgpu_vm_remove_inst(&mm->pmu.vm, &mm->pmu.inst_block);
 	gk20a_free_inst_block(gk20a_from_mm(mm), &mm->hwpm.inst_block);
-	nvgpu_vm_remove_support_nofree(&mm->cde.vm);
+	__nvgpu_vm_remove(&mm->cde.vm);
 
 	gk20a_semaphore_sea_destroy(g);
 	gk20a_vidmem_destroy(g);
@@ -2337,10 +2337,9 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 	return;
 }
 
-
-static void gk20a_vm_free_entries(struct vm_gk20a *vm,
-				   struct gk20a_mm_entry *parent,
-				   int level)
+void gk20a_vm_free_entries(struct vm_gk20a *vm,
+			   struct gk20a_mm_entry *parent,
+			   int level)
 {
 	int i;
 
@@ -2663,18 +2662,6 @@ int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
 	return 0;
 }
 
-void nvgpu_deinit_vm(struct vm_gk20a *vm)
-{
-	if (nvgpu_alloc_initialized(&vm->kernel))
-		nvgpu_alloc_destroy(&vm->kernel);
-	if (nvgpu_alloc_initialized(&vm->user))
-		nvgpu_alloc_destroy(&vm->user);
-	if (nvgpu_alloc_initialized(&vm->user_lp))
-		nvgpu_alloc_destroy(&vm->user_lp);
-
-	gk20a_vm_free_entries(vm, &vm->pdb, 0);
-}
-
 int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	int err;
@@ -3151,7 +3138,6 @@ void gk20a_init_mm(struct gpu_ops *gops)
 {
 	gops->mm.gmmu_map = gk20a_locked_gmmu_map;
 	gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
-	gops->mm.vm_remove = nvgpu_vm_remove_support;
 	gops->mm.vm_alloc_share = gk20a_vm_alloc_share;
 	gops->mm.vm_bind_channel = gk20a_vm_bind_channel;
 	gops->mm.fb_flush = gk20a_mm_fb_flush;
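
The nvgpu_deinit_vm() body deleted above appears to be relocated rather than dropped: the companion change in this diff makes gk20a_vm_free_entries() non-static and adds its prototype to mm_gk20a.h below, which is exactly what an out-of-file copy of nvgpu_deinit_vm() would need. A sketch of the relocated function, assuming it keeps the deleted body unchanged; where it now lives is not shown in this diff:

	void nvgpu_deinit_vm(struct vm_gk20a *vm)
	{
		/* Tear down the kernel, user and user_lp sub-allocators... */
		if (nvgpu_alloc_initialized(&vm->kernel))
			nvgpu_alloc_destroy(&vm->kernel);
		if (nvgpu_alloc_initialized(&vm->user))
			nvgpu_alloc_destroy(&vm->user);
		if (nvgpu_alloc_initialized(&vm->user_lp))
			nvgpu_alloc_destroy(&vm->user_lp);

		/* ...then free the page-table tree, starting at the root PDB. */
		gk20a_vm_free_entries(vm, &vm->pdb, 0);
	}
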
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 90a72811..4adf346e 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -463,4 +463,8 @@ int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
 			   u64 *buffer_id, u64 *buffer_len);
 void gk20a_vm_unmap_locked_kref(struct kref *ref);
 
+void gk20a_vm_free_entries(struct vm_gk20a *vm,
+			   struct gk20a_mm_entry *parent,
+			   int level);
+
 #endif /* MM_GK20A_H */