diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-05-05 18:00:23 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-05-26 06:33:57 -0400 |
commit | 0bb47c3675d2030545d40353931e2b8120541de4 (patch) | |
tree | 1a23b45c1ac19dbc98e1d4a585822eb47b7dfeb2 /drivers/gpu/nvgpu | |
parent | fbafc7eba41ba7654dfdadf51a53acf1638e9fa1 (diff) |
gpu: nvgpu: Add and use VM init/deinit APIs
Remove the VM init/de-init from the HAL and instead use a single
set of routines that init/de-init VMs. This prevents the code from
diverging between vGPUs and regular GPUs.
This patch also clears up the naming of the routines a little bit.
Since some VMs are used in place and others are dynamically allocated,
the APIs for freeing them were confusing. Some free calls also
clean up an instance block (this is API abuse - but this is how it
currently exists).
The new API looks like this:
void __nvgpu_vm_remove(struct vm_gk20a *vm);
void nvgpu_vm_remove(struct vm_gk20a *vm);
void nvgpu_vm_remove_inst(struct vm_gk20a *vm,
struct nvgpu_mem *inst_block);
void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm);
int nvgpu_init_vm(struct mm_gk20a *mm,
struct vm_gk20a *vm,
u32 big_page_size,
u64 low_hole,
u64 kernel_reserved,
u64 aperture_size,
bool big_pages,
bool userspace_managed,
char *name);
void nvgpu_deinit_vm(struct vm_gk20a *vm);
JIRA NVGPU-12
JIRA NVGPU-30
Change-Id: Ia4016384c54746bfbcaa4bdd0d29d03d5d7f7f1b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477747
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm.c | 57 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 28 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/mm_gm20b.c | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 2 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/vm.h | 25 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 40 |
9 files changed, 72 insertions, 90 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index e42c7c5a..bdc8554c 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -442,7 +442,22 @@ clean_up_page_tables: | |||
442 | return err; | 442 | return err; |
443 | } | 443 | } |
444 | 444 | ||
445 | void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm) | 445 | void nvgpu_deinit_vm(struct vm_gk20a *vm) |
446 | { | ||
447 | if (nvgpu_alloc_initialized(&vm->kernel)) | ||
448 | nvgpu_alloc_destroy(&vm->kernel); | ||
449 | if (nvgpu_alloc_initialized(&vm->user)) | ||
450 | nvgpu_alloc_destroy(&vm->user); | ||
451 | if (nvgpu_alloc_initialized(&vm->user_lp)) | ||
452 | nvgpu_alloc_destroy(&vm->user_lp); | ||
453 | |||
454 | gk20a_vm_free_entries(vm, &vm->pdb, 0); | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * Cleanup the VM but don't nvgpu_kfree() on the vm pointer. | ||
459 | */ | ||
460 | void __nvgpu_vm_remove(struct vm_gk20a *vm) | ||
446 | { | 461 | { |
447 | struct nvgpu_mapped_buf *mapped_buffer; | 462 | struct nvgpu_mapped_buf *mapped_buffer; |
448 | struct nvgpu_vm_area *vm_area, *vm_area_tmp; | 463 | struct nvgpu_vm_area *vm_area, *vm_area_tmp; |
@@ -480,22 +495,40 @@ void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm) | |||
480 | 495 | ||
481 | nvgpu_deinit_vm(vm); | 496 | nvgpu_deinit_vm(vm); |
482 | 497 | ||
498 | #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION | ||
499 | if (g->is_virtual) | ||
500 | nvgpu_vm_remove_vgpu(vm); | ||
501 | #endif | ||
502 | |||
483 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 503 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
484 | } | 504 | } |
485 | 505 | ||
486 | void nvgpu_vm_remove_support(struct vm_gk20a *vm) | 506 | /* |
507 | * Remove and nvgpu_kfree() the VM struct. | ||
508 | */ | ||
509 | void nvgpu_vm_remove(struct vm_gk20a *vm) | ||
487 | { | 510 | { |
488 | nvgpu_vm_remove_support_nofree(vm); | 511 | __nvgpu_vm_remove(vm); |
489 | /* vm is not used anymore. release it. */ | 512 | |
490 | nvgpu_kfree(vm->mm->g, vm); | 513 | nvgpu_kfree(vm->mm->g, vm); |
491 | } | 514 | } |
492 | 515 | ||
493 | static void nvgpu_vm_remove_support_kref(struct kref *ref) | 516 | /* |
517 | * Note: this does not nvgpu_kfree() the vm. This might be a bug. | ||
518 | */ | ||
519 | void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block) | ||
520 | { | ||
521 | struct gk20a *g = vm->mm->g; | ||
522 | |||
523 | gk20a_free_inst_block(g, inst_block); | ||
524 | __nvgpu_vm_remove(vm); | ||
525 | } | ||
526 | |||
527 | static void __nvgpu_vm_remove_kref(struct kref *ref) | ||
494 | { | 528 | { |
495 | struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); | 529 | struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); |
496 | struct gk20a *g = gk20a_from_vm(vm); | ||
497 | 530 | ||
498 | g->ops.mm.vm_remove(vm); | 531 | nvgpu_vm_remove(vm); |
499 | } | 532 | } |
500 | 533 | ||
501 | void nvgpu_vm_get(struct vm_gk20a *vm) | 534 | void nvgpu_vm_get(struct vm_gk20a *vm) |
@@ -505,15 +538,7 @@ void nvgpu_vm_get(struct vm_gk20a *vm) | |||
505 | 538 | ||
506 | void nvgpu_vm_put(struct vm_gk20a *vm) | 539 | void nvgpu_vm_put(struct vm_gk20a *vm) |
507 | { | 540 | { |
508 | kref_put(&vm->ref, nvgpu_vm_remove_support_kref); | 541 | kref_put(&vm->ref, __nvgpu_vm_remove_kref); |
509 | } | ||
510 | |||
511 | void nvgpu_vm_remove(struct vm_gk20a *vm, struct nvgpu_mem *inst_block) | ||
512 | { | ||
513 | struct gk20a *g = vm->mm->g; | ||
514 | |||
515 | gk20a_free_inst_block(g, inst_block); | ||
516 | nvgpu_vm_remove_support_nofree(vm); | ||
517 | } | 542 | } |
518 | 543 | ||
519 | int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, | 544 | int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, |
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index 82bf7b3e..1f9b856d 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |||
@@ -1924,7 +1924,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s, | |||
1924 | err_unmap: | 1924 | err_unmap: |
1925 | nvgpu_vm_unmap_buffer(vm, args->offset, NULL); | 1925 | nvgpu_vm_unmap_buffer(vm, args->offset, NULL); |
1926 | err_remove_vm: | 1926 | err_remove_vm: |
1927 | nvgpu_vm_remove(vm, &mm->perfbuf.inst_block); | 1927 | nvgpu_vm_remove_inst(vm, &mm->perfbuf.inst_block); |
1928 | nvgpu_mutex_release(&g->dbg_sessions_lock); | 1928 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1929 | return err; | 1929 | return err; |
1930 | } | 1930 | } |
@@ -1962,7 +1962,7 @@ static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset) | |||
1962 | err = gk20a_perfbuf_disable_locked(g); | 1962 | err = gk20a_perfbuf_disable_locked(g); |
1963 | 1963 | ||
1964 | nvgpu_vm_unmap_buffer(vm, offset, NULL); | 1964 | nvgpu_vm_unmap_buffer(vm, offset, NULL); |
1965 | nvgpu_vm_remove(vm, &mm->perfbuf.inst_block); | 1965 | nvgpu_vm_remove_inst(vm, &mm->perfbuf.inst_block); |
1966 | 1966 | ||
1967 | g->perfbuf.owner = NULL; | 1967 | g->perfbuf.owner = NULL; |
1968 | g->perfbuf.offset = 0; | 1968 | g->perfbuf.offset = 0; |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index a8837433..ceadbae2 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -651,7 +651,6 @@ struct gpu_ops { | |||
651 | int rw_flag, | 651 | int rw_flag, |
652 | bool sparse, | 652 | bool sparse, |
653 | struct vm_gk20a_mapping_batch *batch); | 653 | struct vm_gk20a_mapping_batch *batch); |
654 | void (*vm_remove)(struct vm_gk20a *vm); | ||
655 | int (*vm_alloc_share)(struct gk20a_as_share *as_share, | 654 | int (*vm_alloc_share)(struct gk20a_as_share *as_share, |
656 | u32 big_page_size, u32 flags); | 655 | u32 big_page_size, u32 flags); |
657 | int (*vm_bind_channel)(struct gk20a_as_share *as_share, | 656 | int (*vm_bind_channel)(struct gk20a_as_share *as_share, |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 17f1622f..183a540a 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -464,7 +464,7 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm) | |||
464 | 464 | ||
465 | mm->vidmem.ce_ctx_id = (u32)~0; | 465 | mm->vidmem.ce_ctx_id = (u32)~0; |
466 | 466 | ||
467 | nvgpu_vm_remove_support_nofree(&mm->ce.vm); | 467 | __nvgpu_vm_remove(&mm->ce.vm); |
468 | 468 | ||
469 | } | 469 | } |
470 | 470 | ||
@@ -476,11 +476,11 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm) | |||
476 | g->ops.mm.remove_bar2_vm(g); | 476 | g->ops.mm.remove_bar2_vm(g); |
477 | 477 | ||
478 | if (g->ops.mm.is_bar1_supported(g)) | 478 | if (g->ops.mm.is_bar1_supported(g)) |
479 | nvgpu_vm_remove(&mm->bar1.vm, &mm->bar1.inst_block); | 479 | nvgpu_vm_remove_inst(&mm->bar1.vm, &mm->bar1.inst_block); |
480 | 480 | ||
481 | nvgpu_vm_remove(&mm->pmu.vm, &mm->pmu.inst_block); | 481 | nvgpu_vm_remove_inst(&mm->pmu.vm, &mm->pmu.inst_block); |
482 | gk20a_free_inst_block(gk20a_from_mm(mm), &mm->hwpm.inst_block); | 482 | gk20a_free_inst_block(gk20a_from_mm(mm), &mm->hwpm.inst_block); |
483 | nvgpu_vm_remove_support_nofree(&mm->cde.vm); | 483 | __nvgpu_vm_remove(&mm->cde.vm); |
484 | 484 | ||
485 | gk20a_semaphore_sea_destroy(g); | 485 | gk20a_semaphore_sea_destroy(g); |
486 | gk20a_vidmem_destroy(g); | 486 | gk20a_vidmem_destroy(g); |
@@ -2337,10 +2337,9 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer, | |||
2337 | return; | 2337 | return; |
2338 | } | 2338 | } |
2339 | 2339 | ||
2340 | 2340 | void gk20a_vm_free_entries(struct vm_gk20a *vm, | |
2341 | static void gk20a_vm_free_entries(struct vm_gk20a *vm, | 2341 | struct gk20a_mm_entry *parent, |
2342 | struct gk20a_mm_entry *parent, | 2342 | int level) |
2343 | int level) | ||
2344 | { | 2343 | { |
2345 | int i; | 2344 | int i; |
2346 | 2345 | ||
@@ -2663,18 +2662,6 @@ int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, | |||
2663 | return 0; | 2662 | return 0; |
2664 | } | 2663 | } |
2665 | 2664 | ||
2666 | void nvgpu_deinit_vm(struct vm_gk20a *vm) | ||
2667 | { | ||
2668 | if (nvgpu_alloc_initialized(&vm->kernel)) | ||
2669 | nvgpu_alloc_destroy(&vm->kernel); | ||
2670 | if (nvgpu_alloc_initialized(&vm->user)) | ||
2671 | nvgpu_alloc_destroy(&vm->user); | ||
2672 | if (nvgpu_alloc_initialized(&vm->user_lp)) | ||
2673 | nvgpu_alloc_destroy(&vm->user_lp); | ||
2674 | |||
2675 | gk20a_vm_free_entries(vm, &vm->pdb, 0); | ||
2676 | } | ||
2677 | |||
2678 | int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) | 2665 | int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block) |
2679 | { | 2666 | { |
2680 | int err; | 2667 | int err; |
@@ -3151,7 +3138,6 @@ void gk20a_init_mm(struct gpu_ops *gops) | |||
3151 | { | 3138 | { |
3152 | gops->mm.gmmu_map = gk20a_locked_gmmu_map; | 3139 | gops->mm.gmmu_map = gk20a_locked_gmmu_map; |
3153 | gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap; | 3140 | gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap; |
3154 | gops->mm.vm_remove = nvgpu_vm_remove_support; | ||
3155 | gops->mm.vm_alloc_share = gk20a_vm_alloc_share; | 3141 | gops->mm.vm_alloc_share = gk20a_vm_alloc_share; |
3156 | gops->mm.vm_bind_channel = gk20a_vm_bind_channel; | 3142 | gops->mm.vm_bind_channel = gk20a_vm_bind_channel; |
3157 | gops->mm.fb_flush = gk20a_mm_fb_flush; | 3143 | gops->mm.fb_flush = gk20a_mm_fb_flush; |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 90a72811..4adf346e 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -463,4 +463,8 @@ int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd, | |||
463 | u64 *buffer_id, u64 *buffer_len); | 463 | u64 *buffer_id, u64 *buffer_len); |
464 | void gk20a_vm_unmap_locked_kref(struct kref *ref); | 464 | void gk20a_vm_unmap_locked_kref(struct kref *ref); |
465 | 465 | ||
466 | void gk20a_vm_free_entries(struct vm_gk20a *vm, | ||
467 | struct gk20a_mm_entry *parent, | ||
468 | int level); | ||
469 | |||
466 | #endif /* MM_GK20A_H */ | 470 | #endif /* MM_GK20A_H */ |
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c index 1405ef30..78e083d0 100644 --- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c | |||
@@ -60,7 +60,6 @@ void gm20b_init_mm(struct gpu_ops *gops) | |||
60 | gops->mm.support_sparse = gm20b_mm_support_sparse; | 60 | gops->mm.support_sparse = gm20b_mm_support_sparse; |
61 | gops->mm.gmmu_map = gk20a_locked_gmmu_map; | 61 | gops->mm.gmmu_map = gk20a_locked_gmmu_map; |
62 | gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap; | 62 | gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap; |
63 | gops->mm.vm_remove = nvgpu_vm_remove_support; | ||
64 | gops->mm.vm_alloc_share = gk20a_vm_alloc_share; | 63 | gops->mm.vm_alloc_share = gk20a_vm_alloc_share; |
65 | gops->mm.vm_bind_channel = gk20a_vm_bind_channel; | 64 | gops->mm.vm_bind_channel = gk20a_vm_bind_channel; |
66 | gops->mm.fb_flush = gk20a_mm_fb_flush; | 65 | gops->mm.fb_flush = gk20a_mm_fb_flush; |
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 6acea549..bc4aee3a 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c | |||
@@ -401,7 +401,7 @@ static void gp10b_remove_bar2_vm(struct gk20a *g) | |||
401 | struct mm_gk20a *mm = &g->mm; | 401 | struct mm_gk20a *mm = &g->mm; |
402 | 402 | ||
403 | gp10b_replayable_pagefault_buffer_deinit(g); | 403 | gp10b_replayable_pagefault_buffer_deinit(g); |
404 | nvgpu_vm_remove(&mm->bar2.vm, &mm->bar2.inst_block); | 404 | nvgpu_vm_remove_inst(&mm->bar2.vm, &mm->bar2.inst_block); |
405 | } | 405 | } |
406 | 406 | ||
407 | 407 | ||
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h index c89282bf..fed58f24 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h | |||
@@ -225,19 +225,22 @@ int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, | |||
225 | void nvgpu_remove_mapped_buf(struct vm_gk20a *vm, | 225 | void nvgpu_remove_mapped_buf(struct vm_gk20a *vm, |
226 | struct nvgpu_mapped_buf *mapped_buffer); | 226 | struct nvgpu_mapped_buf *mapped_buffer); |
227 | 227 | ||
228 | void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm); | 228 | void __nvgpu_vm_remove(struct vm_gk20a *vm); |
229 | void nvgpu_vm_remove_support(struct vm_gk20a *vm); | 229 | void nvgpu_vm_remove(struct vm_gk20a *vm); |
230 | void nvgpu_vm_remove(struct vm_gk20a *vm, struct nvgpu_mem *inst_block); | 230 | void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block); |
231 | #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION | ||
232 | void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm); | ||
233 | #endif | ||
231 | 234 | ||
232 | int nvgpu_init_vm(struct mm_gk20a *mm, | 235 | int nvgpu_init_vm(struct mm_gk20a *mm, |
233 | struct vm_gk20a *vm, | 236 | struct vm_gk20a *vm, |
234 | u32 big_page_size, | 237 | u32 big_page_size, |
235 | u64 low_hole, | 238 | u64 low_hole, |
236 | u64 kernel_reserved, | 239 | u64 kernel_reserved, |
237 | u64 aperture_size, | 240 | u64 aperture_size, |
238 | bool big_pages, | 241 | bool big_pages, |
239 | bool userspace_managed, | 242 | bool userspace_managed, |
240 | char *name); | 243 | char *name); |
241 | void nvgpu_deinit_vm(struct vm_gk20a *vm); | 244 | void nvgpu_deinit_vm(struct vm_gk20a *vm); |
242 | 245 | ||
243 | /* | 246 | /* |
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c index 63490aa5..db120d76 100644 --- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <nvgpu/kmem.h> | 18 | #include <nvgpu/kmem.h> |
19 | #include <nvgpu/dma.h> | 19 | #include <nvgpu/dma.h> |
20 | #include <nvgpu/bug.h> | 20 | #include <nvgpu/bug.h> |
21 | #include <nvgpu/vm.h> | ||
21 | #include <nvgpu/vm_area.h> | 22 | #include <nvgpu/vm_area.h> |
22 | 23 | ||
23 | #include "vgpu/vgpu.h" | 24 | #include "vgpu/vgpu.h" |
@@ -200,52 +201,18 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm, | |||
200 | /* TLB invalidate handled on server side */ | 201 | /* TLB invalidate handled on server side */ |
201 | } | 202 | } |
202 | 203 | ||
203 | static void vgpu_vm_remove_support(struct vm_gk20a *vm) | 204 | void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm) |
204 | { | 205 | { |
205 | struct gk20a *g = vm->mm->g; | 206 | struct gk20a *g = gk20a_from_vm(vm); |
206 | struct nvgpu_mapped_buf *mapped_buffer; | ||
207 | struct nvgpu_vm_area *vm_area, *vm_area_tmp; | ||
208 | struct tegra_vgpu_cmd_msg msg; | 207 | struct tegra_vgpu_cmd_msg msg; |
209 | struct tegra_vgpu_as_share_params *p = &msg.params.as_share; | 208 | struct tegra_vgpu_as_share_params *p = &msg.params.as_share; |
210 | struct nvgpu_rbtree_node *node = NULL; | ||
211 | int err; | 209 | int err; |
212 | 210 | ||
213 | gk20a_dbg_fn(""); | ||
214 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | ||
215 | |||
216 | /* TBD: add a flag here for the unmap code to recognize teardown | ||
217 | * and short-circuit any otherwise expensive operations. */ | ||
218 | |||
219 | nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers); | ||
220 | while (node) { | ||
221 | mapped_buffer = mapped_buffer_from_rbtree_node(node); | ||
222 | nvgpu_vm_unmap_locked(mapped_buffer, NULL); | ||
223 | nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers); | ||
224 | } | ||
225 | |||
226 | /* destroy remaining reserved memory areas */ | ||
227 | nvgpu_list_for_each_entry_safe(vm_area, vm_area_tmp, | ||
228 | &vm->vm_area_list, | ||
229 | nvgpu_vm_area, vm_area_list) { | ||
230 | nvgpu_list_del(&vm_area->vm_area_list); | ||
231 | nvgpu_kfree(g, vm_area); | ||
232 | } | ||
233 | |||
234 | msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE; | 211 | msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE; |
235 | msg.handle = vgpu_get_handle(g); | 212 | msg.handle = vgpu_get_handle(g); |
236 | p->handle = vm->handle; | 213 | p->handle = vm->handle; |
237 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); | 214 | err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); |
238 | WARN_ON(err || msg.ret); | 215 | WARN_ON(err || msg.ret); |
239 | |||
240 | if (nvgpu_alloc_initialized(&vm->kernel)) | ||
241 | nvgpu_alloc_destroy(&vm->kernel); | ||
242 | if (nvgpu_alloc_initialized(&vm->user)) | ||
243 | nvgpu_alloc_destroy(&vm->user); | ||
244 | |||
245 | nvgpu_mutex_release(&vm->update_gmmu_lock); | ||
246 | |||
247 | /* vm is not used anymore. release it. */ | ||
248 | nvgpu_kfree(g, vm); | ||
249 | } | 216 | } |
250 | 217 | ||
251 | u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size) | 218 | u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size) |
@@ -534,7 +501,6 @@ void vgpu_init_mm_ops(struct gpu_ops *gops) | |||
534 | gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode; | 501 | gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode; |
535 | gops->mm.gmmu_map = vgpu_locked_gmmu_map; | 502 | gops->mm.gmmu_map = vgpu_locked_gmmu_map; |
536 | gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap; | 503 | gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap; |
537 | gops->mm.vm_remove = vgpu_vm_remove_support; | ||
538 | gops->mm.vm_alloc_share = vgpu_vm_alloc_share; | 504 | gops->mm.vm_alloc_share = vgpu_vm_alloc_share; |
539 | gops->mm.vm_bind_channel = vgpu_vm_bind_channel; | 505 | gops->mm.vm_bind_channel = vgpu_vm_bind_channel; |
540 | gops->mm.fb_flush = vgpu_mm_fb_flush; | 506 | gops->mm.fb_flush = vgpu_mm_fb_flush; |