author     Alex Waterman <alexw@nvidia.com>                     2017-10-17 13:55:00 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-11-01 22:06:23 -0400
commit     d13c256d5ee11da1664377481543005142d9bd30 (patch)
tree       e00ef697d05a20428619c8920ddfcb645fc095d8 /drivers
parent     a37cec19f0cf5212cbd472cd8d94acaa1e1cff6d (diff)
gpu: nvgpu: VM unmap refactoring
Re-organize the unmap code so that it is better split between
OS-specific requirements and common core requirements. The new code
flow works as follows:
nvgpu_vm_unmap()
Is the primary entrance to the unmap path. It takes a VM and a GPU
virtual address to unmap. There's also an optional batch mapping
struct.
This function is responsible for making sure there is a real buffer
at the given address and, if it's called on a fixed mapping, that the
mapping will definitely be freed (buffers are ref-counted, so freeing
is not otherwise guaranteed to happen immediately). This function then
decrements the ref-count and returns.
If the ref-count hits zero then __nvgpu_vm_unmap_ref() is called,
which simply calls __nvgpu_vm_unmap() with the relevant batch struct
if present. This is where the real work is done: __nvgpu_vm_unmap()
clears the GMMU mapping, removes the mapped buffer from the various
lists and trees it may be in, and then calls
nvgpu_vm_unmap_system(). That function handles any OS-specific
cleanup and must be defined by all VM OS implementations.
There's a shortcut used by some other core VM code to free mappings
without going through nvgpu_vm_unmap(): it directly decrements the
mapping's ref-count, which in turn calls __nvgpu_vm_unmap_ref() when
the ref-count hits zero.
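The top-level flow condenses to roughly the following sketch (distilled
from the common/mm/vm.c hunk in the diff below; the sync-wait and error
paths are abbreviated, so treat this as illustrative rather than exact):

void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
		    struct vm_gk20a_mapping_batch *batch)
{
	struct nvgpu_mapped_buf *mapped_buffer;

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

	/* Nothing mapped at this GPU VA: nothing to unmap. */
	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
	if (!mapped_buffer)
		goto done;

	/* Fixed mappings must be idle (ref-count == 1) before unmapping. */
	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
		if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer))
			goto done;

	/*
	 * Dropping the last reference calls __nvgpu_vm_unmap_ref(), which
	 * passes the stashed batch struct through to __nvgpu_vm_unmap().
	 */
	vm->kref_put_batch = batch;
	nvgpu_ref_put(&mapped_buffer->ref, __nvgpu_vm_unmap_ref);
	vm->kref_put_batch = NULL;

done:
	nvgpu_mutex_release(&vm->update_gmmu_lock);
}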
JIRA NVGPU-30
JIRA NVGPU-71
Change-Id: Ic626d37ab936819841bab45214f027b40ffa4e5a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1583982
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c        |   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c   |  10
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c  |   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c         |  84
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c            | 118
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c       |   2
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/linux/vm.h  |   4
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h        |  16
8 files changed, 136 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 65c50726..2f2e886f 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -1167,7 +1167,7 @@ __releases(&l->cde_app->mutex)
 	cde_ctx->init_cmd_executed = true;
 
 	/* unmap the buffers - channel holds references to them now */
-	nvgpu_vm_unmap(cde_ctx->vm, map_vaddr);
+	nvgpu_vm_unmap(cde_ctx->vm, map_vaddr, NULL);
 
 	return err;
 
@@ -1175,7 +1175,7 @@ exit_unmap_surface:
 	if (surface)
 		dma_buf_vunmap(compbits_scatter_buf, surface);
 exit_unmap_vaddr:
-	nvgpu_vm_unmap(cde_ctx->vm, map_vaddr);
+	nvgpu_vm_unmap(cde_ctx->vm, map_vaddr, NULL);
 exit_idle:
 	gk20a_idle(g);
 	return err;
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index 08064370..18d0dd07 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -118,7 +118,10 @@ static int gk20a_as_ioctl_unmap_buffer(
 		struct nvgpu_as_unmap_buffer_args *args)
 {
 	gk20a_dbg_fn("");
-	return nvgpu_vm_unmap_buffer(as_share->vm, args->offset, NULL);
+
+	nvgpu_vm_unmap(as_share->vm, args->offset, NULL);
+
+	return 0;
 }
 
 static int gk20a_as_ioctl_map_buffer_batch(
@@ -155,10 +158,7 @@ static int gk20a_as_ioctl_map_buffer_batch(
 			break;
 		}
 
-		err = nvgpu_vm_unmap_buffer(as_share->vm, unmap_args.offset,
-					    &batch);
-		if (err)
-			break;
+		nvgpu_vm_unmap(as_share->vm, unmap_args.offset, &batch);
 	}
 
 	if (err) {
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index 403d9261..24bf813a 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -1142,7 +1142,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
 	return 0;
 
 err_unmap:
-	nvgpu_vm_unmap_buffer(mm->perfbuf.vm, args->offset, NULL);
+	nvgpu_vm_unmap(mm->perfbuf.vm, args->offset, NULL);
 err_remove_vm:
 	nvgpu_vm_put(mm->perfbuf.vm);
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
@@ -1386,7 +1386,7 @@ static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
 
 	err = g->ops.dbg_session_ops.perfbuffer_disable(g);
 
-	nvgpu_vm_unmap_buffer(vm, offset, NULL);
+	nvgpu_vm_unmap(vm, offset, NULL);
 	nvgpu_free_inst_block(g, &mm->perfbuf.inst_block);
 	nvgpu_vm_put(vm);
 
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 984c2015..feb124f8 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -641,88 +641,20 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	struct gk20a *g = vm->mm->g;
-	struct nvgpu_mapped_buf *mapped_buffer;
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-	if (!mapped_buffer) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return 0;
-	}
-
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				    &mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return 0;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
-	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-	return 0;
-}
-
-/* NOTE! mapped_buffers lock must be held */
-void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
-			   struct vm_gk20a_mapping_batch *batch)
+/*
+ * This is the function call-back for freeing OS specific components of an
+ * nvgpu_mapped_buf. This should most likely never be called outside of the
+ * core MM framework!
+ *
+ * Note: the VM lock will be held.
+ */
+void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
 {
 	struct vm_gk20a *vm = mapped_buffer->vm;
-	struct gk20a *g = vm->mm->g;
-
-	g->ops.mm.gmmu_unmap(vm,
-			     mapped_buffer->addr,
-			     mapped_buffer->size,
-			     mapped_buffer->pgsz_idx,
-			     mapped_buffer->va_allocated,
-			     gk20a_mem_flag_none,
-			     mapped_buffer->vm_area ?
-			     mapped_buffer->vm_area->sparse : false,
-			     batch);
 
 	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
 		       mapped_buffer->sgt);
 
-	/* remove from mapped buffer tree and remove list, free */
-	nvgpu_remove_mapped_buf(vm, mapped_buffer);
-	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
-		nvgpu_list_del(&mapped_buffer->buffer_list);
-
-	/* keep track of mapped buffers */
-	if (mapped_buffer->user_mapped)
-		vm->num_user_mapped_buffers--;
-
 	if (mapped_buffer->own_mem_ref)
 		dma_buf_put(mapped_buffer->dmabuf);
-
-	nvgpu_kfree(g, mapped_buffer);
 }
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 88af6456..3d10ff48 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -36,6 +36,9 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
+static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
+			     struct vm_gk20a_mapping_batch *batch);
+
 int vm_aspace_id(struct vm_gk20a *vm)
 {
 	return vm->as_share ? vm->as_share->id : -1;
@@ -538,7 +541,7 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm)
 	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
 	while (node) {
 		mapped_buffer = mapped_buffer_from_rbtree_node(node);
-		nvgpu_vm_unmap_locked(mapped_buffer, NULL);
+		__nvgpu_vm_unmap(mapped_buffer, NULL);
 		nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
 	}
 
@@ -702,8 +705,7 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	vm->kref_put_batch = &batch;
 
 	for (i = 0; i < num_buffers; ++i)
-		nvgpu_ref_put(&mapped_buffers[i]->ref,
-			      nvgpu_vm_unmap_locked_ref);
+		nvgpu_ref_put(&mapped_buffers[i]->ref, __nvgpu_vm_unmap_ref);
 
 	vm->kref_put_batch = NULL;
 	nvgpu_vm_mapping_batch_finish_locked(vm, &batch);
@@ -712,26 +714,118 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }
 
-void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+/*
+ * Really unmap. This does the real GMMU unmap and removes the mapping from the
+ * VM map tracking tree (and vm_area list if necessary).
+ */
+static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
+			     struct vm_gk20a_mapping_batch *batch)
+{
+	struct vm_gk20a *vm = mapped_buffer->vm;
+	struct gk20a *g = vm->mm->g;
+
+	vm->num_user_mapped_buffers--;
+
+	g->ops.mm.gmmu_unmap(vm,
+			     mapped_buffer->addr,
+			     mapped_buffer->size,
+			     mapped_buffer->pgsz_idx,
+			     mapped_buffer->va_allocated,
+			     gk20a_mem_flag_none,
+			     mapped_buffer->vm_area ?
+			     mapped_buffer->vm_area->sparse : false,
+			     batch);
+
+	/*
+	 * Remove from mapped buffer tree. Then delete the buffer from the
+	 * linked list of mapped buffers; though note: not all mapped buffers
+	 * are part of a vm_area.
+	 */
+	nvgpu_remove_mapped_buf(vm, mapped_buffer);
+	nvgpu_list_del(&mapped_buffer->buffer_list);
+
+	/*
+	 * OS specific freeing. This is after the generic freeing incase the
+	 * generic freeing relies on some component of the OS specific
+	 * nvgpu_mapped_buf in some abstraction or the like.
+	 */
+	nvgpu_vm_unmap_system(mapped_buffer);
+
+	nvgpu_kfree(g, mapped_buffer);
+}
+
+void __nvgpu_vm_unmap_ref(struct nvgpu_ref *ref)
 {
 	struct nvgpu_mapped_buf *mapped_buffer =
 		container_of(ref, struct nvgpu_mapped_buf, ref);
-	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+
+	__nvgpu_vm_unmap(mapped_buffer, mapped_buffer->vm->kref_put_batch);
 }
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
+/*
+ * For fixed-offset buffers we must sync the buffer. That means we wait for the
+ * buffer to hit a ref-count of 1 before proceeding.
+ *
+ * Note: this requires the update_gmmu_lock to be held since we release it and
+ * re-aquire it in this function.
+ */
+static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
+				      struct nvgpu_mapped_buf *mapped_buffer)
+{
+	struct nvgpu_timeout timeout;
+	int ret = 0;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	/*
+	 * 500ms second timer.
+	 */
+	nvgpu_timeout_init(vm->mm->g, &timeout, 50, NVGPU_TIMER_CPU_TIMER);
+
+	do {
+		if (nvgpu_atomic_read(&mapped_buffer->ref.refcount) == 1)
+			break;
+		nvgpu_msleep(10);
+	} while (!nvgpu_timeout_expired_msg(&timeout,
+			"sync-unmap failed on 0x%llx"));
+
+	if (nvgpu_timeout_expired(&timeout))
+		ret = -ETIMEDOUT;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	return ret;
+}
+
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
+		    struct vm_gk20a_mapping_batch *batch)
 {
-	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-	if (!mapped_buffer) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return;
+	if (!mapped_buffer)
+		goto done;
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer))
+			/*
+			 * Looks like we have failed... Better not continue in
+			 * case the buffer is in use.
+			 */
+			goto done;
 	}
 
-	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	/*
+	 * Make sure we have access to the batch if we end up calling through to
+	 * the unmap_ref function.
+	 */
+	vm->kref_put_batch = batch;
+	nvgpu_ref_put(&mapped_buffer->ref, __nvgpu_vm_unmap_ref);
+	vm->kref_put_batch = NULL;
+
+done:
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index 88758b85..fddec357 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -208,7 +208,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 			&vm_area->buffer_list_head,
 			nvgpu_mapped_buf, buffer_list) {
 		nvgpu_list_del(&buffer->buffer_list);
-		nvgpu_ref_put(&buffer->ref, nvgpu_vm_unmap_locked_ref);
+		nvgpu_ref_put(&buffer->ref, __nvgpu_vm_unmap_ref);
 	}
 
 	/* if this was a sparse mapping, free the va */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
index 7aacf496..3d9f9ea6 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
@@ -98,10 +98,6 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch);
 
-/* Note: batch may be NULL if unmap op is not part of a batch */
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch);
-
 /* find buffer corresponding to va */
 int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
 		      struct dma_buf **dmabuf,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index e529512b..84c7e0c7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -207,11 +207,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers);
 
-void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
 		    struct vm_gk20a_mapping_batch *batch);
-void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref);
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
+/*
+ * Implemented by each OS. Called from within the core VM code to handle OS
+ * specific components of an nvgpu_mapped_buf.
+ */
+void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer);
+
+/*
+ * Don't use this outside of the core VM code!
+ */
+void __nvgpu_vm_unmap_ref(struct nvgpu_ref *ref);
 
 /*
  * These all require the VM update lock to be held.