path: root/drivers/gpu/nvgpu/common/mm/vm.c
author	Alex Waterman <alexw@nvidia.com>	2017-09-07 19:45:07 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-18 19:00:37 -0400
commit	0853109c9994d1375a10d13ae254690a4a4c9610 (patch)
tree	339adb7e8e74c8e0085b24ed9e50181a9aafa3ee /drivers/gpu/nvgpu/common/mm/vm.c
parent	b3446bc0b6fca6cb992667f80a95f8503b6a652a (diff)
gpu: nvgpu: Refactoring nvgpu_vm functions
Refactor the last nvgpu_vm functions out of the mm_gk20a.c code. This also removes some usages of dma_buf from mm_gk20a.c, which helps make it less Linux specific. Also drop some Linux-specific header includes that are no longer necessary in gk20a/mm_gk20a.c. The mm_gk20a.c code is now quite close to being Linux free.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: I72b370bd85a7b029768b0fb4827d6abba42007c3
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566629
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
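For context, the buffer get/put pair moved by this patch follows a snapshot pattern: nvgpu_vm_get_buffers() takes a reference on every user-mapped buffer while holding update_gmmu_lock, and nvgpu_vm_put_buffers() later drops those references in a single mapping batch. A minimal caller sketch, based only on the signatures in the diff below (example_walk_user_mappings() is a hypothetical name, not part of this patch):

static void example_walk_user_mappings(struct vm_gk20a *vm)
{
        struct nvgpu_mapped_buf **buffers;
        int num_buffers;
        int i;

        if (nvgpu_vm_get_buffers(vm, &buffers, &num_buffers) != 0)
                return;

        for (i = 0; i < num_buffers; i++) {
                /*
                 * Each entry holds a reference taken in
                 * nvgpu_vm_get_buffers(), so the mapping cannot be
                 * torn down while it is inspected here.
                 */
        }

        /* Drop all references and free the list in one batch. */
        nvgpu_vm_put_buffers(vm, buffers, num_buffers);
}

Note that for a userspace-managed VM the getter returns success with a NULL list and a zero count, which the putter tolerates by returning early.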
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 134
1 file changed, 134 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index f8d58349..9f04ee01 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -641,3 +641,137 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
 
         return mapped_buffer_from_rbtree_node(node);
 }
+
+int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
+                         struct nvgpu_mapped_buf ***mapped_buffers,
+                         int *num_buffers)
+{
+        struct nvgpu_mapped_buf *mapped_buffer;
+        struct nvgpu_mapped_buf **buffer_list;
+        struct nvgpu_rbtree_node *node = NULL;
+        int i = 0;
+
+        if (vm->userspace_managed) {
+                *mapped_buffers = NULL;
+                *num_buffers = 0;
+                return 0;
+        }
+
+        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+        buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) *
+                                       vm->num_user_mapped_buffers);
+        if (!buffer_list) {
+                nvgpu_mutex_release(&vm->update_gmmu_lock);
+                return -ENOMEM;
+        }
+
+        nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
+        while (node) {
+                mapped_buffer = mapped_buffer_from_rbtree_node(node);
+                if (mapped_buffer->user_mapped) {
+                        buffer_list[i] = mapped_buffer;
+                        nvgpu_ref_get(&mapped_buffer->ref);
+                        i++;
+                }
+                nvgpu_rbtree_enum_next(&node, node);
+        }
+
+        BUG_ON(i != vm->num_user_mapped_buffers);
+
+        *num_buffers = vm->num_user_mapped_buffers;
+        *mapped_buffers = buffer_list;
+
+        nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+        return 0;
+}
+
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+        struct nvgpu_mapped_buf *mapped_buffer =
+                container_of(ref, struct nvgpu_mapped_buf, ref);
+        nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
+                          struct nvgpu_mapped_buf **mapped_buffers,
+                          int num_buffers)
+{
+        int i;
+        struct vm_gk20a_mapping_batch batch;
+
+        if (num_buffers == 0)
+                return;
+
+        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+        nvgpu_vm_mapping_batch_start(&batch);
+        vm->kref_put_batch = &batch;
+
+        for (i = 0; i < num_buffers; ++i)
+                nvgpu_ref_put(&mapped_buffers[i]->ref,
+                              nvgpu_vm_unmap_locked_ref);
+
+        vm->kref_put_batch = NULL;
+        nvgpu_vm_mapping_batch_finish_locked(vm, &batch);
+        nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+        nvgpu_big_free(vm->mm->g, mapped_buffers);
+}
+
+static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
+                                struct vm_gk20a_mapping_batch *batch)
+{
+        struct gk20a *g = vm->mm->g;
+        struct nvgpu_mapped_buf *mapped_buffer;
+
+        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+        mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
+        if (!mapped_buffer) {
+                nvgpu_mutex_release(&vm->update_gmmu_lock);
+                nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
+                return;
+        }
+
+        if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+                struct nvgpu_timeout timeout;
+
+                nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+                nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+                                   NVGPU_TIMER_RETRY_TIMER);
+                do {
+                        if (nvgpu_atomic_read(
+                                    &mapped_buffer->ref.refcount) == 1)
+                                break;
+                        nvgpu_udelay(5);
+                } while (!nvgpu_timeout_expired_msg(&timeout,
+                                        "sync-unmap failed on 0x%llx"));
+
+                nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+        }
+
+        if (mapped_buffer->user_mapped == 0) {
+                nvgpu_mutex_release(&vm->update_gmmu_lock);
+                nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+                return;
+        }
+
+        mapped_buffer->user_mapped--;
+        if (mapped_buffer->user_mapped == 0)
+                vm->num_user_mapped_buffers--;
+
+        vm->kref_put_batch = batch;
+        nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+        vm->kref_put_batch = NULL;
+
+        nvgpu_mutex_release(&vm->update_gmmu_lock);
+}
+
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+                          struct vm_gk20a_mapping_batch *batch)
+{
+        nvgpu_vm_unmap_user(vm, offset, batch);
+        return 0;
+}
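One note on the unmap path added above: nvgpu_vm_unmap_buffer() is batch-aware, so a caller tearing down several mappings can share one vm_gk20a_mapping_batch and defer the GMMU cache maintenance until the batch is finished. A rough sketch, assuming an unlocked nvgpu_vm_mapping_batch_finish() counterpart to the _locked variant used above (example_unmap_offsets() is a hypothetical name, not part of this patch):

static void example_unmap_offsets(struct vm_gk20a *vm,
                                  const u64 *offsets, int count)
{
        struct vm_gk20a_mapping_batch batch;
        int i;

        nvgpu_vm_mapping_batch_start(&batch);

        /* Each unmap records its needed TLB/L2 maintenance in the batch. */
        for (i = 0; i < count; i++)
                nvgpu_vm_unmap_buffer(vm, offsets[i], &batch);

        /* Perform the deferred GMMU cache maintenance once. */
        nvgpu_vm_mapping_batch_finish(vm, &batch);
}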