Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	| 134
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm_area.c	|   2
2 files changed, 135 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index f8d58349..9f04ee01 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -641,3 +641,137 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
 
 	return mapped_buffer_from_rbtree_node(node);
 }
+
+int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
+			 struct nvgpu_mapped_buf ***mapped_buffers,
+			 int *num_buffers)
+{
+	struct nvgpu_mapped_buf *mapped_buffer;
+	struct nvgpu_mapped_buf **buffer_list;
+	struct nvgpu_rbtree_node *node = NULL;
+	int i = 0;
+
+	if (vm->userspace_managed) {
+		*mapped_buffers = NULL;
+		*num_buffers = 0;
+		return 0;
+	}
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) *
+				       vm->num_user_mapped_buffers);
+	if (!buffer_list) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		return -ENOMEM;
+	}
+
+	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
+	while (node) {
+		mapped_buffer = mapped_buffer_from_rbtree_node(node);
+		if (mapped_buffer->user_mapped) {
+			buffer_list[i] = mapped_buffer;
+			nvgpu_ref_get(&mapped_buffer->ref);
+			i++;
+		}
+		nvgpu_rbtree_enum_next(&node, node);
+	}
+
+	BUG_ON(i != vm->num_user_mapped_buffers);
+
+	*num_buffers = vm->num_user_mapped_buffers;
+	*mapped_buffers = buffer_list;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	return 0;
+}
+
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+	struct nvgpu_mapped_buf *mapped_buffer =
+		container_of(ref, struct nvgpu_mapped_buf, ref);
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
+			  struct nvgpu_mapped_buf **mapped_buffers,
+			  int num_buffers)
+{
+	int i;
+	struct vm_gk20a_mapping_batch batch;
+
+	if (num_buffers == 0)
+		return;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	nvgpu_vm_mapping_batch_start(&batch);
+	vm->kref_put_batch = &batch;
+
+	for (i = 0; i < num_buffers; ++i)
+		nvgpu_ref_put(&mapped_buffers[i]->ref,
+			      nvgpu_vm_unmap_locked_ref);
+
+	vm->kref_put_batch = NULL;
+	nvgpu_vm_mapping_batch_finish_locked(vm, &batch);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	nvgpu_big_free(vm->mm->g, mapped_buffers);
+}
+
+static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
+				struct vm_gk20a_mapping_batch *batch)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_mapped_buf *mapped_buffer;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
+	if (!mapped_buffer) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
+		return;
+	}
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				&mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+					"sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return;
+	}
+
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
+	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+}
+
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
+{
+	nvgpu_vm_unmap_user(vm, offset, batch);
+	return 0;
+}
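The functions added above form a get/put pair: nvgpu_vm_get_buffers() snapshots every user-mapped buffer under vm->update_gmmu_lock and takes a reference on each entry, so the returned array stays valid after the lock is dropped, while nvgpu_vm_put_buffers() later drops those references under a single mapping batch (so the final unmaps can coalesce their GMMU updates) and frees the array. A minimal caller sketch, assuming only the APIs in this diff; suspend_vm_mappings() is hypothetical, and the declarations are assumed to live in <nvgpu/vm.h>:

#include <nvgpu/vm.h>	/* assumed home of nvgpu_vm_get_buffers()/nvgpu_vm_put_buffers() */

/* Hypothetical caller: snapshot all user mappings of a VM, inspect
 * them without holding vm->update_gmmu_lock, then drop the refs. */
static int suspend_vm_mappings(struct vm_gk20a *vm)
{
	struct nvgpu_mapped_buf **buffers;
	int num_buffers;
	int i;
	int err;

	/* Takes the VM lock and refs each user-mapped buffer; a
	 * userspace-managed VM yields an empty snapshot. */
	err = nvgpu_vm_get_buffers(vm, &buffers, &num_buffers);
	if (err != 0)
		return err;

	/* Each entry holds a reference, so none of these buffers can
	 * be torn down while we look at them. */
	for (i = 0; i < num_buffers; i++) {
		/* e.g. record state needed to restore the mapping */
	}

	/* Drops all refs under one mapping batch and frees the array. */
	nvgpu_vm_put_buffers(vm, buffers, num_buffers);
	return 0;
}
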
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index de1623bc..88758b85 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -208,7 +208,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 			&vm_area->buffer_list_head,
 			nvgpu_mapped_buf, buffer_list) {
 		nvgpu_list_del(&buffer->buffer_list);
-		nvgpu_ref_put(&buffer->ref, gk20a_vm_unmap_locked_ref);
+		nvgpu_ref_put(&buffer->ref, nvgpu_vm_unmap_locked_ref);
 	}
 
 	/* if this was a sparse mapping, free the va */
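
The vm_area.c hunk only repoints the release callback at the renamed nvgpu_vm_unmap_locked_ref() (formerly gk20a_vm_unmap_locked_ref). That callback follows the usual embedded-refcount destructor pattern: the final put invokes it with a pointer to the embedded ref, and container_of() recovers the enclosing object. A generic sketch of the same pattern using the plain kernel kref API rather than nvgpu_ref; struct demo_buf and its helpers are invented for illustration:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_buf {
	struct kref ref;	/* embedded refcount, like nvgpu_mapped_buf::ref */
	void *payload;
};

static struct demo_buf *demo_buf_alloc(size_t payload_size)
{
	struct demo_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->payload = kzalloc(payload_size, GFP_KERNEL);
	if (!buf->payload) {
		kfree(buf);
		return NULL;
	}
	kref_init(&buf->ref);	/* refcount starts at 1 */
	return buf;
}

/* Release callback: runs once the last reference is dropped. It
 * recovers the enclosing object with container_of(), just as
 * nvgpu_vm_unmap_locked_ref() recovers its nvgpu_mapped_buf
 * before unmapping it. */
static void demo_buf_release(struct kref *ref)
{
	struct demo_buf *buf = container_of(ref, struct demo_buf, ref);

	kfree(buf->payload);
	kfree(buf);
}

/* Drop one reference; demo_buf_release() runs on the final put. */
static void demo_buf_put(struct demo_buf *buf)
{
	kref_put(&buf->ref, demo_buf_release);
}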