diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm.c | 56 |
1 file changed, 8 insertions, 48 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 9f04ee01..c6c99b31 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -687,13 +687,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
687 | return 0; | 687 | return 0; |
688 | } | 688 | } |
689 | 689 | ||
690 | void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref) | ||
691 | { | ||
692 | struct nvgpu_mapped_buf *mapped_buffer = | ||
693 | container_of(ref, struct nvgpu_mapped_buf, ref); | ||
694 | nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch); | ||
695 | } | ||
696 | |||
697 | void nvgpu_vm_put_buffers(struct vm_gk20a *vm, | 690 | void nvgpu_vm_put_buffers(struct vm_gk20a *vm, |
698 | struct nvgpu_mapped_buf **mapped_buffers, | 691 | struct nvgpu_mapped_buf **mapped_buffers, |
699 | int num_buffers) | 692 | int num_buffers) |
@@ -719,14 +712,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm, | |||
719 | nvgpu_big_free(vm->mm->g, mapped_buffers); | 712 | nvgpu_big_free(vm->mm->g, mapped_buffers); |
720 | } | 713 | } |
721 | 714 | ||
722 | static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | 715 | void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref) |
723 | struct vm_gk20a_mapping_batch *batch) | 716 | { |
717 | struct nvgpu_mapped_buf *mapped_buffer = | ||
718 | container_of(ref, struct nvgpu_mapped_buf, ref); | ||
719 | nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch); | ||
720 | } | ||
721 | |||
722 | void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset) | ||
724 | { | 723 | { |
725 | struct gk20a *g = vm->mm->g; | 724 | struct gk20a *g = vm->mm->g; |
726 | struct nvgpu_mapped_buf *mapped_buffer; | 725 | struct nvgpu_mapped_buf *mapped_buffer; |
727 | 726 | ||
728 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | 727 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
729 | |||
730 | mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); | 728 | mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); |
731 | if (!mapped_buffer) { | 729 | if (!mapped_buffer) { |
732 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 730 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
@@ -734,44 +732,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
734 | return; | 732 | return; |
735 | } | 733 | } |
736 | 734 | ||
737 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { | ||
738 | struct nvgpu_timeout timeout; | ||
739 | |||
740 | nvgpu_mutex_release(&vm->update_gmmu_lock); | ||
741 | |||
742 | nvgpu_timeout_init(vm->mm->g, &timeout, 10000, | ||
743 | NVGPU_TIMER_RETRY_TIMER); | ||
744 | do { | ||
745 | if (nvgpu_atomic_read( | ||
746 | &mapped_buffer->ref.refcount) == 1) | ||
747 | break; | ||
748 | nvgpu_udelay(5); | ||
749 | } while (!nvgpu_timeout_expired_msg(&timeout, | ||
750 | "sync-unmap failed on 0x%llx")); | ||
751 | |||
752 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | ||
753 | } | ||
754 | |||
755 | if (mapped_buffer->user_mapped == 0) { | ||
756 | nvgpu_mutex_release(&vm->update_gmmu_lock); | ||
757 | nvgpu_err(g, "addr already unmapped from user 0x%llx", offset); | ||
758 | return; | ||
759 | } | ||
760 | |||
761 | mapped_buffer->user_mapped--; | ||
762 | if (mapped_buffer->user_mapped == 0) | ||
763 | vm->num_user_mapped_buffers--; | ||
764 | |||
765 | vm->kref_put_batch = batch; | ||
766 | nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref); | 735 | nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref); |
767 | vm->kref_put_batch = NULL; | ||
768 | |||
769 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 736 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
770 | } | 737 | } |
771 | |||
772 | int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, | ||
773 | struct vm_gk20a_mapping_batch *batch) | ||
774 | { | ||
775 | nvgpu_vm_unmap_user(vm, offset, batch); | ||
776 | return 0; | ||
777 | } | ||