Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c | 84
1 file changed, 8 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 984c2015..feb124f8 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -641,88 +641,20 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	struct gk20a *g = vm->mm->g;
-	struct nvgpu_mapped_buf *mapped_buffer;
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-	if (!mapped_buffer) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return 0;
-	}
-
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				    &mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-					"sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return 0;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
-	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-	return 0;
-}
-
-/* NOTE! mapped_buffers lock must be held */
-void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
-			   struct vm_gk20a_mapping_batch *batch)
+/*
+ * This is the function call-back for freeing OS specific components of an
+ * nvgpu_mapped_buf. This should most likely never be called outside of the
+ * core MM framework!
+ *
+ * Note: the VM lock will be held.
+ */
+void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
 {
 	struct vm_gk20a *vm = mapped_buffer->vm;
-	struct gk20a *g = vm->mm->g;
-
-	g->ops.mm.gmmu_unmap(vm,
-			     mapped_buffer->addr,
-			     mapped_buffer->size,
-			     mapped_buffer->pgsz_idx,
-			     mapped_buffer->va_allocated,
-			     gk20a_mem_flag_none,
-			     mapped_buffer->vm_area ?
-			     mapped_buffer->vm_area->sparse : false,
-			     batch);
 
 	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
 		       mapped_buffer->sgt);
 
-	/* remove from mapped buffer tree and remove list, free */
-	nvgpu_remove_mapped_buf(vm, mapped_buffer);
-	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
-		nvgpu_list_del(&mapped_buffer->buffer_list);
-
-	/* keep track of mapped buffers */
-	if (mapped_buffer->user_mapped)
-		vm->num_user_mapped_buffers--;
-
 	if (mapped_buffer->own_mem_ref)
 		dma_buf_put(mapped_buffer->dmabuf);
-
-	nvgpu_kfree(g, mapped_buffer);
 }
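
The net effect of the hunk is easier to see from the common-code side: the Linux
layer now keeps only the dma-buf unpin and reference drop (the genuinely
Linux-specific pieces), while the GMMU unmap, the mapped-buffer tree
bookkeeping, and the final free are expected to live in the core MM framework,
which invokes nvgpu_vm_unmap_system() as its OS callback under the VM lock.
Below is a minimal sketch of what such a common-code caller could look like.
It is an assumption built from the callback comment and the code removed above,
not part of this commit; the caller's name and the exact ordering of steps are
hypothetical.

/*
 * Hypothetical common-code caller (NOT part of this diff). Assumes the
 * VM lock (vm->update_gmmu_lock) is already held, matching the contract
 * documented for nvgpu_vm_unmap_system() above. The name
 * __nvgpu_vm_unmap_common() is made up for illustration.
 */
static void __nvgpu_vm_unmap_common(struct nvgpu_mapped_buf *mapped_buffer,
				    struct vm_gk20a_mapping_batch *batch)
{
	struct vm_gk20a *vm = mapped_buffer->vm;
	struct gk20a *g = vm->mm->g;

	/* Tear down the GPU page tables backing this mapping. */
	g->ops.mm.gmmu_unmap(vm,
			     mapped_buffer->addr,
			     mapped_buffer->size,
			     mapped_buffer->pgsz_idx,
			     mapped_buffer->va_allocated,
			     gk20a_mem_flag_none,
			     mapped_buffer->vm_area ?
			     mapped_buffer->vm_area->sparse : false,
			     batch);

	/* Drop the mapping from the VM's tracking structures. */
	nvgpu_remove_mapped_buf(vm, mapped_buffer);
	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
		nvgpu_list_del(&mapped_buffer->buffer_list);

	/* OS-specific teardown: unpin the dma-buf and drop its ref. */
	nvgpu_vm_unmap_system(mapped_buffer);

	nvgpu_kfree(g, mapped_buffer);
}

The design point is that dma_buf and gk20a_mm_unpin() are Linux concepts, so
they are the only steps that must stay behind the OS callback; everything else
in the removed nvgpu_vm_unmap_locked() is OS-agnostic and can be shared with
other OS ports.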