Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c  |  76
1 file changed, 54 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 638d3e51..984c2015 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -25,6 +25,7 @@
 #include <nvgpu/page_allocator.h>
 #include <nvgpu/vidmem.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 #include <nvgpu/linux/nvgpu_mem.h>
 
@@ -33,7 +34,6 @@
 #include "gk20a/kind_gk20a.h"
 #include "gk20a/platform_gk20a.h"
 
-#include "vm_priv.h"
 #include "os_linux.h"
 #include "dmabuf.h"
 
@@ -323,17 +323,17 @@ static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind,
 	return 0;
 }
 
-u64 nvgpu_vm_map(struct vm_gk20a *vm,
-		 struct dma_buf *dmabuf,
-		 u64 offset_align,
-		 u32 flags,
-		 s16 compr_kind,
-		 s16 incompr_kind,
-		 bool user_mapped,
-		 int rw_flag,
-		 u64 buffer_offset,
-		 u64 mapping_size,
-		 struct vm_gk20a_mapping_batch *batch)
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
+		       struct dma_buf *dmabuf,
+		       u64 offset_align,
+		       u32 flags,
+		       s16 compr_kind,
+		       s16 incompr_kind,
+		       bool user_mapped,
+		       int rw_flag,
+		       u64 buffer_offset,
+		       u64 mapping_size,
+		       struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
@@ -625,12 +625,12 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}
 
-	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
-			      flags, compr_kind, incompr_kind, true,
-			      gk20a_mem_flag_none,
-			      buffer_offset,
-			      mapping_size,
-			      batch);
+	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
+				    flags, compr_kind, incompr_kind, true,
+				    gk20a_mem_flag_none,
+				    buffer_offset,
+				    mapping_size,
+				    batch);
 
 	*offset_align = ret_va;
 	if (!ret_va) {
@@ -641,21 +641,55 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return;
+		return 0;
+	}
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				    &mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return 0;
 	}
 
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return 0;
 }
 
 /* NOTE! mapped_buffers lock must be held */
@@ -691,6 +725,4 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 	dma_buf_put(mapped_buffer->dmabuf);
 
 	nvgpu_kfree(g, mapped_buffer);
-
-	return;
 }
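
The most notable change above is that a fixed-offset unmap now waits for outstanding references on the buffer to drain before tearing the mapping down: the lock is dropped, the refcount is polled with a 5 us backoff under a 10 s timeout, and the lock is retaken. Below is a minimal, self-contained sketch of that poll-with-timeout shape. It uses hypothetical stand-ins (struct ref, udelay_us, wait_for_sole_ref are illustrative names, not the driver's API) and C11 atomics plus POSIX timing in place of the nvgpu helpers:

	#define _POSIX_C_SOURCE 200809L
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	/* Illustrative stand-in for the nvgpu_ref embedded in a mapped buffer. */
	struct ref {
		atomic_int refcount;
	};

	/* Sleep for roughly 'us' microseconds, like nvgpu_udelay(). */
	static void udelay_us(long us)
	{
		struct timespec ts = { .tv_sec = 0, .tv_nsec = us * 1000L };
		nanosleep(&ts, NULL);
	}

	/*
	 * Poll until the caller holds the only reference, or give up after
	 * timeout_ms milliseconds.  Mirrors the sync-unmap loop in the diff:
	 * check the refcount, back off 5us, and report once on expiry.
	 */
	static bool wait_for_sole_ref(struct ref *r, long timeout_ms)
	{
		struct timespec start, now;
		long elapsed_ms;

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			if (atomic_load(&r->refcount) == 1)
				return true;
			udelay_us(5);
			clock_gettime(CLOCK_MONOTONIC, &now);
			elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
				     (now.tv_nsec - start.tv_nsec) / 1000000L;
		} while (elapsed_ms < timeout_ms);

		fprintf(stderr, "sync-unmap wait timed out\n");
		return false;
	}

	int main(void)
	{
		struct ref r = { .refcount = 1 };

		/* The sole holder: the wait returns immediately. */
		return wait_for_sole_ref(&r, 10) ? 0 : 1;
	}

Note that the driver releases update_gmmu_lock before spinning, so whatever work is holding the remaining references can complete and drop them; it reacquires the lock before touching the buffer's bookkeeping again.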
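The vm->kref_put_batch dance around nvgpu_ref_put() lets the release callback record deferred work into a caller-supplied batch instead of acting immediately, since the batch pointer is only published while update_gmmu_lock is held. A reduced sketch of that idea, again with hypothetical names (mapping_batch, mapped_buf_put, unmap_with_batch are not the driver's identifiers, and the deferred work here is a stand-in for whatever the real callback batches):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct mapping_batch {
		bool need_tlb_invalidate;	/* deferred work, flushed later */
	};

	struct vm {
		struct mapping_batch *put_batch;	/* set only under the vm lock */
	};

	struct mapped_buf {
		atomic_int refcount;
		struct vm *vm;
	};

	/* Release callback: record work in the active batch, else do it now. */
	static void mapped_buf_release(struct mapped_buf *buf)
	{
		if (buf->vm->put_batch)
			buf->vm->put_batch->need_tlb_invalidate = true;
		else
			printf("invalidate TLB immediately\n");
	}

	/* Drop one reference; run the release callback on the last one. */
	static void mapped_buf_put(struct mapped_buf *buf)
	{
		if (atomic_fetch_sub(&buf->refcount, 1) == 1)
			mapped_buf_release(buf);
	}

	/* Mirror of the diff: publish the batch, drop the ref, clear it. */
	static void unmap_with_batch(struct vm *vm, struct mapped_buf *buf,
				     struct mapping_batch *batch)
	{
		vm->put_batch = batch;
		mapped_buf_put(buf);
		vm->put_batch = NULL;
	}

	int main(void)
	{
		struct vm vm = { .put_batch = NULL };
		struct mapped_buf buf = { .refcount = 1, .vm = &vm };
		struct mapping_batch batch = { .need_tlb_invalidate = false };

		unmap_with_batch(&vm, &buf, &batch);
		printf("deferred invalidate: %d\n", batch.need_tlb_invalidate);
		return 0;
	}

Scoping the batch pointer to the locked region is the design point: callers that unmap many buffers in a row can hand the same batch to each put and pay for a single flush at the end.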