Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c        | 12
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dmabuf.c     |  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c   |  6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c  |  3
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c     |  3
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c         | 76
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c            | 56
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c     | 14
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/linux/vm.h (renamed from drivers/gpu/nvgpu/common/linux/vm_priv.h) | 47
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h        |  6
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c            |  3
11 files changed, 108 insertions, 120 deletions
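
Caller-visible summary of this change: the Linux-only VM interface moves from "common/linux/vm_priv.h" to <nvgpu/linux/vm.h>, the dmabuf-based nvgpu_vm_map() is renamed to nvgpu_vm_map_linux(), and the user unmap path becomes the Linux-side nvgpu_vm_unmap_buffer(), while a plain nvgpu_vm_unmap() stays in common code. A minimal caller-side sketch follows; the function name and argument values are hypothetical and only mirror the renamed API as used in cde.c below.

	#include <nvgpu/linux/vm.h>	/* was "common/linux/vm_priv.h" */

	/* Hypothetical call site; argument values are illustrative only. */
	static u64 map_compbits_example(struct vm_gk20a *vm, struct dma_buf *buf)
	{
		get_dma_buf(buf);	/* a ref for nvgpu_vm_map_linux */
		return nvgpu_vm_map_linux(vm, buf,
					  0,			/* offset_align */
					  NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
					  NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
					  NV_KIND_INVALID,	/* compr_kind */
					  NV_KIND_INVALID,	/* incompr_kind */
					  false,		/* user_mapped */
					  gk20a_mem_flag_none,	/* rw_flag */
					  0,			/* buffer_offset */
					  0,			/* mapping_size */
					  NULL);		/* batch */
	}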
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 577d86e8..30edc1d5 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -31,6 +31,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/firmware.h>
 
+#include <nvgpu/linux/vm.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/channel_gk20a.h"
 #include "gk20a/mm_gk20a.h"
@@ -44,12 +46,6 @@
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
-/*
- * Currently this code uses nvgpu_vm_map() since it takes dmabuf FDs from the
- * CDE ioctls. That has to change - instead this needs to take an nvgpu_mem.
- */
-#include "common/linux/vm_priv.h"
-
 static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx);
 static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l);
 
@@ -1052,8 +1048,8 @@ __releases(&l->cde_app->mutex)
 
 
 	/* map the destination buffer */
-	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map */
-	map_vaddr = nvgpu_vm_map(cde_ctx->vm, compbits_scatter_buf, 0,
+	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
+	map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
 				 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
 				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
 				 NV_KIND_INVALID,
diff --git a/drivers/gpu/nvgpu/common/linux/dmabuf.c b/drivers/gpu/nvgpu/common/linux/dmabuf.c
index 0b07b255..2415b7c2 100644
--- a/drivers/gpu/nvgpu/common/linux/dmabuf.c
+++ b/drivers/gpu/nvgpu/common/linux/dmabuf.c
@@ -21,13 +21,13 @@
 #include <nvgpu/comptags.h>
 #include <nvgpu/enabled.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
 
 #include "dmabuf.h"
-#include "vm_priv.h"
 #include "os_linux.h"
 
 static void gk20a_mm_delete_priv(void *_priv)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index cfc4e7ef..08064370 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -17,19 +17,19 @@
 #include <linux/uaccess.h>
 #include <linux/fs.h>
 
-#include <nvgpu/log2.h>
-
 #include <trace/events/gk20a.h>
 
 #include <uapi/linux/nvgpu.h>
 
 #include <nvgpu/gmmu.h>
 #include <nvgpu/vm_area.h>
+#include <nvgpu/log2.h>
+
+#include <nvgpu/linux/vm.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
 #include "ioctl_as.h"
-#include "vm_priv.h"
 #include "os_linux.h"
 
 static int gk20a_as_ioctl_bind_channel(
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index c8831a97..7e62bb5c 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -28,7 +28,9 @@
 #include <nvgpu/vm.h>
 #include <nvgpu/atomic.h>
 #include <nvgpu/cond.h>
+
 #include <nvgpu/linux/vidmem.h>
+#include <nvgpu/linux/vm.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -38,7 +40,6 @@
38#include "os_linux.h" 40#include "os_linux.h"
39#include "ioctl_dbg.h" 41#include "ioctl_dbg.h"
40 42
41#include "vm_priv.h"
42 43
43/* silly allocator - just increment id */ 44/* silly allocator - just increment id */
44static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0); 45static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
index 03976da3..1e65b54d 100644
--- a/drivers/gpu/nvgpu/common/linux/vidmem.c
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -24,14 +24,13 @@
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/dma.h>
 #include <nvgpu/linux/vidmem.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
-#include "vm_priv.h"
-
 bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
 	return !!(addr & 1ULL);
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 638d3e51..984c2015 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -25,6 +25,7 @@
 #include <nvgpu/page_allocator.h>
 #include <nvgpu/vidmem.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 #include <nvgpu/linux/nvgpu_mem.h>
 
@@ -33,7 +34,6 @@
33#include "gk20a/kind_gk20a.h" 34#include "gk20a/kind_gk20a.h"
34#include "gk20a/platform_gk20a.h" 35#include "gk20a/platform_gk20a.h"
35 36
36#include "vm_priv.h"
37#include "os_linux.h" 37#include "os_linux.h"
38#include "dmabuf.h" 38#include "dmabuf.h"
39 39
@@ -323,17 +323,17 @@ static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind,
 	return 0;
 }
 
-u64 nvgpu_vm_map(struct vm_gk20a *vm,
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		 struct dma_buf *dmabuf,
 		 u64 offset_align,
 		 u32 flags,
 		 s16 compr_kind,
 		 s16 incompr_kind,
 		 bool user_mapped,
 		 int rw_flag,
 		 u64 buffer_offset,
 		 u64 mapping_size,
 		 struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
@@ -625,12 +625,12 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}
 
-	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
+	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
 			      flags, compr_kind, incompr_kind, true,
 			      gk20a_mem_flag_none,
 			      buffer_offset,
 			      mapping_size,
 			      batch);
 
 	*offset_align = ret_va;
 	if (!ret_va) {
@@ -641,21 +641,55 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return;
+		return 0;
+	}
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				    &mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+					    "sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return 0;
 	}
 
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return 0;
 }
 
 /* NOTE! mapped_buffers lock must be held */
@@ -691,6 +725,4 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 	dma_buf_put(mapped_buffer->dmabuf);
 
 	nvgpu_kfree(g, mapped_buffer);
-
-	return;
 }
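
Net effect of this file and of common/mm/vm.c below: the user-mapping bookkeeping (the sync-unmap wait for FIXED_OFFSET buffers, the user_mapped accounting, and the batched ref put) now lives in the Linux-side nvgpu_vm_unmap_buffer(), while common code keeps only a plain nvgpu_vm_unmap(). A minimal sketch, assuming a caller that already holds a struct vm_gk20a *vm and a GPU VA obtained from nvgpu_vm_map_buffer():

	/* Linux/user path: tears down the user mapping; batch may be NULL
	 * when the unmap is not part of a batch. */
	int err = nvgpu_vm_unmap_buffer(vm, gpu_va, NULL);

The nvgpu_vm_unmap(vm, gpu_va) that remains in common/mm/vm.c (next file) only looks up the mapped buffer and drops its reference under update_gmmu_lock.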
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 9f04ee01..c6c99b31 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -687,13 +687,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 	return 0;
 }
 
-void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
-{
-	struct nvgpu_mapped_buf *mapped_buffer =
-		container_of(ref, struct nvgpu_mapped_buf, ref);
-	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
-}
-
 void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers)
@@ -719,14 +712,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }
 
-static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
-				struct vm_gk20a_mapping_batch *batch)
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+	struct nvgpu_mapped_buf *mapped_buffer =
+		container_of(ref, struct nvgpu_mapped_buf, ref);
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
@@ -734,44 +732,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 		return;
 	}
 
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				    &mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-					    "sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
-
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	nvgpu_vm_unmap_user(vm, offset, batch);
-	return 0;
-}
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e3fc61c0..590506d6 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -43,6 +43,13 @@
 #include <nvgpu/ltc.h>
 #include <nvgpu/barrier.h>
 
+/*
+ * This is required for nvgpu_vm_find_buf() which is used in the tracing
+ * code. Once we can get and access userspace buffers without requiring
+ * direct dma_buf usage this can be removed.
+ */
+#include <nvgpu/linux/vm.h>
+
 #include "gk20a.h"
 #include "ctxsw_trace_gk20a.h"
 #include "dbg_gpu_gk20a.h"
@@ -58,13 +65,6 @@
 #include <linux/uaccess.h>
 
 /*
- * This is required for nvgpu_vm_find_buffer() which is used in the tracing
- * code. Once we can get and access userspace buffers without requiring
- * direct dma_buf usage this can be removed.
- */
-#include "common/linux/vm_priv.h"
-
-/*
  * Although channels do have pointers back to the gk20a struct that they were
  * created under in cases where the driver is killed that pointer can be bad.
  * The channel memory can be freed before the release() function for a given
diff --git a/drivers/gpu/nvgpu/common/linux/vm_priv.h b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
index be7efa8b..91f0cf09 100644
--- a/drivers/gpu/nvgpu/common/linux/vm_priv.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h
@@ -24,6 +24,7 @@ struct dma_buf;
 
 struct vm_gk20a;
 struct vm_gk20a_mapping_batch;
+struct nvgpu_vm_area;
 
 struct buffer_attrs {
 	struct sg_table *sgt;
@@ -40,30 +41,30 @@ struct buffer_attrs {
 	bool ctag_user_mappable;
 };
 
-u64 nvgpu_vm_map(struct vm_gk20a *vm,
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		 struct dma_buf *dmabuf,
 		 u64 offset_align,
 		 u32 flags,
 
 		 /*
 		  * compressible kind if
 		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
 		  * specified, otherwise just the kind
 		  */
 		 s16 compr_kind,
 
 		 /*
 		  * incompressible kind if
 		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
 		  * specified, otherwise ignored
 		  */
 		 s16 incompr_kind,
 
 		 bool user_mapped,
 		 int rw_flag,
 		 u64 buffer_offset,
 		 u64 mapping_size,
 		 struct vm_gk20a_mapping_batch *mapping_batch);
 
 /*
  * Notes:
@@ -85,7 +86,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch);
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
+/* Note: batch may be NULL if unmap op is not part of a batch */
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch);
 
 /* find buffer corresponding to va */
 int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 8c56461c..e529512b 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -207,14 +207,12 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers);
 
-/* Note: batch may be NULL if unmap op is not part of a batch */
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch);
-
 void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 			   struct vm_gk20a_mapping_batch *batch);
 void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref);
 
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
+
 /*
  * These all require the VM update lock to be held.
  */
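
With the two header changes above, the map/unmap declarations end up split between the common and Linux headers roughly as sketched below (paraphrased summary, not literal header text; the nvgpu_vm_map_linux() prototype is abbreviated):

	/* <nvgpu/vm.h> (common) */
	void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
	void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
				   struct vm_gk20a_mapping_batch *batch);
	void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref);

	/* <nvgpu/linux/vm.h> (Linux only, formerly "common/linux/vm_priv.h") */
	u64 nvgpu_vm_map_linux(struct vm_gk20a *vm, struct dma_buf *dmabuf, ...);
	int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
				  struct vm_gk20a_mapping_batch *batch);
	int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va, ...);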
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 8dcca0a1..c4256afb 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -32,6 +32,7 @@
 
 #include <nvgpu/vgpu/vm.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/nvgpu_mem.h>
 
 #include "vgpu/vgpu.h"
@@ -39,8 +40,6 @@
39#include "gk20a/mm_gk20a.h" 40#include "gk20a/mm_gk20a.h"
40#include "gm20b/mm_gm20b.h" 41#include "gm20b/mm_gm20b.h"
41 42
42#include "common/linux/vm_priv.h"
43
44static int vgpu_init_mm_setup_sw(struct gk20a *g) 43static int vgpu_init_mm_setup_sw(struct gk20a *g)
45{ 44{
46 struct mm_gk20a *mm = &g->mm; 45 struct mm_gk20a *mm = &g->mm;