Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c       |  12
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dmabuf.c    |   2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c  |   6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c |   3
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c    |   3
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c        |  76
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm_priv.h   | 102
7 files changed, 65 insertions, 139 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 577d86e8..30edc1d5 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -31,6 +31,8 @@
 #include <nvgpu/bug.h>
 #include <nvgpu/firmware.h>
 
+#include <nvgpu/linux/vm.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/channel_gk20a.h"
 #include "gk20a/mm_gk20a.h"
@@ -44,12 +46,6 @@
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
-/*
- * Currently this code uses nvgpu_vm_map() since it takes dmabuf FDs from the
- * CDE ioctls. That has to change - instead this needs to take an nvgpu_mem.
- */
-#include "common/linux/vm_priv.h"
-
 static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx);
 static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l);
 
@@ -1052,8 +1048,8 @@ __releases(&l->cde_app->mutex)
 
 
 	/* map the destination buffer */
-	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map */
-	map_vaddr = nvgpu_vm_map(cde_ctx->vm, compbits_scatter_buf, 0,
+	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
+	map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
 				 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
 				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
 				 NV_KIND_INVALID,
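
Note: the change above is only a rename at the call site; the argument order is unchanged and matches the nvgpu_vm_map_linux() definition shown in the vm.c hunks further down. A minimal, illustrative call-site sketch follows; the flag, kind, offset and size values are placeholders, not the actual cde.c arguments (the hunk above is truncated before them):

/*
 * Illustrative only: parameter order tracks the nvgpu_vm_map_linux()
 * definition in vm.c below; the concrete values are assumptions.
 * Returns the GPU VA, or 0 on failure.
 */
static u64 example_map_compbits(struct vm_gk20a *vm, struct dma_buf *dmabuf)
{
	get_dma_buf(dmabuf);	/* a dma_buf reference for the new mapping */

	return nvgpu_vm_map_linux(vm, dmabuf,
				  0,			/* offset_align */
				  NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
				  NV_KIND_INVALID,	/* compr_kind (placeholder) */
				  NV_KIND_INVALID,	/* incompr_kind (placeholder) */
				  false,		/* user_mapped */
				  gk20a_mem_flag_none,	/* rw_flag */
				  0,			/* buffer_offset */
				  0,			/* mapping_size */
				  NULL);		/* batch */
}
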
diff --git a/drivers/gpu/nvgpu/common/linux/dmabuf.c b/drivers/gpu/nvgpu/common/linux/dmabuf.c
index 0b07b255..2415b7c2 100644
--- a/drivers/gpu/nvgpu/common/linux/dmabuf.c
+++ b/drivers/gpu/nvgpu/common/linux/dmabuf.c
@@ -21,13 +21,13 @@
 #include <nvgpu/comptags.h>
 #include <nvgpu/enabled.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
 
 #include "dmabuf.h"
-#include "vm_priv.h"
 #include "os_linux.h"
 
 static void gk20a_mm_delete_priv(void *_priv)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index cfc4e7ef..08064370 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -17,19 +17,19 @@
 #include <linux/uaccess.h>
 #include <linux/fs.h>
 
-#include <nvgpu/log2.h>
-
 #include <trace/events/gk20a.h>
 
 #include <uapi/linux/nvgpu.h>
 
 #include <nvgpu/gmmu.h>
 #include <nvgpu/vm_area.h>
+#include <nvgpu/log2.h>
+
+#include <nvgpu/linux/vm.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
 #include "ioctl_as.h"
-#include "vm_priv.h"
 #include "os_linux.h"
 
 static int gk20a_as_ioctl_bind_channel(
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index c8831a97..7e62bb5c 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -28,7 +28,9 @@
 #include <nvgpu/vm.h>
 #include <nvgpu/atomic.h>
 #include <nvgpu/cond.h>
+
 #include <nvgpu/linux/vidmem.h>
+#include <nvgpu/linux/vm.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -38,7 +40,6 @@
 #include "os_linux.h"
 #include "ioctl_dbg.h"
 
-#include "vm_priv.h"
 
 /* silly allocator - just increment id */
 static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
index 03976da3..1e65b54d 100644
--- a/drivers/gpu/nvgpu/common/linux/vidmem.c
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -24,14 +24,13 @@
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/dma.h>
 #include <nvgpu/linux/vidmem.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
-#include "vm_priv.h"
-
 bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
 	return !!(addr & 1ULL);
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 638d3e51..984c2015 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -25,6 +25,7 @@
 #include <nvgpu/page_allocator.h>
 #include <nvgpu/vidmem.h>
 
+#include <nvgpu/linux/vm.h>
 #include <nvgpu/linux/vidmem.h>
 #include <nvgpu/linux/nvgpu_mem.h>
 
@@ -33,7 +34,6 @@
 #include "gk20a/kind_gk20a.h"
 #include "gk20a/platform_gk20a.h"
 
-#include "vm_priv.h"
 #include "os_linux.h"
 #include "dmabuf.h"
 
@@ -323,17 +323,17 @@ static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind,
 	return 0;
 }
 
-u64 nvgpu_vm_map(struct vm_gk20a *vm,
+u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		 struct dma_buf *dmabuf,
 		 u64 offset_align,
 		 u32 flags,
 		 s16 compr_kind,
 		 s16 incompr_kind,
 		 bool user_mapped,
 		 int rw_flag,
 		 u64 buffer_offset,
 		 u64 mapping_size,
 		 struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
@@ -625,12 +625,12 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}
 
-	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
+	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
 			      flags, compr_kind, incompr_kind, true,
 			      gk20a_mem_flag_none,
 			      buffer_offset,
 			      mapping_size,
 			      batch);
 
 	*offset_align = ret_va;
 	if (!ret_va) {
@@ -641,21 +641,55 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	return err;
 }
 
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
-		return;
+		return 0;
+	}
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				    &mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+					"sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return 0;
 	}
 
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return 0;
 }
 
 /* NOTE! mapped_buffers lock must be held */
@@ -691,6 +725,4 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 	dma_buf_put(mapped_buffer->dmabuf);
 
 	nvgpu_kfree(g, mapped_buffer);
-
-	return;
 }
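
For orientation, the two buffer-level entry points touched in vm.c can be paired as below. This is a minimal sketch under stated assumptions: the flag and kind values are placeholders, and passing a NULL batch relies on the "batch may be NULL" note from the removed vm_priv.h header further down; only the two signatures come from this patch.

static int example_map_then_unmap(struct vm_gk20a *vm, int dmabuf_fd)
{
	u64 gpu_va = 0;		/* in/out: overwritten with the mapped GPU VA */
	int err;

	err = nvgpu_vm_map_buffer(vm, dmabuf_fd, &gpu_va,
				  0,			/* NVGPU_AS_MAP_BUFFER_FLAGS_* */
				  NV_KIND_INVALID,	/* compr_kind (placeholder) */
				  NV_KIND_INVALID,	/* incompr_kind (placeholder) */
				  0,			/* buffer_offset */
				  0,			/* mapping_size */
				  NULL);		/* batch: NULL when not batched */
	if (err)
		return err;

	/* ... use the mapping at gpu_va ... */

	return nvgpu_vm_unmap_buffer(vm, gpu_va, NULL);
}
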
diff --git a/drivers/gpu/nvgpu/common/linux/vm_priv.h b/drivers/gpu/nvgpu/common/linux/vm_priv.h
deleted file mode 100644
index be7efa8b..00000000
--- a/drivers/gpu/nvgpu/common/linux/vm_priv.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __COMMON_LINUX_VM_PRIV_H__
-#define __COMMON_LINUX_VM_PRIV_H__
-
-#include <nvgpu/types.h>
-
-struct sg_table;
-struct dma_buf;
-
-struct vm_gk20a;
-struct vm_gk20a_mapping_batch;
-
-struct buffer_attrs {
-	struct sg_table *sgt;
-	u64 size;
-	u64 align;
-	u32 ctag_offset;
-	u32 ctag_lines;
-	u32 ctag_allocated_lines;
-	int pgsz_idx;
-	u8 kind_v;
-	bool use_kind_v;
-	u8 uc_kind_v;
-	bool use_uc_kind_v;
-	bool ctag_user_mappable;
-};
-
-u64 nvgpu_vm_map(struct vm_gk20a *vm,
-		 struct dma_buf *dmabuf,
-		 u64 offset_align,
-		 u32 flags,
-
-		 /*
-		  * compressible kind if
-		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-		  * specified, otherwise just the kind
-		  */
-		 s16 compr_kind,
-
-		 /*
-		  * incompressible kind if
-		  * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-		  * specified, otherwise ignored
-		  */
-		 s16 incompr_kind,
-
-		 bool user_mapped,
-		 int rw_flag,
-		 u64 buffer_offset,
-		 u64 mapping_size,
-		 struct vm_gk20a_mapping_batch *mapping_batch);
-
-/*
- * Notes:
- * - Batch may be NULL if map op is not part of a batch.
- * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is set,
- *   compr_kind and incompr_kind work as explained in nvgpu.h.
- * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is NOT set,
- *   compr_kind holds the kind and kernel will figure out whether
- *   it is a compressible or incompressible kind. If compressible, kernel will
- *   also figure out the incompressible counterpart or return an error.
- */
-int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
-			int dmabuf_fd,
-			u64 *offset_align,
-			u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
-			s16 compr_kind,
-			s16 incompr_kind,
-			u64 buffer_offset,
-			u64 mapping_size,
-			struct vm_gk20a_mapping_batch *batch);
-
-void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
-
-/* find buffer corresponding to va */
-int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
-		      struct dma_buf **dmabuf,
-		      u64 *offset);
-
-enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
-					  struct dma_buf *dmabuf);
-int validate_fixed_buffer(struct vm_gk20a *vm,
-			  struct buffer_attrs *bfr,
-			  u64 map_offset, u64 map_size,
-			  struct nvgpu_vm_area **pva_node);
-
-#endif
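
The notes deleted with this header still describe the kind-control contract of nvgpu_vm_map_buffer() (now declared elsewhere, per the <nvgpu/linux/vm.h> includes added throughout this patch). A small sketch of the two calling conventions; the helper name and kind values are hypothetical, only the flag semantics come from the comment block above.

static int example_kind_conventions(struct vm_gk20a *vm, int dmabuf_fd,
				    s16 compr_kind, s16 incompr_kind)
{
	u64 va = 0;	/* overwritten with the resulting GPU VA */
	int err;

	/* 1) Direct kind control: both kinds are passed explicitly. */
	err = nvgpu_vm_map_buffer(vm, dmabuf_fd, &va,
				  NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
				  compr_kind, incompr_kind,
				  0, 0, NULL);
	if (err)
		return err;

	/*
	 * 2) Legacy control: compr_kind carries the single kind, the kernel
	 *    derives (or rejects) the compressible/incompressible pairing,
	 *    and incompr_kind is ignored.
	 */
	err = nvgpu_vm_map_buffer(vm, dmabuf_fd, &va,
				  0, compr_kind, NV_KIND_INVALID /* ignored */,
				  0, 0, NULL);
	return err;
}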