Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
 drivers/gpu/nvgpu/common/linux/cde.c                      |  8
 drivers/gpu/nvgpu/common/linux/ioctl_as.c                 | 18
 drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c |  2
 drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c             |  2
 drivers/gpu/nvgpu/common/linux/vm.c                       | 44
 5 files changed, 58 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 143e5b75..c4f678b6 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -1052,9 +1052,9 @@ __releases(&l->cde_app->mutex)
 	/* map the destination buffer */
 	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
 	err = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
-				 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
-				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
-				 NV_KIND_INVALID,
+				 NVGPU_VM_MAP_CACHEABLE |
+				 NVGPU_VM_MAP_DIRECT_KIND_CTRL,
+				 NVGPU_KIND_INVALID,
 				 compbits_kind, /* incompressible kind */
 				 gk20a_mem_flag_none,
 				 map_offset, map_size,
@@ -1284,7 +1284,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* map backing store to gpu virtual space */
 	vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem,
 			       g->gr.compbit_store.mem.size,
-			       NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
+			       NVGPU_VM_MAP_CACHEABLE,
 			       gk20a_mem_flag_read_only,
 			       false,
 			       gr->compbit_store.mem.aperture);
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index 848fee04..8aea3d22 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -32,6 +32,18 @@
 #include "ioctl_as.h"
 #include "os_linux.h"
 
+static u32 gk20a_as_translate_linux_flags(struct gk20a *g, u32 flags)
+{
+	u32 core_flags = 0;
+
+	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
+		core_flags |= NVGPU_VM_AREA_ALLOC_FIXED_OFFSET;
+	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE)
+		core_flags |= NVGPU_VM_AREA_ALLOC_SPARSE;
+
+	return core_flags;
+}
+
 static int gk20a_as_ioctl_bind_channel(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_bind_channel_args *args)
@@ -62,9 +74,13 @@ static int gk20a_as_ioctl_alloc_space(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_alloc_space_args *args)
 {
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
 	gk20a_dbg_fn("");
 	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
-			&args->o_a.offset, args->flags);
+			&args->o_a.offset,
+			gk20a_as_translate_linux_flags(g,
+						       args->flags));
 }
 
 static int gk20a_as_ioctl_free_space(
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
index 9eb140a3..8c5a6d27 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -165,7 +165,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = 0;
 	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_VM_MAP_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
index f8c5c406..bc0fe575 100644
--- a/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/common/linux/vgpu/mm_vgpu.c
@@ -148,7 +148,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = mapping ? 1 : 0;
 	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_VM_MAP_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 8e464627..4529a322 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -37,6 +37,30 @@
 #include "os_linux.h"
 #include "dmabuf.h"
 
+static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
+{
+	u32 core_flags = 0;
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
+		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
+		core_flags |= NVGPU_VM_MAP_CACHEABLE;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
+		core_flags |= NVGPU_VM_MAP_IO_COHERENT;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE)
+		core_flags |= NVGPU_VM_MAP_UNMAPPED_PTE;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC)
+		core_flags |= NVGPU_VM_MAP_L3_ALLOC;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
+		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
+		nvgpu_warn(g, "Ignoring deprecated flag: "
+			   "NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS");
+
+	return core_flags;
+}
+
 static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
 	struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
 {
@@ -102,7 +126,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
 
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+	if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
 		mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
 		if (!mapped_buffer)
 			return NULL;
@@ -167,7 +191,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	u64 map_addr = 0ULL;
 	int err = 0;
 
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
+	if (flags & NVGPU_VM_MAP_FIXED_OFFSET)
 		map_addr = offset_align;
 
 	sgt = gk20a_mm_pin(dev, dmabuf);
@@ -229,15 +253,16 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch)
 {
-	int err = 0;
+	struct gk20a *g = gk20a_from_vm(vm);
 	struct dma_buf *dmabuf;
 	u64 ret_va;
+	int err = 0;
 
 	/* get ref to the mem handle (released on unmap_locked) */
 	dmabuf = dma_buf_get(dmabuf_fd);
 	if (IS_ERR(dmabuf)) {
-		nvgpu_warn(gk20a_from_vm(vm), "%s: fd %d is not a dmabuf",
+		nvgpu_warn(g, "%s: fd %d is not a dmabuf",
 			   __func__, dmabuf_fd);
 		return PTR_ERR(dmabuf);
 	}
 
@@ -250,9 +275,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	 */
 	if ((mapping_size > dmabuf->size) ||
 			(buffer_offset > (dmabuf->size - mapping_size))) {
-		nvgpu_err(gk20a_from_vm(vm),
+		nvgpu_err(g,
 			  "buf size %llx < (offset(%llx) + map_size(%llx))\n",
 			  (u64)dmabuf->size, buffer_offset, mapping_size);
 		return -EINVAL;
 	}
 
@@ -263,7 +288,8 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	}
 
 	err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
-				 flags, compr_kind, incompr_kind,
+				 nvgpu_vm_translate_linux_flags(g, flags),
+				 compr_kind, incompr_kind,
 				 gk20a_mem_flag_none,
 				 buffer_offset,
 				 mapping_size,
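
Note: every hunk above follows the same pattern: raw Linux ioctl flags (NVGPU_AS_MAP_BUFFER_FLAGS_* / NVGPU_AS_ALLOC_SPACE_FLAGS_*) are translated once at the ioctl boundary into OS-independent core flags (NVGPU_VM_MAP_* / NVGPU_VM_AREA_ALLOC_*), and the common VM code then tests only the core flags. Below is a minimal standalone sketch of that translation pattern; the flag names and bit values in it are illustrative placeholders, not the real nvgpu definitions.

/*
 * Sketch of the uAPI-to-core flag translation done at the ioctl
 * boundary. All names here are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

/* Placeholder uAPI flag bits (what userspace passes in the ioctl). */
#define AS_MAP_FLAGS_FIXED_OFFSET  (1u << 0)
#define AS_MAP_FLAGS_CACHEABLE     (1u << 2)

/* Placeholder core VM flag bits (what the common VM code consumes). */
#define VM_MAP_FIXED_OFFSET        (1u << 0)
#define VM_MAP_CACHEABLE           (1u << 1)

static uint32_t translate_map_flags(uint32_t uapi_flags)
{
	uint32_t core_flags = 0;

	/* Translate each uAPI bit to its core equivalent; unknown or
	 * deprecated bits are simply dropped. */
	if (uapi_flags & AS_MAP_FLAGS_FIXED_OFFSET)
		core_flags |= VM_MAP_FIXED_OFFSET;
	if (uapi_flags & AS_MAP_FLAGS_CACHEABLE)
		core_flags |= VM_MAP_CACHEABLE;

	return core_flags;
}

int main(void)
{
	/* An ioctl handler would translate once, then hand only core
	 * flags to the OS-independent mapping code. */
	uint32_t core = translate_map_flags(AS_MAP_FLAGS_CACHEABLE);

	printf("core flags: 0x%x\n", core);
	return 0;
}

The point of the split is that the core VM code no longer depends on the Linux uAPI header: the uAPI bit layout can change (or be reused by another OS layer) without touching the common code, at the cost of one translation call per ioctl.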