summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/vm.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-11-16 15:56:53 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-11-17 19:17:20 -0500
commit35ae4194a05d47aa6d79353428f81f2ca47ce90f (patch)
tree73c7f15348e1f5deb411392f41e339572b797bb4 /drivers/gpu/nvgpu/common/linux/vm.c
parentb42fb7ba26b565f93118fbdd9e17b42ee6144c5e (diff)
gpu: nvgpu: Add translation for NVGPU MM flags
Add a translation layer to convert from the NVGPU_AS_* flags to to new set of NVGPU_VM_MAP_* and NVGPU_VM_AREA_ALLOC_* flags. This allows the common MM code to not depend on the UAPI header defined for Linux. In addition to this change a couple of other small changes were made: 1. Deprecate, print a warning, and ignore usage of the NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS flag. 2. Move the t19x IO coherence flag from the t19x UAPI header to the regular UAPI header. JIRA NVGPU-293 Change-Id: I146402b0e8617294374e63e78f8826c57cd3b291 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1599802 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/vm.c44
1 file changed, 35 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 8e464627..4529a322 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -37,6 +37,30 @@
37#include "os_linux.h" 37#include "os_linux.h"
38#include "dmabuf.h" 38#include "dmabuf.h"
39 39
/*
 * Translate the Linux UAPI mapping flags (NVGPU_AS_MAP_BUFFER_FLAGS_*)
 * into the OS-independent core flags (NVGPU_VM_MAP_*) consumed by the
 * common MM code. Each recognized UAPI bit is tested individually and,
 * if set, the corresponding core bit is OR'd into the result; UAPI bits
 * with no core equivalent are silently dropped.
 *
 * The MAPPABLE_COMPBITS flag is deprecated: it is ignored (no core bit
 * is set for it) and a warning is printed via @g so callers can migrate.
 *
 * @g:     GPU context, used only for the deprecation warning.
 * @flags: NVGPU_AS_MAP_BUFFER_FLAGS_* bitmask from the Linux ioctl layer.
 *
 * Returns the equivalent NVGPU_VM_MAP_* bitmask (0 if no recognized
 * flags were set).
 */
40static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
41{
42	u32 core_flags = 0;
43
44	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
45		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
46	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
47		core_flags |= NVGPU_VM_MAP_CACHEABLE;
48	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
49		core_flags |= NVGPU_VM_MAP_IO_COHERENT;
50	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE)
51		core_flags |= NVGPU_VM_MAP_UNMAPPED_PTE;
52	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC)
53		core_flags |= NVGPU_VM_MAP_L3_ALLOC;
54	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
55		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
56
	/* Deprecated flag: warn and ignore — intentionally sets no core bit. */
57	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
58		nvgpu_warn(g, "Ignoring deprecated flag: "
59			   "NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS");
60
61	return core_flags;
62}
63
40static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse( 64static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
41 struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind) 65 struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
42{ 66{
@@ -102,7 +126,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
102 struct gk20a *g = gk20a_from_vm(vm); 126 struct gk20a *g = gk20a_from_vm(vm);
103 struct nvgpu_mapped_buf *mapped_buffer = NULL; 127 struct nvgpu_mapped_buf *mapped_buffer = NULL;
104 128
105 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 129 if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
106 mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr); 130 mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
107 if (!mapped_buffer) 131 if (!mapped_buffer)
108 return NULL; 132 return NULL;
@@ -167,7 +191,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
167 u64 map_addr = 0ULL; 191 u64 map_addr = 0ULL;
168 int err = 0; 192 int err = 0;
169 193
170 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) 194 if (flags & NVGPU_VM_MAP_FIXED_OFFSET)
171 map_addr = offset_align; 195 map_addr = offset_align;
172 196
173 sgt = gk20a_mm_pin(dev, dmabuf); 197 sgt = gk20a_mm_pin(dev, dmabuf);
@@ -229,15 +253,16 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
229 u64 mapping_size, 253 u64 mapping_size,
230 struct vm_gk20a_mapping_batch *batch) 254 struct vm_gk20a_mapping_batch *batch)
231{ 255{
232 int err = 0; 256 struct gk20a *g = gk20a_from_vm(vm);
233 struct dma_buf *dmabuf; 257 struct dma_buf *dmabuf;
234 u64 ret_va; 258 u64 ret_va;
259 int err = 0;
235 260
236 /* get ref to the mem handle (released on unmap_locked) */ 261 /* get ref to the mem handle (released on unmap_locked) */
237 dmabuf = dma_buf_get(dmabuf_fd); 262 dmabuf = dma_buf_get(dmabuf_fd);
238 if (IS_ERR(dmabuf)) { 263 if (IS_ERR(dmabuf)) {
239 nvgpu_warn(gk20a_from_vm(vm), "%s: fd %d is not a dmabuf", 264 nvgpu_warn(g, "%s: fd %d is not a dmabuf",
240 __func__, dmabuf_fd); 265 __func__, dmabuf_fd);
241 return PTR_ERR(dmabuf); 266 return PTR_ERR(dmabuf);
242 } 267 }
243 268
@@ -250,9 +275,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
250 */ 275 */
251 if ((mapping_size > dmabuf->size) || 276 if ((mapping_size > dmabuf->size) ||
252 (buffer_offset > (dmabuf->size - mapping_size))) { 277 (buffer_offset > (dmabuf->size - mapping_size))) {
253 nvgpu_err(gk20a_from_vm(vm), 278 nvgpu_err(g,
254 "buf size %llx < (offset(%llx) + map_size(%llx))\n", 279 "buf size %llx < (offset(%llx) + map_size(%llx))\n",
255 (u64)dmabuf->size, buffer_offset, mapping_size); 280 (u64)dmabuf->size, buffer_offset, mapping_size);
256 return -EINVAL; 281 return -EINVAL;
257 } 282 }
258 283
@@ -263,7 +288,8 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
263 } 288 }
264 289
265 err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align, 290 err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
266 flags, compr_kind, incompr_kind, 291 nvgpu_vm_translate_linux_flags(g, flags),
292 compr_kind, incompr_kind,
267 gk20a_mem_flag_none, 293 gk20a_mem_flag_none,
268 buffer_offset, 294 buffer_offset,
269 mapping_size, 295 mapping_size,