Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c | 15 ++++++---------
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c       | 37 +++++++++++++++++++------------------
2 files changed, 25 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index 8a5318e4..f23dc53c 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -79,22 +79,19 @@ static int gk20a_as_ioctl_map_buffer_ex(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_map_buffer_ex_args *args)
 {
-	s16 compressible_kind;
-	s16 incompressible_kind;
-
 	gk20a_dbg_fn("");
 
-	if (args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
-		compressible_kind = args->compr_kind;
-		incompressible_kind = args->incompr_kind;
-	} else {
-		/* unsupported, direct kind control must be used */
+	/* unsupported, direct kind control must be used */
+	if (!(args->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)) {
+		struct gk20a *g = as_share->vm->mm->g;
+		nvgpu_log_info(g, "Direct kind control must be requested");
 		return -EINVAL;
 	}
 
 	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
 				   &args->offset, args->flags,
-				   compressible_kind, incompressible_kind,
+				   args->compr_kind,
+				   args->incompr_kind,
 				   args->buffer_offset,
 				   args->mapping_size,
 				   NULL);
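
With this hunk the ioctl no longer chooses a kind on behalf of userspace: a caller must set NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL and supply compr_kind/incompr_kind itself, otherwise the map fails with -EINVAL. Below is a minimal userspace-side sketch of such a call; it is illustrative and not part of the patch. Only the argument fields and the flag name are taken from the hunk above, while the uapi header path and the NVGPU_AS_IOCTL_MAP_BUFFER_EX request name are assumptions.

/*
 * Illustrative sketch only: the ioctl request name and header location
 * are assumptions; the argument fields come from the hunk above.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed uapi header location */

static int map_with_direct_kind_ctrl(int as_fd, int dmabuf_fd,
				     int16_t compr_kind, int16_t incompr_kind)
{
	struct nvgpu_as_map_buffer_ex_args args;

	memset(&args, 0, sizeof(args));
	args.dmabuf_fd = dmabuf_fd;
	args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL;
	args.compr_kind = compr_kind;		/* NV_KIND_INVALID disables compression */
	args.incompr_kind = incompr_kind;	/* fallback when comptags are unavailable */

	/* NVGPU_AS_IOCTL_MAP_BUFFER_EX is assumed to be the matching request. */
	return ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args);
}
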
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 0cfd010b..895a5771 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -32,7 +32,6 @@
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
-#include "gk20a/kind_gk20a.h"
 
 #include "platform_gk20a.h"
 #include "os_linux.h"
@@ -239,14 +238,17 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 
 	binfo.flags = flags;
 	binfo.size = dmabuf->size;
+	binfo.compr_kind = compr_kind;
+	binfo.incompr_kind = incompr_kind;
 
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
-		if (compr_kind != NV_KIND_INVALID)
-			map_key_kind = compr_kind;
-		else
-			map_key_kind = incompr_kind;
-	} else {
+	if (compr_kind != NV_KIND_INVALID)
 		map_key_kind = compr_kind;
+	else
+		map_key_kind = incompr_kind;
+
+	if (map_key_kind == NV_KIND_INVALID) {
+		nvgpu_err(g, "Valid kind must be supplied");
+		return -EINVAL;
 	}
 
 	if (vm->userspace_managed &&
@@ -277,10 +279,6 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		goto clean_up;
 	}
 
-	err = nvgpu_vm_init_kind_info(&binfo, compr_kind, incompr_kind);
-	if (err)
-		goto clean_up;
-
 	aperture = gk20a_dmabuf_aperture(g, dmabuf);
 	if (aperture == APERTURE_INVALID) {
 		err = -EINVAL;
@@ -321,9 +319,9 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		va_allocated = true;
 	}
 
-	err = nvgpu_vm_compute_kind_and_compression(vm, &binfo);
+	err = nvgpu_vm_compute_compression(vm, &binfo);
 	if (err) {
-		nvgpu_err(g, "failure setting up kind and compression");
+		nvgpu_err(g, "failure setting up compression");
 		goto clean_up;
 	}
 
@@ -340,10 +338,12 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 					   binfo.ctag_lines);
 		if (err) {
 			/* TBD: we can partially alloc ctags as well... */
-			if (binfo.use_uc_kind_v) {
-				/* no comptags, but fallback kind available */
-				binfo.kind_v = binfo.uc_kind_v;
-			} else {
+
+			/* prevent compression ... */
+			binfo.compr_kind = NV_KIND_INVALID;
+
+			/* ... and make sure we have the fallback */
+			if (binfo.incompr_kind == NV_KIND_INVALID) {
 				nvgpu_err(g, "comptag alloc failed and no fallback kind specified");
 				goto clean_up;
 			}
@@ -379,7 +379,8 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 				   buffer_offset, /* sg offset */
 				   mapping_size,
 				   binfo.pgsz_idx,
-				   binfo.kind_v,
+				   (binfo.compr_kind != NV_KIND_INVALID ?
+				    binfo.compr_kind : binfo.incompr_kind),
 				   ctag_offset,
 				   flags, rw_flag,
 				   clear_ctags,
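
Taken together, the vm.c hunks reduce kind handling to one rule: the map key and the kind programmed into the GMMU are compr_kind when it is valid, otherwise incompr_kind, and a comptag allocation failure downgrades compr_kind to NV_KIND_INVALID so a valid fallback must have been supplied. The standalone sketch below distills that rule; it is not part of the patch, NV_KIND_INVALID is assumed to be a negative sentinel, and choose_map_kind()/comptags_ok are hypothetical names standing in for the flow around gk20a_alloc_comptags().

/*
 * Distilled sketch of the post-patch kind selection; helper and field
 * names here are illustrative, not the driver's API.
 */
#include <stdbool.h>

#define NV_KIND_INVALID (-1)	/* assumed sentinel value */

struct kind_choice {
	short compr_kind;	/* kind used when compression is possible */
	short incompr_kind;	/* fallback kind without compression */
};

/* Returns the kind to map with, or NV_KIND_INVALID on error (-EINVAL case). */
static short choose_map_kind(struct kind_choice *b, bool comptags_ok)
{
	/* At least one valid kind must be supplied. */
	if (b->compr_kind == NV_KIND_INVALID &&
	    b->incompr_kind == NV_KIND_INVALID)
		return NV_KIND_INVALID;

	/* No comptags: prevent compression and rely on the fallback. */
	if (!comptags_ok)
		b->compr_kind = NV_KIND_INVALID;

	return (b->compr_kind != NV_KIND_INVALID) ?
		b->compr_kind : b->incompr_kind;
}
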