summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/linux
diff options
context:
space:
mode:
authorSami Kiminki <skiminki@nvidia.com>2017-11-02 15:28:42 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-11-03 14:55:30 -0400
commit9f5f029ae291ebbfef9f1e08b39875fa47c44982 (patch)
tree24220a5d15cd838131fd477d077d76eb96baef2c /drivers/gpu/nvgpu/common/linux
parentfe1e09d473044f7caaf8b834a094f4784bc5f5e1 (diff)
gpu: nvgpu: Return error code properly from nvgpu_vm_map_linux
The function nvgpu_vm_map_linux() used to return GPU VA on successful map or 0 when things didn't go smoothly. However, this scheme does not propagate the actual map error back to the userspace. So, modify the function a bit: return error and return the GPU VA via pointer on success. Bug 1705731 Change-Id: I2174b5fbaf64dcb00f9567dab1c583d6ddfa5d78 Signed-off-by: Sami Kiminki <skiminki@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1590961 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c   7
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c   34
2 files changed, 23 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index f6020d9a..775f9657 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -1046,15 +1046,16 @@ __releases(&l->cde_app->mutex)
 
 	/* map the destination buffer */
 	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
-	map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
+	err = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
			NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
			NV_KIND_INVALID,
			compbits_kind, /* incompressible kind */
			gk20a_mem_flag_none,
			map_offset, map_size,
-			NULL);
-	if (!map_vaddr) {
+			NULL,
+			&map_vaddr);
+	if (err) {
 		dma_buf_put(compbits_scatter_buf);
 		err = -EINVAL;
 		goto exit_idle;
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index d04ed5ed..123807d9 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -201,7 +201,7 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 	return mapped_buffer->addr;
 }
 
-u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
+int nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       struct dma_buf *dmabuf,
		       u64 offset_align,
		       u32 flags,
@@ -210,7 +210,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       int rw_flag,
		       u64 buffer_offset,
		       u64 mapping_size,
-		       struct vm_gk20a_mapping_batch *batch)
+		       struct vm_gk20a_mapping_batch *batch,
+		       u64 *gpu_va)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct device *dev = dev_from_gk20a(g);
@@ -263,12 +264,14 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
						flags, map_key_kind, rw_flag);
 		if (map_offset) {
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
-			return map_offset;
+			*gpu_va = map_offset;
+			return 0;
 		}
 	}
 
 	sgt = gk20a_mm_pin(dev, dmabuf);
 	if (IS_ERR(sgt)) {
+		err = PTR_ERR(sgt);
 		nvgpu_warn(g, "oom allocating tracking buffer");
 		goto clean_up;
 	}
@@ -424,7 +427,8 @@ u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
-	return map_offset;
+	*gpu_va = map_offset;
+	return 0;
 
 clean_up:
 	nvgpu_kfree(g, mapped_buffer);
@@ -435,7 +439,7 @@ clean_up:
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	nvgpu_log_info(g, "err=%d", err);
-	return 0;
+	return err;
 }
 
 int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
@@ -483,18 +487,18 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 		return err;
 	}
 
-	ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
+	err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
			flags, compr_kind, incompr_kind,
			gk20a_mem_flag_none,
			buffer_offset,
			mapping_size,
-			batch);
+			batch,
+			&ret_va);
 
-	*offset_align = ret_va;
-	if (!ret_va) {
+	if (!err)
+		*offset_align = ret_va;
+	else
 		dma_buf_put(dmabuf);
-		err = -EINVAL;
-	}
 
 	return err;
 }