Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/vm.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index f356fee2..cbacbdc0 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -26,6 +26,7 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 #include "gk20a/kind_gk20a.h"
+#include "gk20a/platform_gk20a.h"
 
 #include "vm_priv.h"
 
@@ -187,6 +188,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 			struct vm_gk20a_mapping_batch *batch)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
+	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_comptag_allocator *ctag_allocator = &g->gr.comp_tags;
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
 	bool va_allocated = false;
@@ -224,7 +226,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	}
 
 	/* pin buffer to get phys/iovmm addr */
-	bfr.sgt = gk20a_mm_pin(g->dev, dmabuf);
+	bfr.sgt = gk20a_mm_pin(dev, dmabuf);
 	if (IS_ERR(bfr.sgt)) {
 		/* Falling back to physical is actually possible
 		 * here in many cases if we use 4K phys pages in the
@@ -283,7 +285,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	if (!vm->enable_ctag)
 		bfr.ctag_lines = 0;
 
-	gk20a_get_comptags(g->dev, dmabuf, &comptags);
+	gk20a_get_comptags(dev, dmabuf, &comptags);
 
 	/* ensure alignment to compression page size if compression enabled */
 	if (bfr.ctag_offset)
@@ -295,7 +297,8 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 		!!(flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS);
 
 	/* allocate compression resources if needed */
-	err = gk20a_alloc_comptags(g, g->dev, dmabuf, ctag_allocator,
+	err = gk20a_alloc_comptags(g, dev, dmabuf,
+				   ctag_allocator,
 				   bfr.ctag_lines, user_mappable,
 				   &ctag_map_win_size,
 				   &ctag_map_win_ctagline);
@@ -304,7 +307,8 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 		/* TBD: we can partially alloc ctags as well... */
 		bfr.kind_v = bfr.uc_kind_v;
 	} else {
-		gk20a_get_comptags(g->dev, dmabuf, &comptags);
+		gk20a_get_comptags(dev,
+				   dmabuf, &comptags);
 
 		if (g->ops.ltc.cbc_ctrl)
 			g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_clear,
@@ -396,7 +400,7 @@ clean_up:
 	if (va_allocated)
 		__nvgpu_vm_free_va(vm, map_offset, bfr.pgsz_idx);
 	if (!IS_ERR(bfr.sgt))
-		gk20a_mm_unpin(g->dev, dmabuf, bfr.sgt);
+		gk20a_mm_unpin(dev, dmabuf, bfr.sgt);
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	nvgpu_log_info(g, "err=%d", err);