summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/gmmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c14
1 file changed, 13 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 44e540dc..89d71b3b 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -86,6 +86,18 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
86 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) 86 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
87 flags |= NVGPU_VM_MAP_IO_COHERENT; 87 flags |= NVGPU_VM_MAP_IO_COHERENT;
88 88
89 /*
90 * Later on, when we free this nvgpu_mem's GPU mapping, we are going to
91 * potentially have to free the GPU VA space. If the address passed in
92 * is non-zero then this API is not expected to manage the VA space and
93 * therefor we should not try and free it. But otherwise, if we do
94 * manage the VA alloc, we obviously must free it.
95 */
96 if (addr != 0)
97 mem->free_gpu_va = false;
98 else
99 mem->free_gpu_va = true;
100
89 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 101 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
90 vaddr = g->ops.mm.gmmu_map(vm, addr, 102 vaddr = g->ops.mm.gmmu_map(vm, addr,
91 sgt, /* sg list */ 103 sgt, /* sg list */
@@ -152,7 +164,7 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
152 gpu_va, 164 gpu_va,
153 mem->size, 165 mem->size,
154 gmmu_page_size_kernel, 166 gmmu_page_size_kernel,
155 true, /*va_allocated */ 167 mem->free_gpu_va,
156 gk20a_mem_flag_none, 168 gk20a_mem_flag_none,
157 false, 169 false,
158 NULL); 170 NULL);