author	Alex Waterman <alexw@nvidia.com>	2018-03-06 18:47:39 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-03-14 13:04:03 -0400
commit	bf374b779fbb71280fafcefeb3bb3d68f047b304 (patch)
tree	d3f3d0eb4a95c89841b9a9346142b773ccd01a4b /drivers/gpu/nvgpu/common/mm/gmmu.c
parent	76ad9e8366f5c73d1ea47d54cea043f8cd9fa23e (diff)
gpu: nvgpu: Pass correct va_allocated field in .gmmu_unmap()
When nvgpu maps an nvgpu_mem struct the nvgpu driver has a choice of
either using a fixed or non-fixed mapping. For non-fixed mappings the
GMMU APIs allocate a VA space for the caller. In that case the GMMU
APIs must also free that VA range when nvgpu unmaps the nvgpu_mem. For
fixed mappings the GMMU APIs must instead not manage the lifetime of
the VA space.

To support these two possibilities add a field to nvgpu_mem that
specifies whether the GMMU APIs must or must not free the GPU VA range
during the GMMU unmap operation.

Also fix a case in the nvgpu vm_area code that would double-free a VA
allocation in some cases (sparse allocs).

Change-Id: Idc32dbb8208fa7c1c05823e67b54707fea51c6b7
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1669920
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
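The ownership rule this patch encodes is small enough to model in isolation. Below is a minimal, self-contained C sketch; the struct and helper names are hypothetical stand-ins, and only the free_gpu_va decision mirrors what the patch actually does:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct nvgpu_mem: only free_gpu_va models
 * the real field this patch consults at unmap time. */
struct fake_mem {
	unsigned long long gpu_va;
	bool free_gpu_va;
};

/* Models __nvgpu_gmmu_map(): addr == 0 means a non-fixed mapping, so
 * the GMMU code allocates the VA range itself and must later free it;
 * a non-zero addr means the caller owns (and must free) the range. */
static unsigned long long fake_map(struct fake_mem *m, unsigned long long addr)
{
	m->free_gpu_va = (addr == 0);
	m->gpu_va = addr ? addr : 0x1000000ULL; /* pretend VA allocator */
	return m->gpu_va;
}

/* Models nvgpu_gmmu_unmap(): after the patch, the va_allocated argument
 * comes from mem->free_gpu_va rather than being hard-coded to true. */
static void fake_unmap(const struct fake_mem *m)
{
	printf("unmap 0x%llx: %s the VA range\n", m->gpu_va,
	       m->free_gpu_va ? "free" : "do not free");
}

int main(void)
{
	struct fake_mem nonfixed = {0}, fixed = {0};

	fake_map(&nonfixed, 0);            /* non-fixed: driver-owned VA */
	fake_map(&fixed, 0x2000000ULL);    /* fixed: caller-owned VA     */

	fake_unmap(&nonfixed);             /* -> free the VA range       */
	fake_unmap(&fixed);                /* -> do not free             */
	return 0;
}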
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 44e540dc..89d71b3b 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -86,6 +86,18 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
 		flags |= NVGPU_VM_MAP_IO_COHERENT;
 
+	/*
+	 * Later on, when we free this nvgpu_mem's GPU mapping, we are going to
+	 * potentially have to free the GPU VA space. If the address passed in
+	 * is non-zero then this API is not expected to manage the VA space and
+	 * therefore we should not try to free it. But otherwise, if we do
+	 * manage the VA alloc, we obviously must free it.
+	 */
+	if (addr != 0)
+		mem->free_gpu_va = false;
+	else
+		mem->free_gpu_va = true;
+
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	vaddr = g->ops.mm.gmmu_map(vm, addr,
 			   sgt,    /* sg list */
@@ -152,7 +164,7 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
 			gpu_va,
 			mem->size,
 			gmmu_page_size_kernel,
-			true, /* va_allocated */
+			mem->free_gpu_va,
 			gk20a_mem_flag_none,
 			false,
 			NULL);
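For orientation, a caller-side round trip after this change might look like the sketch below. nvgpu_gmmu_map() and nvgpu_gmmu_map_fixed() are real entry points in nvgpu's GMMU API, but the parameter lists shown here are approximations rather than verbatim signatures, and want_va is a hypothetical caller-chosen address; nvgpu_gmmu_unmap()'s signature is taken from the hunk header above.

/* Non-fixed: __nvgpu_gmmu_map() sees addr == 0, allocates the VA range,
 * and sets mem->free_gpu_va = true. Parameter list is an approximation. */
u64 va = nvgpu_gmmu_map(vm, mem, mem->size, 0,
			gk20a_mem_flag_none, false, mem->aperture);

/* Fixed: the caller supplies the GPU VA (want_va is hypothetical), so
 * addr != 0 and mem->free_gpu_va is set to false -- the caller owns
 * that range. */
u64 fixed_va = nvgpu_gmmu_map_fixed(vm, mem, want_va, mem->size, 0,
				    gk20a_mem_flag_none, false,
				    mem->aperture);

/* Either mapping can now go through the same unmap path: it forwards
 * mem->free_gpu_va as va_allocated instead of hard-coded true, so a
 * fixed mapping's VA range is left alone. */
nvgpu_gmmu_unmap(vm, mem, va);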