From 9bc735ac6a0776f76604f91b6c2659450aef3028 Mon Sep 17 00:00:00 2001
From: Richard Zhao
Date: Tue, 29 Nov 2016 17:43:12 -0800
Subject: gpu: nvgpu: vgpu: fix va leak when call gk20a_vm_free_va

The page size index needs to be set explicitly when calling
gk20a_vm_free_va.

Bug 200255799
JIRA VFND-3033

Change-Id: I376c63e724b8f59aee389c54ca1589683536f043
Signed-off-by: Richard Zhao
Reviewed-on: http://git-master/r/1262586
(cherry picked from commit 82c05633f17fa094d8e08c8a0fa4bad2d3275268)
Reviewed-on: http://git-master/r/1263403
Reviewed-by: Aingara Paramakuru
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov
---
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 65e3589b..f1b498ca 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -208,7 +208,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			gk20a_vm_free_va(ch_vm, g_bfr_va[i],
-					 g_bfr_size[i], 0);
+					 g_bfr_size[i], gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -238,7 +238,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_vm_free_va(ch_vm, g_bfr_va[i], g_bfr_size[i], 0);
+			gk20a_vm_free_va(ch_vm, g_bfr_va[i], g_bfr_size[i],
+					 gmmu_page_size_kernel);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -292,7 +293,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	if (unlikely(err)) {
 		gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 gr_ctx->mem.size, 0);
+				 gr_ctx->mem.size, gmmu_page_size_kernel);
 		kfree(gr_ctx);
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -318,7 +319,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 		WARN_ON(err || msg.ret);
 
-		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size, 0);
+		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
+				 gmmu_page_size_kernel);
 		kfree(gr_ctx);
 	}
 }
@@ -356,7 +358,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
 		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, 0);
+				 patch_ctx->mem.size, gmmu_page_size_kernel);
 		err = -ENOMEM;
 	}
 
@@ -382,7 +384,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct channel_gk20a *c)
 		WARN_ON(err || msg.ret);
 
 		gk20a_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 patch_ctx->mem.size, 0);
+				 patch_ctx->mem.size, gmmu_page_size_kernel);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -407,7 +409,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct channel_gk20a *c)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 
-	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size, 0);
+	gk20a_vm_free_va(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
+			 gmmu_page_size_kernel);
 	pm_ctx->mem.gpu_va = 0;
 }
-- 
cgit v1.2.2