From 50667e097b2be567e3d2f95e23b046243bca2bf6 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Tue, 21 Mar 2017 15:34:50 -0700
Subject: gpu: nvgpu: Rename nvgpu DMA APIs

Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*.
This better reflects the purpose of the APIs (to allocate DMA suitable
memory) and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 40 +++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')

diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9b6a546..af02491e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_flags_sys(g,
+			ret = nvgpu_dma_alloc_flags_sys(g,
 					NVGPU_DMA_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		if (!pm_ctx->mem.gpu_va) {
 			gk20a_err(dev_from_gk20a(g),
 				"failed to map pm ctxt buffer");
-			gk20a_gmmu_free(g, &pm_ctx->mem);
+			nvgpu_dma_free(g, &pm_ctx->mem);
 			c->g->ops.fifo.enable_channel(c);
 			return -ENOMEM;
 		}
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &pm_ctx->mem);
+	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));

 	gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 			g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 			g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));

-	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;

@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	if (ucode_info->surface_desc.gpu_va)
 		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 				ucode_info->surface_desc.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &ucode_info->surface_desc);
+	nvgpu_dma_free(g, &ucode_info->surface_desc);

 	release_firmware(gpccs_fw);
 	gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 	if (!desc)
 		return;
-	gk20a_gmmu_free(g, &desc->mem);
+	nvgpu_dma_free(g, &desc->mem);
 	desc->destroy = NULL;
 }

@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;

-	err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, &desc->mem);
 	if (err)
 		return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;

-	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				gr->ctx_vars.buffer_total_size,
 				&gr_ctx->mem);
 	if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	return 0;

 err_free_mem:
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }

@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,

 	gk20a_dbg_fn("");

-	err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 				128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 		gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
 			patch_ctx->mem.size, gk20a_mem_flag_none);

-	gk20a_gmmu_free(g, &patch_ctx->mem);
+	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
 }

@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
 			pm_ctx->mem.size, gk20a_mem_flag_none);

-		gk20a_gmmu_free(g, &pm_ctx->mem);
+		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
 }

@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)

 	gr_gk20a_free_global_ctx_buffers(g);

-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-	gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_rd_mem);

-	gk20a_gmmu_free(g, &gr->compbit_store.mem);
+	nvgpu_dma_free(g, &gr->compbit_store.mem);

 	memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));

@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;

-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;

-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;

 	return 0;

 err_free_wr_mem:
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 	return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}

 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;
--
cgit v1.2.2
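
For readers who want the renamed API in isolation, here is a minimal sketch of the alloc/free pairing this patch converts to. It is built only from the call sites visible in the diff above (nvgpu_dma_alloc_sys(), nvgpu_dma_free(), and the 0x1000 sysmem buffers used for mmu_wr_mem/mmu_rd_mem); the prototypes, the header include, and the example_alloc_scratch() helper are illustrative assumptions, not part of this patch or verified against the nvgpu headers.

/*
 * Minimal sketch of the renamed DMA alloc/free pairing. The prototypes
 * below are transcribed from the call sites in this patch; they are
 * assumptions for illustration, not verified against the nvgpu headers.
 */
#include <linux/types.h>

struct gk20a;       /* GPU device context, passed at every call site above */
struct nvgpu_mem;   /* DMA buffer descriptor, used at every call site above */

int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem);

/* Hypothetical helper: allocate, use, and release a sysmem DMA buffer. */
static int example_alloc_scratch(struct gk20a *g, struct nvgpu_mem *mem)
{
	int err;

	/* 0x1000 mirrors the mmu_wr_mem/mmu_rd_mem allocations above. */
	err = nvgpu_dma_alloc_sys(g, 0x1000, mem);
	if (err)
		return err;

	/* ... fill or map the buffer here ... */

	/* Every nvgpu_dma_alloc_* call site pairs with nvgpu_dma_free(). */
	nvgpu_dma_free(g, mem);
	return 0;
}

The flags and map variants seen in the diff (nvgpu_dma_alloc_flags_sys() with NVGPU_DMA_NO_KERNEL_MAPPING, nvgpu_dma_alloc_map_sys() taking a vm) follow the same pairing; as the hunks show, the rename is mechanical and every converted call keeps its original arguments.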