path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
author	Alex Waterman <alexw@nvidia.com>	2017-03-21 18:34:50 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:15:04 -0400
commit	50667e097b2be567e3d2f95e23b046243bca2bf6 (patch)
tree	e8fc42261868c6d69844f2e92fce33f6169434d4	/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent	8f2d4a3f4a0acc81bae6725d30506e92651a42b5 (diff)
gpu: nvgpu: Rename nvgpu DMA APIs
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA suitable memory)
and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
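The rename is mechanical: each DMA helper keeps its signature and gains an
nvgpu_dma_ prefix in place of gk20a_gmmu_. The pairs visible in this file are
gk20a_gmmu_alloc_sys -> nvgpu_dma_alloc_sys, gk20a_gmmu_alloc_flags ->
nvgpu_dma_alloc_flags, gk20a_gmmu_alloc_flags_sys -> nvgpu_dma_alloc_flags_sys,
gk20a_gmmu_alloc_map_sys -> nvgpu_dma_alloc_map_sys,
gk20a_gmmu_alloc_map_flags_sys -> nvgpu_dma_alloc_map_flags_sys, and
gk20a_gmmu_free -> nvgpu_dma_free. A minimal sketch of a converted call site
(the local variable and the 4 KB size are hypothetical, not taken from this
diff; the call pattern mirrors the hunks below):

	struct nvgpu_mem mem;	/* hypothetical local buffer descriptor */
	int err;

	/* was: gk20a_gmmu_alloc_flags_sys() */
	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
					4096, &mem);
	if (err)
		return err;

	/* ... use the sysmem buffer ... */

	/* was: gk20a_gmmu_free() */
	nvgpu_dma_free(g, &mem);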
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9b6a546..af02491e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_flags_sys(g,
+			ret = nvgpu_dma_alloc_flags_sys(g,
 					NVGPU_DMA_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 			if (!pm_ctx->mem.gpu_va) {
 				gk20a_err(dev_from_gk20a(g),
 					"failed to map pm ctxt buffer");
-				gk20a_gmmu_free(g, &pm_ctx->mem);
+				nvgpu_dma_free(g, &pm_ctx->mem);
 				c->g->ops.fifo.enable_channel(c);
 				return -ENOMEM;
 			}
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &pm_ctx->mem);
+	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));

 	gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 			g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 			g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));

-	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;

@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	if (ucode_info->surface_desc.gpu_va)
 		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 			ucode_info->surface_desc.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &ucode_info->surface_desc);
+	nvgpu_dma_free(g, &ucode_info->surface_desc);

 	release_firmware(gpccs_fw);
 	gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 	if (!desc)
 		return;
-	gk20a_gmmu_free(g, &desc->mem);
+	nvgpu_dma_free(g, &desc->mem);
 	desc->destroy = NULL;
 }

@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;

-	err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, &desc->mem);
 	if (err)
 		return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;

-	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				gr->ctx_vars.buffer_total_size,
 				&gr_ctx->mem);
 	if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	return 0;

 err_free_mem:
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,

 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }

@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,

 	gk20a_dbg_fn("");

-	err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 			128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 	gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
 			patch_ctx->mem.size, gk20a_mem_flag_none);

-	gk20a_gmmu_free(g, &patch_ctx->mem);
+	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
 }

@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
 			pm_ctx->mem.size, gk20a_mem_flag_none);

-		gk20a_gmmu_free(g, &pm_ctx->mem);
+		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
 }

@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)

 	gr_gk20a_free_global_ctx_buffers(g);

-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-	gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_rd_mem);

-	gk20a_gmmu_free(g, &gr->compbit_store.mem);
+	nvgpu_dma_free(g, &gr->compbit_store.mem);

 	memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));

@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;

-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;

-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;
 	return 0;

 err_free_wr_mem:
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 	return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}

 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;