summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
diff options
context:
space:
mode:
author: Konsta Holtta <kholtta@nvidia.com> 2017-03-14 07:47:04 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2017-03-21 18:01:47 -0400
commit: 8f3875393e7a6bd0fc03afdb1fa99b7e33b71576 (patch)
tree: 7ee7b2da741fae7d06eeb367db2b14d8f78f0f55 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent: 79658ac5cb22cc68a2d24d964379a606086c8b39 (diff)
gpu: nvgpu: abstract away dma alloc attrs
Don't use enum dma_attr in the gk20a_gmmu_alloc_attr* functions, but define nvgpu-internal flags for no kernel mapping, force contiguous, and read only modes. Store the flags in the allocated struct mem_desc and only use gk20a_gmmu_free, remove gk20a_gmmu_free_attr. This helps in OS abstraction. Rename the notion of attr to flags. Add implicit NVGPU_DMA_NO_KERNEL_MAPPING to all vidmem buffers allocated via gk20a_gmmu_alloc_vid for consistency. Fix a bug in gk20a_gmmu_alloc_map_attr that dropped the attr parameter accidentally. Bug 1853519 Change-Id: I1ff67dff9fc425457ae445ce4976a780eb4dcc9f Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: http://git-master/r/1321101 Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> Reviewed-by: Alex Waterman <alexw@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 2451786b..5a76822f 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1936,8 +1936,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1936 if (enable_hwpm_ctxsw) { 1936 if (enable_hwpm_ctxsw) {
1937 /* Allocate buffer if necessary */ 1937 /* Allocate buffer if necessary */
1938 if (pm_ctx->mem.gpu_va == 0) { 1938 if (pm_ctx->mem.gpu_va == 0) {
1939 ret = gk20a_gmmu_alloc_attr_sys(g, 1939 ret = gk20a_gmmu_alloc_flags_sys(g,
1940 DMA_ATTR_NO_KERNEL_MAPPING, 1940 NVGPU_DMA_NO_KERNEL_MAPPING,
1941 g->gr.ctx_vars.pm_ctxsw_image_size, 1941 g->gr.ctx_vars.pm_ctxsw_image_size,
1942 &pm_ctx->mem); 1942 &pm_ctx->mem);
1943 if (ret) { 1943 if (ret) {
@@ -1956,8 +1956,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1956 if (!pm_ctx->mem.gpu_va) { 1956 if (!pm_ctx->mem.gpu_va) {
1957 gk20a_err(dev_from_gk20a(g), 1957 gk20a_err(dev_from_gk20a(g),
1958 "failed to map pm ctxt buffer"); 1958 "failed to map pm ctxt buffer");
1959 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 1959 gk20a_gmmu_free(g, &pm_ctx->mem);
1960 &pm_ctx->mem);
1961 c->g->ops.fifo.enable_channel(c); 1960 c->g->ops.fifo.enable_channel(c);
1962 return -ENOMEM; 1961 return -ENOMEM;
1963 } 1962 }
@@ -2017,7 +2016,7 @@ clean_up_mem:
2017cleanup_pm_buf: 2016cleanup_pm_buf:
2018 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size, 2017 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
2019 gk20a_mem_flag_none); 2018 gk20a_mem_flag_none);
2020 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &pm_ctx->mem); 2019 gk20a_gmmu_free(g, &pm_ctx->mem);
2021 memset(&pm_ctx->mem, 0, sizeof(struct mem_desc)); 2020 memset(&pm_ctx->mem, 0, sizeof(struct mem_desc));
2022 2021
2023 gk20a_enable_channel_tsg(g, c); 2022 gk20a_enable_channel_tsg(g, c);
@@ -2699,7 +2698,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
2699{ 2698{
2700 if (!desc) 2699 if (!desc)
2701 return; 2700 return;
2702 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem); 2701 gk20a_gmmu_free(g, &desc->mem);
2703 desc->destroy = NULL; 2702 desc->destroy = NULL;
2704} 2703}
2705 2704
@@ -2709,7 +2708,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
2709{ 2708{
2710 int err = 0; 2709 int err = 0;
2711 2710
2712 err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING, 2711 err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
2713 size, &desc->mem); 2712 size, &desc->mem);
2714 if (err) 2713 if (err)
2715 return err; 2714 return err;
@@ -2952,7 +2951,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2952 if (!gr_ctx) 2951 if (!gr_ctx)
2953 return -ENOMEM; 2952 return -ENOMEM;
2954 2953
2955 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 2954 err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
2956 gr->ctx_vars.buffer_total_size, 2955 gr->ctx_vars.buffer_total_size,
2957 &gr_ctx->mem); 2956 &gr_ctx->mem);
2958 if (err) 2957 if (err)
@@ -2972,7 +2971,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2972 return 0; 2971 return 0;
2973 2972
2974 err_free_mem: 2973 err_free_mem:
2975 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem); 2974 gk20a_gmmu_free(g, &gr_ctx->mem);
2976 err_free_ctx: 2975 err_free_ctx:
2977 kfree(gr_ctx); 2976 kfree(gr_ctx);
2978 gr_ctx = NULL; 2977 gr_ctx = NULL;
@@ -3021,7 +3020,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
3021 3020
3022 gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va, 3021 gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
3023 gr_ctx->mem.size, gk20a_mem_flag_none); 3022 gr_ctx->mem.size, gk20a_mem_flag_none);
3024 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem); 3023 gk20a_gmmu_free(g, &gr_ctx->mem);
3025 kfree(gr_ctx); 3024 kfree(gr_ctx);
3026} 3025}
3027 3026
@@ -3050,7 +3049,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
3050 3049
3051 gk20a_dbg_fn(""); 3050 gk20a_dbg_fn("");
3052 3051
3053 err = gk20a_gmmu_alloc_map_attr_sys(ch_vm, DMA_ATTR_NO_KERNEL_MAPPING, 3052 err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
3054 128 * sizeof(u32), &patch_ctx->mem); 3053 128 * sizeof(u32), &patch_ctx->mem);
3055 if (err) 3054 if (err)
3056 return err; 3055 return err;
@@ -3070,7 +3069,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
3070 gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va, 3069 gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
3071 patch_ctx->mem.size, gk20a_mem_flag_none); 3070 patch_ctx->mem.size, gk20a_mem_flag_none);
3072 3071
3073 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &patch_ctx->mem); 3072 gk20a_gmmu_free(g, &patch_ctx->mem);
3074 patch_ctx->data_count = 0; 3073 patch_ctx->data_count = 0;
3075} 3074}
3076 3075
@@ -3085,7 +3084,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
3085 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, 3084 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
3086 pm_ctx->mem.size, gk20a_mem_flag_none); 3085 pm_ctx->mem.size, gk20a_mem_flag_none);
3087 3086
3088 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &pm_ctx->mem); 3087 gk20a_gmmu_free(g, &pm_ctx->mem);
3089 } 3088 }
3090} 3089}
3091 3090
@@ -3365,8 +3364,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
3365 gk20a_gmmu_free(g, &gr->mmu_wr_mem); 3364 gk20a_gmmu_free(g, &gr->mmu_wr_mem);
3366 gk20a_gmmu_free(g, &gr->mmu_rd_mem); 3365 gk20a_gmmu_free(g, &gr->mmu_rd_mem);
3367 3366
3368 gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 3367 gk20a_gmmu_free(g, &gr->compbit_store.mem);
3369 &gr->compbit_store.mem);
3370 3368
3371 memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc)); 3369 memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
3372 3370