path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
author    Alex Waterman <alexw@nvidia.com>  2017-04-10 17:04:15 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-11 09:04:12 -0400
commit    c3fa78b1d9cba28547ca59154207d434931ae746 (patch)
tree      42117714f2d8dd217229e6c183d4b6affd29c7d1 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent    36c1fdccc994d337fc15dd2b67ff05435f37dec9 (diff)
gpu: nvgpu: Separate GMMU out of mm_gk20a.c
Begin moving (and renaming) the GMMU code into common/mm/gmmu.c. This
block of code will be responsible for handling the platform/OS
independent GMMU operations.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ide761bab75e5d84be3dcb977c4842ae4b3a7c1b3
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464083
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
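The prototypes themselves are declared in <nvgpu/gmmu.h> and are not part of this file's diff; the following sketch is inferred from the call sites below. Parameter names and the enum nvgpu_aperture spelling are assumptions, not something this change shows:

    /*
     * Sketch of the interface change, inferred from the call sites in
     * the diff below; the exact declarations may differ.
     */

    /* Old: callers passed the raw struct sg_table plus an explicit size,
     * and unmap had to be told the size and rw_flag again. */
    u64 gk20a_gmmu_map(struct vm_gk20a *vm, struct sg_table **sgt,
    		   u64 size, u32 flags, int rw_flag, bool priv,
    		   enum nvgpu_aperture aperture);
    void gk20a_gmmu_unmap(struct vm_gk20a *vm, u64 gpu_va, u64 size,
    		      int rw_flag);

    /* New: callers hand over the whole nvgpu_mem descriptor, so the
     * common GMMU code can fetch the sg_table itself, and unmap only
     * needs the buffer and its GPU VA. */
    u64 nvgpu_gmmu_map(struct vm_gk20a *vm, struct nvgpu_mem *mem,
    		   u64 size, u32 flags, int rw_flag, bool priv,
    		   enum nvgpu_aperture aperture);
    void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem,
    		      u64 gpu_va);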
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  55
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 77a947de..2f52fdcf 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -21,6 +21,7 @@
 
 #include <nvgpu/dma.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/gmmu.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/log.h>
@@ -1946,8 +1947,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return ret;
 	}
 
-	pm_ctx->mem.gpu_va = gk20a_gmmu_map(c->vm,
-					&pm_ctx->mem.priv.sgt,
+	pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
+					&pm_ctx->mem,
 					pm_ctx->mem.size,
 					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 					gk20a_mem_flag_none, true,
@@ -2013,8 +2014,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 clean_up_mem:
 	nvgpu_mem_end(g, gr_mem);
 cleanup_pm_buf:
-	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
-			gk20a_mem_flag_none);
+	nvgpu_gmmu_unmap(c->vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);
 	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));
 
@@ -2198,8 +2198,8 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 	g->ops.mm.init_inst_block(&ucode_info->inst_blk_desc, vm, 0);
 
 	/* Map ucode surface to GMMU */
-	ucode_info->surface_desc.gpu_va = gk20a_gmmu_map(vm,
-					&ucode_info->surface_desc.priv.sgt,
+	ucode_info->surface_desc.gpu_va = nvgpu_gmmu_map(vm,
+					&ucode_info->surface_desc,
 					ucode_info->surface_desc.size,
 					0, /* flags */
 					gk20a_mem_flag_read_only,
@@ -2331,10 +2331,10 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 
 	return 0;
 
- clean_up:
+clean_up:
 	if (ucode_info->surface_desc.gpu_va)
-		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
-				ucode_info->surface_desc.size, gk20a_mem_flag_none);
+		nvgpu_gmmu_unmap(vm, &ucode_info->surface_desc,
+				ucode_info->surface_desc.gpu_va);
 	nvgpu_dma_free(g, &ucode_info->surface_desc);
 
 	nvgpu_release_firmware(g, gpccs_fw);
@@ -2824,7 +2824,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
+	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2840,7 +2840,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
+	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
@@ -2856,7 +2856,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size,
+	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
@@ -2866,7 +2866,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Golden Image */
 	mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
+	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2875,7 +2875,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
 	/* Priv register Access Map */
 	mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
-	gpu_va = gk20a_gmmu_map(ch_vm, &mem->priv.sgt, mem->size, 0,
+	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2885,12 +2885,11 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	c->ch_ctx.global_ctx_buffer_mapped = true;
 	return 0;
 
- clean_up:
+clean_up:
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
-				gr->global_ctx_buffer[i].mem.size,
-				gk20a_mem_flag_none);
+			nvgpu_gmmu_unmap(ch_vm, &gr->global_ctx_buffer[i].mem,
+				g_bfr_va[i]);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -2900,6 +2899,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
 {
 	struct vm_gk20a *ch_vm = c->vm;
+	struct gr_gk20a *gr = &c->g->gr;
 	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
 	u32 i;
@@ -2908,9 +2908,8 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
-				g_bfr_size[i],
-				gk20a_mem_flag_none);
+			nvgpu_gmmu_unmap(ch_vm, &gr->global_ctx_buffer[i].mem,
+				g_bfr_va[i]);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -2946,8 +2945,8 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (err)
 		goto err_free_ctx;
 
-	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm,
-				&gr_ctx->mem.priv.sgt,
+	gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm,
+				&gr_ctx->mem,
 				gr_ctx->mem.size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
 				gk20a_mem_flag_none, true,
@@ -3007,8 +3006,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	if (!gr_ctx || !gr_ctx->mem.gpu_va)
 		return;
 
-	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
-			gr_ctx->mem.size, gk20a_mem_flag_none);
+	nvgpu_gmmu_unmap(vm, &gr_ctx->mem, gr_ctx->mem.gpu_va);
 	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }
@@ -3055,8 +3053,8 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 	gk20a_dbg_fn("");
 
 	if (patch_ctx->mem.gpu_va)
-		gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
-				patch_ctx->mem.size, gk20a_mem_flag_none);
+		nvgpu_gmmu_unmap(c->vm, &patch_ctx->mem,
+				patch_ctx->mem.gpu_va);
 
 	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
@@ -3070,8 +3068,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 	gk20a_dbg_fn("");
 
 	if (pm_ctx->mem.gpu_va) {
-		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
-				pm_ctx->mem.size, gk20a_mem_flag_none);
+		nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);
 
 		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
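Taken together, the new calling convention pairs allocation, map, unmap, and free as in this minimal, hypothetical sequence. nvgpu_dma_alloc is an assumed allocator from <nvgpu/dma.h> (which this file already includes); error handling is elided:

    struct nvgpu_mem mem;

    /* Assumed: allocate backing storage, filling in mem.size,
     * mem.aperture, and the private sg_table. */
    nvgpu_dma_alloc(g, size, &mem);

    /* Map into the GPU VA space; mem now carries its own sg_table. */
    mem.gpu_va = nvgpu_gmmu_map(vm, &mem, mem.size,
    			    NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
    			    gk20a_mem_flag_none, true, mem.aperture);

    /* Tear down: unmap no longer needs the size or rw_flag repeated. */
    nvgpu_gmmu_unmap(vm, &mem, mem.gpu_va);
    nvgpu_dma_free(g, &mem);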