author     Konsta Holtta <kholtta@nvidia.com>          2016-07-07 03:36:46 -0400
committer  Terje Bergstrom <tbergstrom@nvidia.com>     2016-07-08 07:19:04 -0400
commit     d33fb5a9645ef95a97fcf6b3f8f10d9a812ade6d
tree       cfd53a1fb88e044b8b7004ff9b32f1093efd4f1c /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent     e27c72446bf09196d6d66f28389f00565273a13f
gpu: nvgpu: use vidmem by default in gmmu_alloc variants
For devices that have vidmem available, use the vidmem allocator in
gk20a_gmmu_alloc{,_attr,_map,_map_attr}. For others, use sysmem.
Because not all of the buffers have been tested to work in vidmem yet,
rename calls to gk20a_gmmu_alloc{,_attr,_map,_map_attr} to end in _sys,
to state explicitly that sysmem is used. Enabling vidmem for each buffer
is now just a matter of removing "_sys" from the function call.
Jira DNVGPU-18
Change-Id: Ibe42f67eff2c2b68c36582e978ace419dc815dc5
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1176805
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 21
1 file changed, 12 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 0d97e84c..c5b2ba5c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1810,7 +1810,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+			ret = gk20a_gmmu_alloc_attr_sys(g,
+					DMA_ATTR_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
 			if (ret) {
@@ -2148,7 +2149,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 			g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 			g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
-	err = gk20a_gmmu_alloc(g, ucode_size, &ucode_info->surface_desc);
+	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;
 
@@ -2535,7 +2536,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;
 
-	err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+	err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
 				    size, &desc->mem);
 	if (err)
 		return err;
@@ -2778,13 +2779,15 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+	err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
 				gr->ctx_vars.buffer_total_size,
 				&gr_ctx->mem);
 	if (err)
 		goto err_free_ctx;
 
-	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
+	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm,
+				&gr_ctx->mem.sgt,
+				gr_ctx->mem.size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 				gk20a_mem_flag_none, true,
 				gr_ctx->mem.aperture);
@@ -2874,7 +2877,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_map_attr(ch_vm, DMA_ATTR_NO_KERNEL_MAPPING,
+	err = gk20a_gmmu_alloc_map_attr_sys(ch_vm, DMA_ATTR_NO_KERNEL_MAPPING,
 			128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3486,11 +3489,11 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;
 
-	err = gk20a_gmmu_alloc(g, 0x1000, &gr->mmu_wr_mem);
+	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;
 
-	err = gk20a_gmmu_alloc(g, 0x1000, &gr->mmu_rd_mem);
+	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;
 	return 0;
@@ -4945,7 +4948,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}
 
 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map(vm, size, &pmu->pg_buf);
+		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;