summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-11-16 15:56:53 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-11-17 19:17:20 -0500
commit35ae4194a05d47aa6d79353428f81f2ca47ce90f (patch)
tree73c7f15348e1f5deb411392f41e339572b797bb4 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parentb42fb7ba26b565f93118fbdd9e17b42ee6144c5e (diff)
gpu: nvgpu: Add translation for NVGPU MM flags
Add a translation layer to convert from the NVGPU_AS_* flags to the new set of NVGPU_VM_MAP_* and NVGPU_VM_AREA_ALLOC_* flags. This allows the common MM code to not depend on the UAPI header defined for Linux. In addition to this change a couple of other small changes were made: 1. Deprecate, print a warning, and ignore usage of the NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS flag. 2. Move the t19x IO coherence flag from the t19x UAPI header to the regular UAPI header. JIRA NVGPU-293 Change-Id: I146402b0e8617294374e63e78f8826c57cd3b291 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1599802 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index fc008169..03e1d567 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1728,7 +1728,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1728 pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm, 1728 pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
1729 &pm_ctx->mem, 1729 &pm_ctx->mem,
1730 pm_ctx->mem.size, 1730 pm_ctx->mem.size,
1731 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, 1731 NVGPU_VM_MAP_CACHEABLE,
1732 gk20a_mem_flag_none, true, 1732 gk20a_mem_flag_none, true,
1733 pm_ctx->mem.aperture); 1733 pm_ctx->mem.aperture);
1734 if (!pm_ctx->mem.gpu_va) { 1734 if (!pm_ctx->mem.gpu_va) {
@@ -2623,7 +2623,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2623 } 2623 }
2624 2624
2625 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2625 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
2626 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, 2626 NVGPU_VM_MAP_CACHEABLE,
2627 gk20a_mem_flag_none, true, mem->aperture); 2627 gk20a_mem_flag_none, true, mem->aperture);
2628 if (!gpu_va) 2628 if (!gpu_va)
2629 goto clean_up; 2629 goto clean_up;
@@ -2641,7 +2641,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2641 } 2641 }
2642 2642
2643 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2643 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
2644 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, 2644 NVGPU_VM_MAP_CACHEABLE,
2645 gk20a_mem_flag_none, false, mem->aperture); 2645 gk20a_mem_flag_none, false, mem->aperture);
2646 if (!gpu_va) 2646 if (!gpu_va)
2647 goto clean_up; 2647 goto clean_up;
@@ -2659,7 +2659,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2659 } 2659 }
2660 2660
2661 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 2661 gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
2662 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, 2662 NVGPU_VM_MAP_CACHEABLE,
2663 gk20a_mem_flag_none, true, mem->aperture); 2663 gk20a_mem_flag_none, true, mem->aperture);
2664 if (!gpu_va) 2664 if (!gpu_va)
2665 goto clean_up; 2665 goto clean_up;