author	Konsta Holtta <kholtta@nvidia.com>	2016-06-17 08:56:07 -0400
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2016-07-06 06:34:23 -0400
commit	b8915ab5aabb02866019221c51d96f304658207f (patch)
tree	16754368daa70c837dea7be8283cf0c5027b23f3 /drivers/gpu/nvgpu/gk20a
parent	67fae6e547ca20c4171e1b3d0ad9d252c2e3c0ee (diff)
gpu: nvgpu: support in-kernel vidmem mappings
Propagate the buffer aperture flag in gk20a_locked_gmmu_map up so that
buffers represented as a mem_desc and present in vidmem can be mapped
to gpu.

JIRA DNVGPU-18
JIRA DNVGPU-76

Change-Id: I46cf87e27229123016727339b9349d5e2c835b3e
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1169308
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
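As a rough sketch of the calling convention after this change (not part of the commit itself): a caller that maps a mem_desc simply forwards the aperture recorded at allocation time instead of a hard-coded APERTURE_SYSMEM. The helper name below is hypothetical; the gk20a_gmmu_map signature matches the one introduced by this patch.

/* Hypothetical helper, assuming `mem` was allocated through the gk20a
 * gmmu alloc paths; mem->aperture is APERTURE_SYSMEM or APERTURE_VIDMEM
 * depending on where the backing memory actually lives. */
static u64 example_map_mem(struct vm_gk20a *vm, struct mem_desc *mem)
{
	/* Forward the buffer's real aperture so the PTEs are written
	 * with the correct target (sysmem vs vidmem). */
	return gk20a_gmmu_map(vm, &mem->sgt, mem->size,
			      0 /* flags */,
			      gk20a_mem_flag_none,
			      false /* priv */,
			      mem->aperture);
}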
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/cde_gk20a.c	3
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	3
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	66
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	38
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	16
-rw-r--r--	drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c	7
-rw-r--r--	drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c	6
7 files changed, 79 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 7818f046..02b1938a 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -1215,7 +1215,8 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 			       g->gr.compbit_store.mem.size,
 			       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
 			       gk20a_mem_flag_read_only,
-			       false);
+			       false,
+			       gr->compbit_store.mem.aperture);
 
 	if (!vaddr) {
 		gk20a_warn(cde_ctx->dev, "cde: cannot map compression bit backing store");
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 45e16ad9..b8a2fc3e 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -492,7 +492,8 @@ struct gpu_ops {
 			bool clear_ctags,
 			bool sparse,
 			bool priv,
-			struct vm_gk20a_mapping_batch *batch);
+			struct vm_gk20a_mapping_batch *batch,
+			enum gk20a_aperture aperture);
 	void (*gmmu_unmap)(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index bdc65cab..0d97e84c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1824,7 +1824,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 					&pm_ctx->mem.sgt,
 					pm_ctx->mem.size,
 					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-					gk20a_mem_flag_none, true);
+					gk20a_mem_flag_none, true,
+					pm_ctx->mem.aperture);
 		if (!pm_ctx->mem.gpu_va) {
 			gk20a_err(dev_from_gk20a(g),
 				  "failed to map pm ctxt buffer");
@@ -2046,7 +2047,8 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 					ucode_info->surface_desc.size,
 					0, /* flags */
 					gk20a_mem_flag_read_only,
-					false);
+					false,
+					ucode_info->surface_desc.aperture);
 	if (!ucode_info->surface_desc.gpu_va) {
 		gk20a_err(d, "failed to update gmmu ptes\n");
 		return -ENOMEM;
@@ -2650,82 +2652,73 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
 	u64 *g_bfr_size = c->ch_ctx.global_ctx_buffer_size;
 	struct gr_gk20a *gr = &g->gr;
-	struct sg_table *sgt;
-	u64 size;
+	struct mem_desc *mem;
 	u64 gpu_va;
 	u32 i;
 	gk20a_dbg_fn("");
 
 	/* Circular Buffer */
 	if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
-		sgt = gr->global_ctx_buffer[CIRCULAR].mem.sgt;
-		size = gr->global_ctx_buffer[CIRCULAR].mem.size;
+		mem = &gr->global_ctx_buffer[CIRCULAR].mem;
 	} else {
-		sgt = gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt;
-		size = gr->global_ctx_buffer[CIRCULAR_VPR].mem.size;
+		mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-				gk20a_mem_flag_none, true);
+				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[CIRCULAR_VA] = gpu_va;
-	g_bfr_size[CIRCULAR_VA] = size;
+	g_bfr_size[CIRCULAR_VA] = mem->size;
 
 	/* Attribute Buffer */
 	if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
-		sgt = gr->global_ctx_buffer[ATTRIBUTE].mem.sgt;
-		size = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
+		mem = &gr->global_ctx_buffer[ATTRIBUTE].mem;
 	} else {
-		sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt;
-		size = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.size;
+		mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-				gk20a_mem_flag_none, false);
+				gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[ATTRIBUTE_VA] = gpu_va;
-	g_bfr_size[ATTRIBUTE_VA] = size;
+	g_bfr_size[ATTRIBUTE_VA] = mem->size;
 
 	/* Page Pool */
 	if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
-		sgt = gr->global_ctx_buffer[PAGEPOOL].mem.sgt;
-		size = gr->global_ctx_buffer[PAGEPOOL].mem.size;
+		mem = &gr->global_ctx_buffer[PAGEPOOL].mem;
 	} else {
-		sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt;
-		size = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.size;
+		mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem;
 	}
 
-	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size,
 				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-				gk20a_mem_flag_none, true);
+				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
-	g_bfr_size[PAGEPOOL_VA] = size;
+	g_bfr_size[PAGEPOOL_VA] = mem->size;
 
 	/* Golden Image */
-	sgt = gr->global_ctx_buffer[GOLDEN_CTX].mem.sgt;
-	size = gr->global_ctx_buffer[GOLDEN_CTX].mem.size;
-	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
-				gk20a_mem_flag_none, true);
+	mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
-	g_bfr_size[GOLDEN_CTX_VA] = size;
+	g_bfr_size[GOLDEN_CTX_VA] = mem->size;
 
 	/* Priv register Access Map */
-	sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.sgt;
-	size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
-	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
-				gk20a_mem_flag_none, true);
+	mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
+	gpu_va = gk20a_gmmu_map(ch_vm, &mem->sgt, mem->size, 0,
+				gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
-	g_bfr_size[PRIV_ACCESS_MAP_VA] = size;
+	g_bfr_size[PRIV_ACCESS_MAP_VA] = mem->size;
 
 	c->ch_ctx.global_ctx_buffer_mapped = true;
 	return 0;
@@ -2793,7 +2786,8 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 
 	gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
 					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-					gk20a_mem_flag_none, true);
+					gk20a_mem_flag_none, true,
+					gr_ctx->mem.aperture);
 	if (!gr_ctx->mem.gpu_va)
 		goto err_free_mem;
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 6fdfacdd..bb32749d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1594,7 +1594,8 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			bool clear_ctags,
 			bool sparse,
 			bool priv,
-			struct vm_gk20a_mapping_batch *batch)
+			struct vm_gk20a_mapping_batch *batch,
+			enum gk20a_aperture aperture)
 {
 	int err = 0;
 	bool allocated = false;
@@ -1642,7 +1643,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 				      rw_flag,
 				      sparse,
 				      priv,
-				      APERTURE_SYSMEM); /* no vidmem bufs yet */
+				      aperture);
 	if (err) {
 		gk20a_err(d, "failed to update ptes on map");
 		goto fail_validate;
@@ -1998,7 +1999,8 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 					clear_ctags,
 					false,
 					false,
-					batch);
+					batch,
+					APERTURE_SYSMEM); /* no vidmem yet */
 	if (!map_offset)
 		goto clean_up;
 
@@ -2256,7 +2258,8 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 		false, /* clear_ctags */
 		false, /* sparse */
 		false, /* priv */
-		NULL); /* mapping_batch handle */
+		NULL, /* mapping_batch handle */
+		g->gr.compbit_store.mem.aperture);
 
 	if (!mapped_buffer->ctag_map_win_addr) {
 		mutex_unlock(&vm->update_gmmu_lock);
@@ -2295,7 +2298,8 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 			u64 size,
 			u32 flags,
 			int rw_flag,
-			bool priv)
+			bool priv,
+			enum gk20a_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	u64 vaddr;
@@ -2312,7 +2316,8 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 			false, /* clear_ctags */
 			false, /* sparse */
 			priv, /* priv */
-			NULL); /* mapping_batch handle */
+			NULL, /* mapping_batch handle */
+			aperture);
 	mutex_unlock(&vm->update_gmmu_lock);
 	if (!vaddr) {
 		gk20a_err(dev_from_vm(vm), "failed to allocate va space");
@@ -2327,9 +2332,11 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
 		   u64 size,
 		   u32 flags,
 		   int rw_flag,
-		   bool priv)
+		   bool priv,
+		   enum gk20a_aperture aperture)
 {
-	return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv);
+	return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv,
+			aperture);
 }
 
 /*
@@ -2341,9 +2348,11 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 			 u64 size,
 			 u32 flags,
 			 int rw_flag,
-			 bool priv)
+			 bool priv,
+			 enum gk20a_aperture aperture)
 {
-	return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv);
+	return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv,
+			aperture);
 }
 
 int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem)
@@ -2599,7 +2608,8 @@ int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm,
 		return err;
 
 	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
-				     gk20a_mem_flag_none, false);
+				     gk20a_mem_flag_none, false,
+				     mem->aperture);
 	if (!mem->gpu_va) {
 		err = -ENOMEM;
 		goto fail_free;
@@ -2626,7 +2636,8 @@ int gk20a_gmmu_alloc_map_attr_vid(struct vm_gk20a *vm,
 		return err;
 
 	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
-				     gk20a_mem_flag_none, false);
+				     gk20a_mem_flag_none, false,
+				     mem->aperture);
 	if (!mem->gpu_va) {
 		err = -ENOMEM;
 		goto fail_free;
@@ -3727,7 +3738,8 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 					 false,
 					 true,
 					 false,
-					 NULL);
+					 NULL,
+					 APERTURE_INVALID);
 	if (!map_offset) {
 		mutex_unlock(&vm->update_gmmu_lock);
 		gk20a_bfree(vma, vaddr_start);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index a697e520..f87ba605 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -40,8 +40,13 @@
 		outer_flush_range(pa, pa + (size_t)(size)); \
 	} while (0)
 
+/*
+ * Real location of a buffer - gk20a_aperture_mask() will deduce what will be
+ * told to the gpu about the aperture, but this flag designates where the
+ * memory actually was allocated from.
+ */
 enum gk20a_aperture {
-	APERTURE_INVALID, /* e.g., unallocated */
+	APERTURE_INVALID, /* unallocated or N/A */
 	APERTURE_SYSMEM,
 	APERTURE_VIDMEM
 };
@@ -520,14 +525,16 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
 		   u64 size,
 		   u32 flags,
 		   int rw_flag,
-		   bool priv);
+		   bool priv,
+		   enum gk20a_aperture aperture);
 u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 			 struct sg_table **sgt,
 			 u64 addr,
 			 u64 size,
 			 u32 flags,
 			 int rw_flag,
-			 bool priv);
+			 bool priv,
+			 enum gk20a_aperture aperture);
 
 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm,
 			 size_t size,
@@ -619,7 +626,8 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			  bool clear_ctags,
 			  bool sparse,
 			  bool priv,
-			  struct vm_gk20a_mapping_batch *batch);
+			  struct vm_gk20a_mapping_batch *batch,
+			  enum gk20a_aperture aperture);
 
 void gk20a_gmmu_unmap(struct vm_gk20a *vm,
 		      u64 vaddr,
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index b8f70ab3..25f9a8dd 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -130,9 +130,6 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 	if (dma_mapping_error(&tegra_vpr_dev, iova))
 		return -ENOMEM;
 
-	desc->mem.size = size;
-	desc->destroy = gk20a_tegra_secure_destroy;
-
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
 		gk20a_err(dev, "failed to allocate memory\n");
@@ -148,7 +145,11 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 	/* This bypasses SMMU for VPR during gmmu_map. */
 	sg_dma_address(sgt->sgl) = 0;
 
+	desc->destroy = gk20a_tegra_secure_destroy;
+
 	desc->mem.sgt = sgt;
+	desc->mem.size = size;
+	desc->mem.aperture = APERTURE_SYSMEM;
 
 	return err;
 
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
index aa375b24..113c59ef 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
@@ -187,7 +187,8 @@ int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *p,
 
 	/* Map into the GPU... Doesn't need to be fixed. */
 	p->gpu_va = gk20a_gmmu_map(vm, &p->rw_sg_table, PAGE_SIZE,
-				   0, gk20a_mem_flag_none, false);
+				   0, gk20a_mem_flag_none, false,
+				   APERTURE_SYSMEM);
 	if (!p->gpu_va) {
 		err = -ENOMEM;
 		goto fail_unmap_sgt;
@@ -204,7 +205,8 @@ int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *p,
 				  p->sema_sea->gpu_va, p->sema_sea->map_size,
 				  0,
 				  gk20a_mem_flag_read_only,
-				  false);
+				  false,
+				  APERTURE_SYSMEM);
 	if (!addr) {
 		err = -ENOMEM;
 		BUG();