summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/nvgpu/gk20a/cde_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c5
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c21
-rw-r--r--drivers/gpu/nvgpu/gk20a/ltc_common.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.c36
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.h62
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c9
-rw-r--r--drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gm206/acr_gm206.c2
-rw-r--r--drivers/gpu/nvgpu/gm20b/acr_gm20b.c6
-rw-r--r--drivers/gpu/nvgpu/vgpu/fifo_vgpu.c5
14 files changed, 86 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 02b1938a..4b84dc69 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -241,7 +241,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
241 241
242 /* allocate buf */ 242 /* allocate buf */
243 mem = cde_ctx->mem + cde_ctx->num_bufs; 243 mem = cde_ctx->mem + cde_ctx->num_bufs;
244 err = gk20a_gmmu_alloc_map(cde_ctx->vm, buf->num_bytes, mem); 244 err = gk20a_gmmu_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
245 if (err) { 245 if (err) {
246 gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d", 246 gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
247 cde_ctx->num_bufs); 247 cde_ctx->num_bufs);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index ad452919..1439410e 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1327,7 +1327,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
1327 size = roundup_pow_of_two(c->gpfifo.entry_num * 1327 size = roundup_pow_of_two(c->gpfifo.entry_num *
1328 2 * 18 * sizeof(u32) / 3); 1328 2 * 18 * sizeof(u32) / 3);
1329 1329
1330 err = gk20a_gmmu_alloc_map(ch_vm, size, &q->mem); 1330 err = gk20a_gmmu_alloc_map_sys(ch_vm, size, &q->mem);
1331 if (err) { 1331 if (err) {
1332 gk20a_err(d, "%s: memory allocation failed\n", __func__); 1332 gk20a_err(d, "%s: memory allocation failed\n", __func__);
1333 goto clean_up; 1333 goto clean_up;
@@ -1464,7 +1464,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
1464 return -EEXIST; 1464 return -EEXIST;
1465 } 1465 }
1466 1466
1467 err = gk20a_gmmu_alloc_map(ch_vm, 1467 err = gk20a_gmmu_alloc_map_sys(ch_vm,
1468 gpfifo_size * sizeof(struct nvgpu_gpfifo), 1468 gpfifo_size * sizeof(struct nvgpu_gpfifo),
1469 &c->gpfifo.mem); 1469 &c->gpfifo.mem);
1470 if (err) { 1470 if (err) {
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 5edb07a7..62f60761 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -231,7 +231,7 @@ static int css_hw_enable_snapshot(struct gr_gk20a *gr, u32 snapshot_size)
231 if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE) 231 if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
232 snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE; 232 snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
233 233
234 ret = gk20a_gmmu_alloc_map(&g->mm.pmu.vm, snapshot_size, 234 ret = gk20a_gmmu_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
235 &data->hw_memdesc); 235 &data->hw_memdesc);
236 if (ret) 236 if (ret)
237 return ret; 237 return ret;
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 52166b7f..69e2b409 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -385,7 +385,7 @@ static int gk20a_fecs_trace_alloc_ring(struct gk20a *g)
385{ 385{
386 struct gk20a_fecs_trace *trace = g->fecs_trace; 386 struct gk20a_fecs_trace *trace = g->fecs_trace;
387 387
388 return gk20a_gmmu_alloc(g, GK20A_FECS_TRACE_NUM_RECORDS 388 return gk20a_gmmu_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
389 * ctxsw_prog_record_timestamp_record_size_in_bytes_v(), 389 * ctxsw_prog_record_timestamp_record_size_in_bytes_v(),
390 &trace->trace_buf); 390 &trace->trace_buf);
391} 391}
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 0e006cb6..3e55e3c1 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -591,7 +591,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
591 591
592 runlist_size = ram_rl_entry_size_v() * f->num_runlist_entries; 592 runlist_size = ram_rl_entry_size_v() * f->num_runlist_entries;
593 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 593 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
594 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]); 594 int err = gk20a_gmmu_alloc_sys(g, runlist_size,
595 &runlist->mem[i]);
595 if (err) { 596 if (err) {
596 dev_err(d, "memory allocation failed\n"); 597 dev_err(d, "memory allocation failed\n");
597 goto clean_up_runlist; 598 goto clean_up_runlist;
@@ -766,7 +767,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
766 767
767 f->userd_entry_size = 1 << ram_userd_base_shift_v(); 768 f->userd_entry_size = 1 << ram_userd_base_shift_v();
768 769
769 err = gk20a_gmmu_alloc_map(&g->mm.bar1.vm, 770 err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
770 f->userd_entry_size * f->num_channels, 771 f->userd_entry_size * f->num_channels,
771 &f->userd); 772 &f->userd);
772 if (err) { 773 if (err) {
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 0d97e84c..c5b2ba5c 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1810,7 +1810,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
1810 if (enable_hwpm_ctxsw) { 1810 if (enable_hwpm_ctxsw) {
1811 /* Allocate buffer if necessary */ 1811 /* Allocate buffer if necessary */
1812 if (pm_ctx->mem.gpu_va == 0) { 1812 if (pm_ctx->mem.gpu_va == 0) {
1813 ret = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 1813 ret = gk20a_gmmu_alloc_attr_sys(g,
1814 DMA_ATTR_NO_KERNEL_MAPPING,
1814 g->gr.ctx_vars.pm_ctxsw_image_size, 1815 g->gr.ctx_vars.pm_ctxsw_image_size,
1815 &pm_ctx->mem); 1816 &pm_ctx->mem);
1816 if (ret) { 1817 if (ret) {
@@ -2148,7 +2149,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
2148 g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32), 2149 g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
2149 g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32)); 2150 g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
2150 2151
2151 err = gk20a_gmmu_alloc(g, ucode_size, &ucode_info->surface_desc); 2152 err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
2152 if (err) 2153 if (err)
2153 goto clean_up; 2154 goto clean_up;
2154 2155
@@ -2535,7 +2536,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
2535{ 2536{
2536 int err = 0; 2537 int err = 0;
2537 2538
2538 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 2539 err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
2539 size, &desc->mem); 2540 size, &desc->mem);
2540 if (err) 2541 if (err)
2541 return err; 2542 return err;
@@ -2778,13 +2779,15 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2778 if (!gr_ctx) 2779 if (!gr_ctx)
2779 return -ENOMEM; 2780 return -ENOMEM;
2780 2781
2781 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 2782 err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
2782 gr->ctx_vars.buffer_total_size, 2783 gr->ctx_vars.buffer_total_size,
2783 &gr_ctx->mem); 2784 &gr_ctx->mem);
2784 if (err) 2785 if (err)
2785 goto err_free_ctx; 2786 goto err_free_ctx;
2786 2787
2787 gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size, 2788 gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm,
2789 &gr_ctx->mem.sgt,
2790 gr_ctx->mem.size,
2788 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 2791 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2789 gk20a_mem_flag_none, true, 2792 gk20a_mem_flag_none, true,
2790 gr_ctx->mem.aperture); 2793 gr_ctx->mem.aperture);
@@ -2874,7 +2877,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
2874 2877
2875 gk20a_dbg_fn(""); 2878 gk20a_dbg_fn("");
2876 2879
2877 err = gk20a_gmmu_alloc_map_attr(ch_vm, DMA_ATTR_NO_KERNEL_MAPPING, 2880 err = gk20a_gmmu_alloc_map_attr_sys(ch_vm, DMA_ATTR_NO_KERNEL_MAPPING,
2878 128 * sizeof(u32), &patch_ctx->mem); 2881 128 * sizeof(u32), &patch_ctx->mem);
2879 if (err) 2882 if (err)
2880 return err; 2883 return err;
@@ -3486,11 +3489,11 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
3486{ 3489{
3487 int err; 3490 int err;
3488 3491
3489 err = gk20a_gmmu_alloc(g, 0x1000, &gr->mmu_wr_mem); 3492 err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
3490 if (err) 3493 if (err)
3491 goto err; 3494 goto err;
3492 3495
3493 err = gk20a_gmmu_alloc(g, 0x1000, &gr->mmu_rd_mem); 3496 err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
3494 if (err) 3497 if (err)
3495 goto err_free_wr_mem; 3498 goto err_free_wr_mem;
3496 return 0; 3499 return 0;
@@ -4945,7 +4948,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
4945 } 4948 }
4946 4949
4947 if (!pmu->pg_buf.cpu_va) { 4950 if (!pmu->pg_buf.cpu_va) {
4948 err = gk20a_gmmu_alloc_map(vm, size, &pmu->pg_buf); 4951 err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
4949 if (err) { 4952 if (err) {
4950 gk20a_err(d, "failed to allocate memory\n"); 4953 gk20a_err(d, "failed to allocate memory\n");
4951 return -ENOMEM; 4954 return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index a4d16b67..45607154 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -68,7 +68,7 @@ static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
68{ 68{
69 struct gr_gk20a *gr = &g->gr; 69 struct gr_gk20a *gr = &g->gr;
70 70
71 return gk20a_gmmu_alloc_attr(g, DMA_ATTR_FORCE_CONTIGUOUS, 71 return gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_FORCE_CONTIGUOUS,
72 compbit_backing_size, 72 compbit_backing_size,
73 &gr->compbit_store.mem); 73 &gr->compbit_store.mem);
74} 74}
@@ -78,7 +78,7 @@ static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
78{ 78{
79 struct gr_gk20a *gr = &g->gr; 79 struct gr_gk20a *gr = &g->gr;
80 80
81 return gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 81 return gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
82 compbit_backing_size, 82 compbit_backing_size,
83 &gr->compbit_store.mem); 83 &gr->compbit_store.mem);
84} 84}
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 5e87ae25..d7ace0d8 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -713,7 +713,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
713 713
714static int gk20a_alloc_sysmem_flush(struct gk20a *g) 714static int gk20a_alloc_sysmem_flush(struct gk20a *g)
715{ 715{
716 return gk20a_gmmu_alloc(g, SZ_4K, &g->mm.sysmem_flush); 716 return gk20a_gmmu_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
717} 717}
718 718
719static void gk20a_init_pramin(struct mm_gk20a *mm) 719static void gk20a_init_pramin(struct mm_gk20a *mm)
@@ -976,9 +976,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
976 * default. 976 * default.
977 */ 977 */
978 if (IS_ENABLED(CONFIG_ARM64)) 978 if (IS_ENABLED(CONFIG_ARM64))
979 err = gk20a_gmmu_alloc(g, len, &entry->mem); 979 err = gk20a_gmmu_alloc_sys(g, len, &entry->mem);
980 else 980 else
981 err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, 981 err = gk20a_gmmu_alloc_attr_sys(g, DMA_ATTR_NO_KERNEL_MAPPING,
982 len, &entry->mem); 982 len, &entry->mem);
983 983
984 984
@@ -2363,9 +2363,17 @@ int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem)
2363int gk20a_gmmu_alloc_attr(struct gk20a *g, enum dma_attr attr, size_t size, 2363int gk20a_gmmu_alloc_attr(struct gk20a *g, enum dma_attr attr, size_t size,
2364 struct mem_desc *mem) 2364 struct mem_desc *mem)
2365{ 2365{
2366 if (g->mm.vidmem_is_vidmem)
2367 return gk20a_gmmu_alloc_attr_vid(g, attr, size, mem);
2368
2366 return gk20a_gmmu_alloc_attr_sys(g, attr, size, mem); 2369 return gk20a_gmmu_alloc_attr_sys(g, attr, size, mem);
2367} 2370}
2368 2371
2372int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem)
2373{
2374 return gk20a_gmmu_alloc_attr_sys(g, 0, size, mem);
2375}
2376
2369int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr, 2377int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr,
2370 size_t size, struct mem_desc *mem) 2378 size_t size, struct mem_desc *mem)
2371{ 2379{
@@ -2594,7 +2602,8 @@ u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
2594 sysmem_mask, vidmem_mask); 2602 sysmem_mask, vidmem_mask);
2595} 2603}
2596 2604
2597int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size, struct mem_desc *mem) 2605int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
2606 struct mem_desc *mem)
2598{ 2607{
2599 return gk20a_gmmu_alloc_map_attr(vm, 0, size, mem); 2608 return gk20a_gmmu_alloc_map_attr(vm, 0, size, mem);
2600} 2609}
@@ -2602,7 +2611,22 @@ int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size, struct mem_desc *mem)
2602int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm, 2611int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm,
2603 enum dma_attr attr, size_t size, struct mem_desc *mem) 2612 enum dma_attr attr, size_t size, struct mem_desc *mem)
2604{ 2613{
2605 int err = gk20a_gmmu_alloc_attr(vm->mm->g, attr, size, mem); 2614 if (vm->mm->vidmem_is_vidmem)
2615 return gk20a_gmmu_alloc_map_attr_vid(vm, 0, size, mem);
2616
2617 return gk20a_gmmu_alloc_map_attr_sys(vm, 0, size, mem);
2618}
2619
2620int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
2621 struct mem_desc *mem)
2622{
2623 return gk20a_gmmu_alloc_map_attr_sys(vm, 0, size, mem);
2624}
2625
2626int gk20a_gmmu_alloc_map_attr_sys(struct vm_gk20a *vm,
2627 enum dma_attr attr, size_t size, struct mem_desc *mem)
2628{
2629 int err = gk20a_gmmu_alloc_attr_sys(vm->mm->g, attr, size, mem);
2606 2630
2607 if (err) 2631 if (err)
2608 return err; 2632 return err;
@@ -3983,7 +4007,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block)
3983 4007
3984 gk20a_dbg_fn(""); 4008 gk20a_dbg_fn("");
3985 4009
3986 err = gk20a_gmmu_alloc(g, ram_in_alloc_size_v(), inst_block); 4010 err = gk20a_gmmu_alloc_sys(g, ram_in_alloc_size_v(), inst_block);
3987 if (err) { 4011 if (err) {
3988 gk20a_err(dev, "%s: memory allocation failed\n", __func__); 4012 gk20a_err(dev, "%s: memory allocation failed\n", __func__);
3989 return err; 4013 return err;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index f87ba605..4b811ddf 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -536,61 +536,39 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
536 bool priv, 536 bool priv,
537 enum gk20a_aperture aperture); 537 enum gk20a_aperture aperture);
538 538
539int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, 539int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
540 size_t size,
541 struct mem_desc *mem); 540 struct mem_desc *mem);
541int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm, enum dma_attr attr,
542 size_t size, struct mem_desc *mem);
542 543
543int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm, 544int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
544 enum dma_attr attr,
545 size_t size,
546 struct mem_desc *mem); 545 struct mem_desc *mem);
546int gk20a_gmmu_alloc_map_attr_sys(struct vm_gk20a *vm, enum dma_attr attr,
547 size_t size, struct mem_desc *mem);
547 548
548int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, 549int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
549 size_t size,
550 struct mem_desc *mem); 550 struct mem_desc *mem);
551int gk20a_gmmu_alloc_map_attr_vid(struct vm_gk20a *vm, enum dma_attr attr,
552 size_t size, struct mem_desc *mem);
551 553
552int gk20a_gmmu_alloc_map_attr_vid(struct vm_gk20a *vm, 554void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct mem_desc *mem);
553 enum dma_attr attr,
554 size_t size,
555 struct mem_desc *mem);
556
557void gk20a_gmmu_unmap_free(struct vm_gk20a *vm,
558 struct mem_desc *mem);
559
560int gk20a_gmmu_alloc(struct gk20a *g,
561 size_t size,
562 struct mem_desc *mem);
563 555
564int gk20a_gmmu_alloc_attr(struct gk20a *g, 556int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem);
565 enum dma_attr attr, 557int gk20a_gmmu_alloc_attr(struct gk20a *g, enum dma_attr attr, size_t size,
566 size_t size,
567 struct mem_desc *mem); 558 struct mem_desc *mem);
568 559
569int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, 560int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem);
570 enum dma_attr attr, 561int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr, size_t size,
571 size_t size,
572 struct mem_desc *mem); 562 struct mem_desc *mem);
573 563
574int gk20a_gmmu_alloc_vid(struct gk20a *g, 564int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem);
575 size_t size, 565int gk20a_gmmu_alloc_attr_vid(struct gk20a *g, enum dma_attr attr, size_t size,
576 struct mem_desc *mem);
577
578int gk20a_gmmu_alloc_attr_vid(struct gk20a *g,
579 enum dma_attr attr,
580 size_t size,
581 struct mem_desc *mem);
582
583int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g,
584 enum dma_attr attr,
585 size_t size,
586 struct mem_desc *mem,
587 dma_addr_t at);
588
589void gk20a_gmmu_free(struct gk20a *g,
590 struct mem_desc *mem); 566 struct mem_desc *mem);
567int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
568 size_t size, struct mem_desc *mem, dma_addr_t at);
591 569
592void gk20a_gmmu_free_attr(struct gk20a *g, 570void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem);
593 enum dma_attr attr, 571void gk20a_gmmu_free_attr(struct gk20a *g, enum dma_attr attr,
594 struct mem_desc *mem); 572 struct mem_desc *mem);
595 573
596static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem) 574static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index b0940229..347d7158 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2894,7 +2894,8 @@ static int gk20a_prepare_ucode(struct gk20a *g)
2894 pmu->ucode_image = (u32 *)((u8 *)pmu->desc + 2894 pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
2895 pmu->desc->descriptor_size); 2895 pmu->desc->descriptor_size);
2896 2896
2897 err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_UCODE_SIZE_MAX, &pmu->ucode); 2897 err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
2898 &pmu->ucode);
2898 if (err) 2899 if (err)
2899 goto err_release_fw; 2900 goto err_release_fw;
2900 2901
@@ -2965,13 +2966,15 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
2965 2966
2966 INIT_WORK(&pmu->pg_init, pmu_setup_hw); 2967 INIT_WORK(&pmu->pg_init, pmu_setup_hw);
2967 2968
2968 err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_SEQ_BUF_SIZE, &pmu->seq_buf); 2969 err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
2970 &pmu->seq_buf);
2969 if (err) { 2971 if (err) {
2970 gk20a_err(d, "failed to allocate memory\n"); 2972 gk20a_err(d, "failed to allocate memory\n");
2971 goto err_free_seq; 2973 goto err_free_seq;
2972 } 2974 }
2973 2975
2974 err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, &pmu->trace_buf); 2976 err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_TRACE_BUFSIZE,
2977 &pmu->trace_buf);
2975 if (err) { 2978 if (err) {
2976 gk20a_err(d, "failed to allocate trace memory\n"); 2979 gk20a_err(d, "failed to allocate trace memory\n");
2977 goto err_free_seq_buf; 2980 goto err_free_seq_buf;
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
index 113c59ef..d513fa84 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
@@ -52,7 +52,7 @@ static int __gk20a_semaphore_sea_grow(struct gk20a_semaphore_sea *sea)
52 52
53 __lock_sema_sea(sea); 53 __lock_sema_sea(sea);
54 54
55 ret = gk20a_gmmu_alloc_attr(gk20a, DMA_ATTR_NO_KERNEL_MAPPING, 55 ret = gk20a_gmmu_alloc_attr_sys(gk20a, DMA_ATTR_NO_KERNEL_MAPPING,
56 PAGE_SIZE * SEMAPHORE_POOL_COUNT, 56 PAGE_SIZE * SEMAPHORE_POOL_COUNT,
57 &sea->sea_mem); 57 &sea->sea_mem);
58 if (ret) 58 if (ret)
diff --git a/drivers/gpu/nvgpu/gm206/acr_gm206.c b/drivers/gpu/nvgpu/gm206/acr_gm206.c
index 5a61bd21..988761f0 100644
--- a/drivers/gpu/nvgpu/gm206/acr_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/acr_gm206.c
@@ -252,7 +252,7 @@ static int gm206_bootstrap_hs_flcn(struct gk20a *g)
252 err = -1; 252 err = -1;
253 goto err_release_acr_fw; 253 goto err_release_acr_fw;
254 } 254 }
255 err = gk20a_gmmu_alloc_map(vm, img_size_in_bytes, 255 err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
256 &acr->acr_ucode); 256 &acr->acr_ucode);
257 if (err) { 257 if (err) {
258 err = -ENOMEM; 258 err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index c503bc48..c4a188ff 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -355,7 +355,7 @@ static int gm20b_alloc_blob_space(struct gk20a *g,
355{ 355{
356 int err; 356 int err;
357 357
358 err = gk20a_gmmu_alloc(g, size, mem); 358 err = gk20a_gmmu_alloc_sys(g, size, mem);
359 359
360 return err; 360 return err;
361} 361}
@@ -1100,7 +1100,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
1100 err = -1; 1100 err = -1;
1101 goto err_release_acr_fw; 1101 goto err_release_acr_fw;
1102 } 1102 }
1103 err = gk20a_gmmu_alloc_map(vm, img_size_in_bytes, 1103 err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
1104 &acr->acr_ucode); 1104 &acr->acr_ucode);
1105 if (err) { 1105 if (err) {
1106 err = -ENOMEM; 1106 err = -ENOMEM;
@@ -1403,7 +1403,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1403 /*TODO in code verify that enable PMU is done, 1403 /*TODO in code verify that enable PMU is done,
1404 scrubbing etc is done*/ 1404 scrubbing etc is done*/
1405 /*TODO in code verify that gmmu vm init is done*/ 1405 /*TODO in code verify that gmmu vm init is done*/
1406 err = gk20a_gmmu_alloc_attr(g, 1406 err = gk20a_gmmu_alloc_attr_sys(g,
1407 DMA_ATTR_READ_ONLY, bl_sz, &acr->hsbl_ucode); 1407 DMA_ATTR_READ_ONLY, bl_sz, &acr->hsbl_ucode);
1408 if (err) { 1408 if (err) {
1409 gk20a_err(d, "failed to allocate memory\n"); 1409 gk20a_err(d, "failed to allocate memory\n");
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index ffa16cd3..29692573 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -217,7 +217,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
217 217
218 runlist_size = sizeof(u16) * f->num_channels; 218 runlist_size = sizeof(u16) * f->num_channels;
219 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { 219 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
220 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]); 220 int err = gk20a_gmmu_alloc_sys(g, runlist_size,
221 &runlist->mem[i]);
221 if (err) { 222 if (err) {
222 dev_err(d, "memory allocation failed\n"); 223 dev_err(d, "memory allocation failed\n");
223 goto clean_up_runlist; 224 goto clean_up_runlist;
@@ -265,7 +266,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
265 266
266 f->userd_entry_size = 1 << ram_userd_base_shift_v(); 267 f->userd_entry_size = 1 << ram_userd_base_shift_v();
267 268
268 err = gk20a_gmmu_alloc(g, f->userd_entry_size * f->num_channels, 269 err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size * f->num_channels,
269 &f->userd); 270 &f->userd);
270 if (err) { 271 if (err) {
271 dev_err(d, "memory allocation failed\n"); 272 dev_err(d, "memory allocation failed\n");