From 50667e097b2be567e3d2f95e23b046243bca2bf6 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Tue, 21 Mar 2017 15:34:50 -0700
Subject: gpu: nvgpu: Rename nvgpu DMA APIs

Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA-suitable memory)
and avoids confusion with GMMU-related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/linux/dma.c         | 87 +++++++++++++--------------
 drivers/gpu/nvgpu/common/semaphore.c         |  2 +-
 drivers/gpu/nvgpu/gk20a/cde_gk20a.c          |  4 +-
 drivers/gpu/nvgpu/gk20a/ce2_gk20a.c          |  4 +-
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c      | 10 ++--
 drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c       |  6 +-
 drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c   |  4 +-
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c         | 16 ++---
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c           | 40 ++++++-------
 drivers/gpu/nvgpu/gk20a/ltc_common.c         |  4 +-
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c           | 18 +++---
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c          | 14 ++---
 drivers/gpu/nvgpu/gm20b/acr_gm20b.c          | 10 ++--
 drivers/gpu/nvgpu/gp106/acr_gp106.c          |  8 +--
 drivers/gpu/nvgpu/gp106/gr_gp106.c           |  6 +-
 drivers/gpu/nvgpu/gp10b/gr_gp10b.c           | 18 +++---
 drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c         |  4 +-
 drivers/gpu/nvgpu/include/nvgpu/dma.h        | 88 ++++++++++++++--------------
 drivers/gpu/nvgpu/vgpu/fifo_vgpu.c           |  6 +-
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c |  8 +--
 20 files changed, 179 insertions(+), 178 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 755848ea..92182a0d 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -23,7 +23,7 @@
 #include "gk20a/gk20a.h"
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
+static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 		size_t size)
 {
 	u64 addr = 0;
@@ -38,11 +38,11 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-static void gk20a_dma_flags_to_attrs(unsigned long *attrs,
+static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
 		unsigned long flags)
 #define ATTR_ARG(x) *x
 #else
-static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
+static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
 		unsigned long flags)
 #define ATTR_ARG(x) x
 #endif
@@ -56,12 +56,12 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 #undef ATTR_ARG
 }
 
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags(g, 0, size, mem);
+	return nvgpu_dma_alloc_flags(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
+int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		struct nvgpu_mem *mem)
 {
 	if (g->mm.vidmem_is_vidmem) {
@@ -71,7 +71,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 	 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
 	 * difference, the user should use the flag explicitly anyway.
 	 */
-		int err = gk20a_gmmu_alloc_flags_vid(g,
+		int err = nvgpu_dma_alloc_flags_vid(g,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
@@ -83,15 +83,15 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 	 */
 	}
 
-	return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
+	return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
+	return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
@@ -103,7 +103,7 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	if (flags) {
 		DEFINE_DMA_ATTRS(dma_attrs);
 
-		gk20a_dma_flags_to_attrs(&dma_attrs, flags);
+		nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 		if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			mem->pages = dma_alloc_attrs(d,
@@ -149,19 +149,19 @@ fail_free:
 	return err;
 }
 
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_vid(g,
+	return nvgpu_dma_alloc_flags_vid(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
+	return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
 
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -185,7 +185,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
-	addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size);
+	addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
 	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 	if (!addr) {
 		/*
@@ -237,23 +237,23 @@ fail_physfree:
 #endif
 }
 
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
+	return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
 	if (vm->mm->vidmem_is_vidmem) {
 		/*
 		 * Force the no-kernel-mapping flag on because we don't support
 		 * the lack of it for vidmem - the user should not care when
-		 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
+		 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
 		 * difference, the user should use the flag explicitly anyway.
 		 */
-		int err = gk20a_gmmu_alloc_map_flags_vid(vm,
+		int err = nvgpu_dma_alloc_map_flags_vid(vm,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
@@ -265,19 +265,19 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 	 */
 	}
 
-	return gk20a_gmmu_alloc_map_flags_sys(vm, flags, size, mem);
+	return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
+	return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
+	int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;
@@ -293,21 +293,21 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	return 0;
 
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem)
 {
-	return gk20a_gmmu_alloc_map_flags_vid(vm,
+	return nvgpu_dma_alloc_map_flags_vid(vm,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem)
 {
-	int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
+	int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;
@@ -323,11 +323,11 @@ int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	return 0;
 
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
-static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
@@ -335,7 +335,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	if (mem->flags) {
 		DEFINE_DMA_ATTRS(dma_attrs);
 
-		gk20a_dma_flags_to_attrs(&dma_attrs, mem->flags);
+		nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
 
 		if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			dma_free_attrs(d, mem->size, mem->pages,
@@ -361,7 +361,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 	mem->aperture = APERTURE_INVALID;
 }
 
-static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	bool was_empty;
@@ -393,23 +393,24 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 #endif
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
-		return gk20a_gmmu_free_sys(g, mem);
+		return nvgpu_dma_free_sys(g, mem);
 	case APERTURE_VIDMEM:
-		return gk20a_gmmu_free_vid(g, mem);
+		return nvgpu_dma_free_vid(g, mem);
 	default:
 		break; /* like free() on "null" memory */
 	}
 }
 
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 	if (mem->gpu_va)
-		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
+		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size,
+				 gk20a_mem_flag_none);
 	mem->gpu_va = 0;
 
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 }
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index cfe1149f..7c9bf9da 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -53,7 +53,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 
 	__lock_sema_sea(sea);
 
-	ret = gk20a_gmmu_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
+	ret = nvgpu_dma_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
 			PAGE_SIZE * SEMAPHORE_POOL_COUNT,
 			&sea->sea_mem);
 	if (ret)
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index e70ee4a6..7c251e2d 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -54,7 +54,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 	for (i = 0; i < cde_ctx->num_bufs; i++) {
 		struct nvgpu_mem *mem = cde_ctx->mem + i;
-		gk20a_gmmu_unmap_free(cde_ctx->vm, mem);
+		nvgpu_dma_unmap_free(cde_ctx->vm, mem);
 	}
 
 	nvgpu_kfree(cde_ctx->g, cde_ctx->init_convert_cmd);
@@ -247,7 +247,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 
 	/* allocate buf */
 	mem = cde_ctx->mem + cde_ctx->num_bufs;
-	err = gk20a_gmmu_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
+	err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 	if (err) {
 		gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
 			   cde_ctx->num_bufs);
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 9cc4b678..f3ac28ea 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -195,7 +195,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 
 	if (ce_ctx->cmd_buf_mem.cpu_va) {
 		gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
-		gk20a_gmmu_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
+		nvgpu_dma_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
 	}
 
 	/* free the channel */
@@ -479,7 +479,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	}
 
 	/* allocate command buffer (4096 should be more than enough) from sysmem*/
-	err = gk20a_gmmu_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
+	err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
 	if (err) {
 		gk20a_err(ce_ctx->dev,
 			"ce: could not allocate command buffer for CE context");
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 6be616b3..81901c52 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -523,7 +523,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	gk20a_gr_flush_channel_tlb(gr);
 
-	gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
 	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
@@ -899,7 +899,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 	size = roundup_pow_of_two(c->gpfifo.entry_num * 2 * 18 * sizeof(u32) / 3);
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm, size, &q->mem);
+	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
 		gk20a_err(d, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
@@ -922,7 +922,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 	if (q->size == 0)
 		return;
 
-	gk20a_gmmu_unmap_free(ch_vm, &q->mem);
+	nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
 	memset(q, 0, sizeof(struct priv_cmd_queue));
 }
@@ -1244,7 +1244,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		return -EEXIST;
 	}
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm,
+	err = nvgpu_dma_alloc_map_sys(ch_vm,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
@@ -1331,7 +1331,7 @@ clean_up_sync:
 	}
 clean_up_unmap:
 	nvgpu_big_free(g, c->gpfifo.pipe);
-	gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 	gk20a_err(d, "fail");
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 738e8c1c..e5910e7f 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -143,7 +143,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 	if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
 		snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
 
-	ret = gk20a_gmmu_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
+	ret = nvgpu_dma_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
 			&data->hw_memdesc);
 	if (ret)
 		return ret;
@@ -192,7 +192,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 
 failed_allocation:
 	if (data->hw_memdesc.size) {
-		gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+		nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	}
 	data->hw_snapshot = NULL;
@@ -220,7 +220,7 @@ static void css_hw_disable_snapshot(struct gr_gk20a *gr)
 			perf_pmasys_mem_block_valid_false_f() |
 			perf_pmasys_mem_block_target_f(0));
 
-	gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+	nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index d8fa7505..96b94ea7 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -400,7 +400,7 @@ static int gk20a_fecs_trace_alloc_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	return gk20a_gmmu_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
+	return nvgpu_dma_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
 			* ctxsw_prog_record_timestamp_record_size_in_bytes_v(),
 			&trace->trace_buf);
 }
@@ -409,7 +409,7 @@ static void gk20a_fecs_trace_free_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_gmmu_free(g, &trace->trace_buf);
+	nvgpu_dma_free(g, &trace->trace_buf);
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c1f94eb3..ca09c22a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -483,7 +483,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
 		runlist = &f->runlist_info[runlist_id];
 		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-			gk20a_gmmu_free(g, &runlist->mem[i]);
+			nvgpu_dma_free(g, &runlist->mem[i]);
 		}
 
 		nvgpu_kfree(g, runlist->active_channels);
@@ -544,9 +544,9 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 	nvgpu_vfree(g, f->channel);
 	nvgpu_vfree(g, f->tsg);
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	gk20a_fifo_delete_runlist(f);
 
@@ -686,7 +686,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		f->num_runlist_entries, runlist_size);
 
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -940,12 +940,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&f->free_chs_mutex);
 
 	if (g->ops.mm.is_bar1_supported(g))
-		err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
+		err = nvgpu_dma_alloc_map_sys(&g->mm.bar1.vm,
 				f->userd_entry_size * f->num_channels,
 				&f->userd);
 	else
-		err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size *
+		err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 				f->num_channels, &f->userd);
 	if (err) {
 		dev_err(d, "userd memory allocation failed\n");
@@ -980,9 +980,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	nvgpu_vfree(g, f->channel);
 	f->channel = NULL;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9b6a546..af02491e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_flags_sys(g,
+			ret = nvgpu_dma_alloc_flags_sys(g,
 					NVGPU_DMA_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 			if (!pm_ctx->mem.gpu_va) {
 				gk20a_err(dev_from_gk20a(g),
 					"failed to map pm ctxt buffer");
-				gk20a_gmmu_free(g, &pm_ctx->mem);
+				nvgpu_dma_free(g, &pm_ctx->mem);
 				c->g->ops.fifo.enable_channel(c);
 				return -ENOMEM;
 			}
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &pm_ctx->mem);
+	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));
 
 	gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 			g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 			g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
-	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;
 
@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	if (ucode_info->surface_desc.gpu_va)
 		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 				ucode_info->surface_desc.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &ucode_info->surface_desc);
+	nvgpu_dma_free(g, &ucode_info->surface_desc);
 
 	release_firmware(gpccs_fw);
 	gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 	if (!desc)
 		return;
-	gk20a_gmmu_free(g, &desc->mem);
+	nvgpu_dma_free(g, &desc->mem);
 	desc->destroy = NULL;
 }
 
@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;
 
-	err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 			size, &desc->mem);
 	if (err)
 		return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 			gr->ctx_vars.buffer_total_size,
 			&gr_ctx->mem);
 	if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	return 0;
 
 err_free_mem:
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 
 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }
 
@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 			128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 
 	gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va, patch_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &patch_ctx->mem);
+	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
 }
 
@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 
 		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 				gk20a_mem_flag_none);
-		gk20a_gmmu_free(g, &pm_ctx->mem);
+		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
 }
 
@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 
 	gr_gk20a_free_global_ctx_buffers(g);
 
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-	gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_rd_mem);
 
-	gk20a_gmmu_free(g, &gr->compbit_store.mem);
+	nvgpu_dma_free(g, &gr->compbit_store.mem);
 
 	memset(&gr->compbit_store, 0,
 		sizeof(struct compbit_store_desc));
@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;
 	return 0;
 
 err_free_wr_mem:
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 	return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}
 
 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index 7c73be77..03b12740 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -70,7 +70,7 @@ static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
 			compbit_backing_size,
 			&gr->compbit_store.mem);
 }
 
@@ -80,7 +80,7 @@ static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 			compbit_backing_size,
 			&gr->compbit_store.mem);
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 79654af3..cfe7745d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -509,7 +509,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 
 static int gk20a_alloc_sysmem_flush(struct gk20a *g)
 {
-	return gk20a_gmmu_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
+	return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -897,9 +897,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	 * default.
 	 */
 	if (IS_ENABLED(CONFIG_ARM64))
-		err = gk20a_gmmu_alloc(g, len, &entry->mem);
+		err = nvgpu_dma_alloc(g, len, &entry->mem);
 	else
-		err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+		err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				len, &entry->mem);
 
@@ -929,7 +929,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 		return;
 	}
 
-	gk20a_gmmu_free(g, &entry->mem);
+	nvgpu_dma_free(g, &entry->mem);
 }
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -1756,7 +1756,7 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 	if (buf->dmabuf_priv)
 		buf->dmabuf_priv_delete(buf->dmabuf_priv);
 
-	gk20a_gmmu_free(buf->g, buf->mem);
+	nvgpu_dma_free(buf->g, buf->mem);
 	nvgpu_kfree(buf->g, buf);
 }
 
@@ -1873,7 +1873,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 
 	buf->mem->user_mem = true;
 
-	err = gk20a_gmmu_alloc_vid(g, bytes, buf->mem);
+	err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
 	if (err)
 		goto err_memfree;
 
@@ -1896,7 +1896,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	return fd;
 
 err_bfree:
-	gk20a_gmmu_free(g, buf->mem);
+	nvgpu_dma_free(g, buf->mem);
 err_memfree:
 	nvgpu_kfree(g, buf->mem);
 err_kfree:
@@ -4199,7 +4199,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc(g, ram_in_alloc_size_v(), inst_block);
+	err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 	if (err) {
 		gk20a_err(dev, "%s: memory allocation failed\n", __func__);
 		return err;
@@ -4212,7 +4212,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	if (inst_block->size)
-		gk20a_gmmu_free(g, inst_block);
+		nvgpu_dma_free(g, inst_block);
 }
 
 u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 7a6bfe22..547ba924 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -3151,7 +3151,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 	pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
 			pmu->desc->descriptor_size);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
 			&pmu->ucode);
 	if (err)
 		goto err_release_fw;
@@ -3225,7 +3225,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	INIT_WORK(&pmu->pg_init, pmu_setup_hw);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate memory\n");
@@ -3242,7 +3242,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
 
-	err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
+	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 			&pmu->trace_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate pmu trace buffer\n");
@@ -3255,7 +3255,7 @@ skip_init:
 	gk20a_dbg_fn("done");
 	return 0;
 
 err_free_seq_buf:
-	gk20a_gmmu_unmap_free(vm, &pmu->seq_buf);
+	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
 err_free_seq:
 	nvgpu_kfree(g, pmu->seq);
 err_free_mutex:
@@ -4760,7 +4760,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_vid(vm, size, mem);
+	err = nvgpu_dma_alloc_map_vid(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "memory allocation failed");
 		return -ENOMEM;
@@ -4776,7 +4776,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_sys(vm, size, mem);
+	err = nvgpu_dma_alloc_map_sys(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "failed to allocate memory\n");
 		return -ENOMEM;
@@ -4787,7 +4787,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	gk20a_gmmu_free(g, mem);
+	nvgpu_dma_free(g, mem);
 	memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 3cfcbb19..181e5301 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -369,7 +369,7 @@ static int gm20b_alloc_blob_space(struct gk20a *g,
 {
 	int err;
 
-	err = gk20a_gmmu_alloc_sys(g, size, mem);
+	err = nvgpu_dma_alloc_sys(g, size, mem);
 
 	return err;
 }
@@ -1115,7 +1115,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 			err = -1;
 			goto err_release_acr_fw;
 		}
-		err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 				&acr->acr_ucode);
 		if (err) {
 			err = -ENOMEM;
@@ -1171,7 +1171,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 	}
 	return 0;
 err_free_ucode_map:
-	gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
 err_release_acr_fw:
 	release_firmware(acr_fw);
 	acr->acr_fw = NULL;
@@ -1417,7 +1417,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 	/*TODO in code verify that enable PMU is done, scrubbing etc is done*/
 	/*TODO in code verify that gmmu vm init is done*/
-	err = gk20a_gmmu_alloc_flags_sys(g,
+	err = nvgpu_dma_alloc_flags_sys(g,
 			NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
 	if (err) {
 		gk20a_err(d, "failed to allocate memory\n");
@@ -1475,7 +1475,7 @@ err_unmap_bl:
 	gk20a_gmmu_unmap(vm, acr->hsbl_ucode.gpu_va,
 			acr->hsbl_ucode.size, gk20a_mem_flag_none);
 err_free_ucode:
-	gk20a_gmmu_free(g, &acr->hsbl_ucode);
+	nvgpu_dma_free(g, &acr->hsbl_ucode);
 err_done:
 	release_firmware(hsbl_fw);
 	return err;
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index 9acc8eda..631f9891 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -113,13 +113,13 @@ static int gp106_alloc_blob_space(struct gk20a *g,
 	 * Even though this mem_desc wouldn't be used, the wpr region needs to
 	 * be reserved in the allocator.
 	 */
-	err = gk20a_gmmu_alloc_flags_vid_at(g,
+	err = nvgpu_dma_alloc_flags_vid_at(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size,
 			&g->acr.wpr_dummy, wpr_inf.wpr_base);
 	if (err)
 		return err;
 
-	return gk20a_gmmu_alloc_flags_vid_at(g,
+	return nvgpu_dma_alloc_flags_vid_at(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
 			wpr_inf.nonwpr_base);
 }
@@ -1094,7 +1094,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 			err = -1;
 			goto err_release_acr_fw;
 		}
-		err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 				&acr->acr_ucode);
 		if (err) {
 			err = -ENOMEM;
@@ -1170,7 +1170,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 	return 0;
 
 err_free_ucode_map:
-	gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
 err_release_acr_fw:
 	release_firmware(acr_fw);
 	acr->acr_fw = NULL;
diff --git a/drivers/gpu/nvgpu/gp106/gr_gp106.c b/drivers/gpu/nvgpu/gp106/gr_gp106.c
index 78859f88..a804f9bb 100644
--- a/drivers/gpu/nvgpu/gp106/gr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/gr_gp106.c
@@ -226,11 +226,11 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 
 fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 fail:
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index b9367120..c1cb1376 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -839,7 +839,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_sys(vm->mm->g, size, mem);
+	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 	if (err)
 		return err;
 
@@ -859,7 +859,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	return 0;
 
 fail_free:
-	gk20a_gmmu_free(vm->mm->g, mem);
+	nvgpu_dma_free(vm->mm->g, mem);
 	return err;
 }
 
@@ -980,11 +980,11 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return 0;
 
 fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 fail:
 	return err;
 }
@@ -1098,10 +1098,10 @@ static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	if (g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close)
 		dump_ctx_switch_stats(g, vm, gr_ctx);
 
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 	gr_gk20a_free_gr_ctx(g, vm, gr_ctx);
 	gk20a_dbg_fn("done");
 }
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
index bf52b5c9..e8f3d930 100644
--- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c
@@ -39,7 +39,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	if (!g->mm.bar2_desc.gpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, rbfb_size,
+		err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
 				&g->mm.bar2_desc);
 		if (err) {
 			dev_err(dev_from_gk20a(g),
@@ -63,7 +63,7 @@ void gp10b_replayable_pagefault_buffer_deinit(struct gk20a *g)
 {
 	struct vm_gk20a *vm = &g->mm.bar2.vm;
 
-	gk20a_gmmu_unmap_free(vm, &g->mm.bar2_desc);
+	nvgpu_dma_unmap_free(vm, &g->mm.bar2_desc);
 }
 
 u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/dma.h b/drivers/gpu/nvgpu/include/nvgpu/dma.h
index d4fad584..43cff215 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/dma.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/dma.h
@@ -24,7 +24,7 @@ struct vm_gk20a;
 struct nvgpu_mem;
 
 /*
- * Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags*
+ * Flags for the below nvgpu_dma_{alloc,alloc_map}_flags*
 */
 
 /*
@@ -45,7 +45,7 @@ struct nvgpu_mem;
 #define NVGPU_DMA_READ_ONLY		(1 << 2)
 
 /**
- * gk20a_gmmu_alloc - Allocate DMA memory
+ * nvgpu_dma_alloc - Allocate DMA memory
  *
  * @g - The GPU.
  * @size - Size of the allocation in bytes.
@@ -56,10 +56,10 @@ struct nvgpu_mem;
  * memory can be either placed in VIDMEM or SYSMEM, which ever is more
  * convenient for the driver.
  */
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_flags - Allocate DMA memory
+ * nvgpu_dma_alloc_flags - Allocate DMA memory
  *
  * @g - The GPU.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -77,11 +77,11 @@ int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
  *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
+int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_sys - Allocate DMA memory
+ * nvgpu_dma_alloc_sys - Allocate DMA memory
  *
  * @g - The GPU.
  * @size - Size of the allocation in bytes.
@@ -91,10 +91,10 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
  * Returns 0 on success and a suitable error code when there's an error. This
  * allocates memory specifically in SYSMEM.
  */
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_flags_sys - Allocate DMA memory
+ * nvgpu_dma_alloc_flags_sys - Allocate DMA memory
  *
  * @g - The GPU.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -111,11 +111,11 @@ int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
  *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_vid - Allocate DMA memory
+ * nvgpu_dma_alloc_vid - Allocate DMA memory
  *
  * @g - The GPU.
  * @size - Size of the allocation in bytes.
@@ -125,10 +125,10 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
  * Returns 0 on success and a suitable error code when there's an error. This
  * allocates memory specifically in VIDMEM.
  */
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
+int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_flags_vid - Allocate DMA memory
+ * nvgpu_dma_alloc_flags_vid - Allocate DMA memory
  *
  * @g - The GPU.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -144,11 +144,11 @@ int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
  *
  */
-int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_flags_vid_at - Allocate DMA memory
+ * nvgpu_dma_alloc_flags_vid_at - Allocate DMA memory
  *
  * @g - The GPU.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -165,29 +165,29 @@ int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
  *
  *   %NVGPU_DMA_NO_KERNEL_MAPPING
  */
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem, dma_addr_t at);
 
 /**
- * gk20a_gmmu_free - Free a DMA allocation
+ * nvgpu_dma_free - Free a DMA allocation
  *
  * @g - The GPU.
  * @mem - An allocation to free.
  *
  * Free memory created with any of:
  *
- *   gk20a_gmmu_alloc()
- *   gk20a_gmmu_alloc_flags()
- *   gk20a_gmmu_alloc_sys()
- *   gk20a_gmmu_alloc_flags_sys()
- *   gk20a_gmmu_alloc_vid()
- *   gk20a_gmmu_alloc_flags_vid()
- *   gk20a_gmmu_alloc_flags_vid_at()
+ *   nvgpu_dma_alloc()
+ *   nvgpu_dma_alloc_flags()
+ *   nvgpu_dma_alloc_sys()
+ *   nvgpu_dma_alloc_flags_sys()
+ *   nvgpu_dma_alloc_vid()
+ *   nvgpu_dma_alloc_flags_vid()
+ *   nvgpu_dma_alloc_flags_vid_at()
  */
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -198,11 +198,11 @@ void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
  * either placed in VIDMEM or SYSMEM, which ever is more convenient for the
  * driver.
  */
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -221,11 +221,11 @@ int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
  *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -234,11 +234,11 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in SYSMEM.
  */
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -255,11 +255,11 @@ int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
  *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -268,11 +268,11 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in VIDMEM.
  */
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -289,24 +289,24 @@ int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
  *   %NVGPU_DMA_FORCE_CONTIGUOUS
  *   %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_unmap_free - Free a DMA allocation
+ * nvgpu_dma_unmap_free - Free a DMA allocation
  *
  * @g - The GPU.
  * @mem - An allocation to free.
  *
  * Free memory created with any of:
  *
- *   gk20a_gmmu_alloc_map()
- *   gk20a_gmmu_alloc_map_flags()
- *   gk20a_gmmu_alloc_map_sys()
- *   gk20a_gmmu_alloc_map_flags_sys()
- *   gk20a_gmmu_alloc_map_vid()
- *   gk20a_gmmu_alloc_map_flags_vid()
+ *   nvgpu_dma_alloc_map()
+ *   nvgpu_dma_alloc_map_flags()
+ *   nvgpu_dma_alloc_map_sys()
+ *   nvgpu_dma_alloc_map_flags_sys()
+ *   nvgpu_dma_alloc_map_vid()
+ *   nvgpu_dma_alloc_map_flags_vid()
  */
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
 
 #endif
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index 59fb0c4a..e2883f7c 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -216,7 +216,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 	runlist_size = sizeof(u16) * f->num_channels;
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -260,7 +260,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->userd_entry_size = 1 << ram_userd_base_shift_v();
 
-	err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size * f->num_channels,
+	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
 			&f->userd);
 	if (err) {
 		dev_err(d, "memory allocation failed\n");
@@ -327,7 +327,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	/* FIXME: unmap from bar1 */
-	gk20a_gmmu_free(g, &f->userd);
+	nvgpu_dma_free(g, &f->userd);
 	memset(&f->userd, 0, sizeof(f->userd));
 
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 527e12e4..da41abd4 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -42,10 +42,10 @@ static void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
 			gmmu_page_size_kernel);
 
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 
 	nvgpu_kfree(g, gr_ctx);
 }
--
cgit v1.2.2
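
For readers tracking the rename, here is a minimal sketch of what a call site
reads like after this change. It sits after the patch trailer, so git am
ignores it. The nvgpu_dma_* calls, flags, and signatures are the ones declared
in include/nvgpu/dma.h above; the wrapper functions, the include path, and the
SZ_4K sizes are assumptions for illustration only, not code from this commit.

	/*
	 * Illustration only -- not part of the patch. Old names are shown in
	 * the comments; everything named example_* is hypothetical.
	 */
	#include <nvgpu/dma.h>	/* assumed include path for the header above */

	static int example_sysmem_scratch(struct gk20a *g, struct nvgpu_mem *mem)
	{
		/* was: gk20a_gmmu_alloc_flags_sys() */
		int err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
						    SZ_4K, mem);
		if (err)
			return err;

		/* ... use the DMA buffer ... */

		/* was: gk20a_gmmu_free() */
		nvgpu_dma_free(g, mem);
		return 0;
	}

	static int example_mapped_buffer(struct vm_gk20a *vm, struct nvgpu_mem *mem)
	{
		/* was: gk20a_gmmu_alloc_map_sys(); also GMMU-maps the buffer */
		int err = nvgpu_dma_alloc_map_sys(vm, SZ_4K, mem);

		if (err)
			return err;

		/* ... mem->gpu_va is now valid for GPU access ... */

		/* was: gk20a_gmmu_unmap_free() */
		nvgpu_dma_unmap_free(vm, mem);
		return 0;
	}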