From e32f62fadfcde413bcd9b5af61ad884e27ba2bf1 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 6 Apr 2017 15:30:01 -0700
Subject: gpu: nvgpu: Move Linux nvgpu_mem fields

Hide the Linux-specific nvgpu_mem fields so that, in subsequent patches,
core code can use mem_desc instead of struct sg_table directly. Routines
for accessing system-specific fields will be added as needed.

This is the first step in a fairly major overhaul of the GMMU mapping
routines. There are numerous issues with the current design (or lack
thereof): massively coupled code, system dependencies, disorganization,
etc.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I2e7d3ae3a07468cfc17c1c642d28ed1b0952474d
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1464076
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/linux/dma.c       | 60 +++++++++++++++---------
 drivers/gpu/nvgpu/common/linux/nvgpu_mem.c |  2 +-
 drivers/gpu/nvgpu/common/pramin.c          |  2 +-
 drivers/gpu/nvgpu/common/semaphore.c       |  4 +-
 4 files changed, 35 insertions(+), 33 deletions(-)

(limited to 'drivers/gpu/nvgpu/common')

diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 2a75ad13..832d0f47 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -107,10 +107,10 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-		mem->pages = dma_alloc_attrs(d,
+		mem->priv.pages = dma_alloc_attrs(d,
 				size, &iova, GFP_KERNEL,
 				__DMA_ATTR(dma_attrs));
-		if (!mem->pages)
+		if (!mem->priv.pages)
 			return -ENOMEM;
 	} else {
 		mem->cpu_va = dma_alloc_attrs(d,
@@ -126,10 +126,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	}
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
+		err = gk20a_get_sgtable_from_pages(d, &mem->priv.sgt,
+						   mem->priv.pages,
 						   iova, size);
 	else {
-		err = gk20a_get_sgtable(d, &mem->sgt, mem->cpu_va, iova, size);
+		err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va,
+					iova, size);
 		memset(mem->cpu_va, 0, size);
 	}
 	if (err)
@@ -137,7 +139,7 @@
 
 	mem->size = size;
 	mem->aperture = APERTURE_SYSMEM;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	gk20a_dbg_fn("done");
 
@@ -146,7 +148,7 @@
 fail_free:
 	dma_free_coherent(d, size, mem->cpu_va, iova);
 	mem->cpu_va = NULL;
-	mem->sgt = NULL;
+	mem->priv.sgt = NULL;
 	return err;
 }
 
@@ -204,23 +206,23 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	else
 		mem->fixed = false;
 
-	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!mem->sgt) {
+	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
+	if (!mem->priv.sgt) {
 		err = -ENOMEM;
 		goto fail_physfree;
 	}
 
-	err = sg_alloc_table(mem->sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
 	if (err)
 		goto fail_kfree;
 
-	set_vidmem_page_alloc(mem->sgt->sgl, addr);
-	sg_set_page(mem->sgt->sgl, NULL, size, 0);
+	set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
+	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
 
 	mem->size = size;
 	mem->aperture = APERTURE_VIDMEM;
 	mem->allocator = vidmem_alloc;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	nvgpu_init_list_node(&mem->clear_list_entry);
 
@@ -229,7 +231,7 @@
 	return 0;
 fail_kfree:
-	nvgpu_kfree(g, mem->sgt);
+	nvgpu_kfree(g, mem->priv.sgt);
 fail_physfree:
 	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
 }
@@ -283,7 +285,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -313,7 +315,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -332,31 +334,31 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
-	if (mem->cpu_va || mem->pages) {
-		if (mem->flags) {
+	if (mem->cpu_va || mem->priv.pages) {
+		if (mem->priv.flags) {
 			DEFINE_DMA_ATTRS(dma_attrs);
 
-			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
+			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
 
-			if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-				dma_free_attrs(d, mem->size, mem->pages,
-					sg_dma_address(mem->sgt->sgl),
+			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+				dma_free_attrs(d, mem->size, mem->priv.pages,
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			} else {
 				dma_free_attrs(d, mem->size, mem->cpu_va,
-					sg_dma_address(mem->sgt->sgl),
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			}
 		} else {
 			dma_free_coherent(d, mem->size, mem->cpu_va,
-				sg_dma_address(mem->sgt->sgl));
+				sg_dma_address(mem->priv.sgt->sgl));
 		}
 		mem->cpu_va = NULL;
-		mem->pages = NULL;
+		mem->priv.pages = NULL;
 	}
 
-	if (mem->sgt)
-		gk20a_free_sgtable(g, &mem->sgt);
+	if (mem->priv.sgt)
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 	mem->size = 0;
 	mem->aperture = APERTURE_INVALID;
@@ -368,7 +370,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	bool was_empty;
 
 	/* Sanity check - only this supported when allocating. */
-	WARN_ON(mem->flags != NVGPU_DMA_NO_KERNEL_MAPPING);
+	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	if (mem->user_mem) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
@@ -385,8 +387,8 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index eb214aad..bb19dd61 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -57,7 +57,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 		return -EBUSY;
 	}
 
-	cpu_va = vmap(mem->pages,
+	cpu_va = vmap(mem->priv.pages,
 			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
 			0, pgprot_writecombine(PAGE_KERNEL));
 
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 378711fc..688e5ce8 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -87,7 +87,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	struct page_alloc_chunk *chunk = NULL;
 	u32 byteoff, start_reg, until_end, n;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 			page_alloc_chunk, list_entry) {
 		if (offset >= chunk->length)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 9e437410..bf7b6348 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -60,7 +60,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	if (ret)
 		goto out;
 
-	sea->ro_sg_table = sea->sea_mem.sgt;
+	sea->ro_sg_table = sea->sea_mem.priv.sgt;
 	sea->size = SEMAPHORE_POOL_COUNT;
 	sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
 
@@ -154,7 +154,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	page_idx = (unsigned long)ret;
 
-	p->page = sea->sea_mem.pages[page_idx];
+	p->page = sea->sea_mem.priv.pages[page_idx];
 	p->ro_sg_table = sea->ro_sg_table;
 	p->page_idx = page_idx;
 	p->sema_sea = sea;
-- 
cgit v1.2.2
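
Note: the header change that actually introduces the priv member lives
outside the drivers/gpu/nvgpu/common subtree this diff is limited to, so
it is not visible in the hunks above. For orientation, a minimal sketch
of the layout those hunks assume follows. It is inferred from the field
accesses in the patch (mem->priv.pages, mem->priv.sgt, mem->priv.flags),
not copied from the real nvgpu headers; names and types beyond those
accesses are assumptions.

#include <linux/mm_types.h>	/* struct page */
#include <linux/scatterlist.h>	/* struct sg_table */
#include <linux/types.h>	/* size_t, u64 */

/* Reduced to the aperture values the hunks above actually use. */
enum nvgpu_aperture {
	APERTURE_INVALID,
	APERTURE_SYSMEM,
	APERTURE_VIDMEM,
};

/* Linux-specific state, gathered behind a single member. */
struct nvgpu_mem_priv {
	struct page **pages;	/* set for NVGPU_DMA_NO_KERNEL_MAPPING allocs */
	struct sg_table *sgt;	/* DMA scatter-gather description */
	unsigned long flags;	/* NVGPU_DMA_* flags recorded at alloc time */
};

struct nvgpu_mem {
	enum nvgpu_aperture aperture;	/* SYSMEM, VIDMEM, or INVALID */
	size_t size;
	void *cpu_va;
	u64 gpu_va;
	/* ... other core, OS-agnostic fields elided ... */

	/*
	 * OS-private data. Core code should stop dereferencing these
	 * fields directly; accessor routines are added as needed in
	 * later patches.
	 */
	struct nvgpu_mem_priv priv;
};

With a layout along these lines, every mem->pages, mem->sgt, and
mem->flags reference becomes mem->priv.*, which is exactly the mechanical
substitution the hunks above perform. The value is the boundary it draws:
anything under priv is Linux-only, so core code that still touches it is
now easy to spot and convert to accessors.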