From 0090ee5aca268a3c359f34c74b8c521df3bd8593 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 25 May 2017 16:56:50 -0700
Subject: gpu: nvgpu: nvgpu SGL implementation

The last major item preventing the core MM code in the nvgpu driver
from being platform agnostic is the usage of Linux scatter-gather
tables and scatter-gather lists. These data structures are used
throughout the mapping code to handle discontiguous DMA allocations
and are also overloaded to represent VIDMEM allocs.

The notion of a scatter-gather table is crucial to a HW device that
can handle discontiguous DMA. The GPU has an MMU which allows the GPU
to do page gathering and present a virtually contiguous buffer to the
GPU HW. As a result it makes sense for the GPU driver to use some sort
of scatter-gather concept to maximize memory usage efficiency.

To that end this patch keeps the notion of a scatter-gather list but
implements it in the nvgpu common code. It is based heavily on the
Linux SGL concept. It is a singly linked list of blocks - each
representing a chunk of memory. To map or use a DMA allocation, SW
must iterate over each block in the SGL.

This patch implements the most basic level of support for this data
structure. There are certainly easy optimizations that could be done
to speed up the current implementation. However, this patch's goal is
simply to divest the core MM code of any last Linuxisms. Speed and
efficiency come next.

Change-Id: Icf44641db22d87fa1d003debbd9f71b605258e42
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1530867
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/gk20a.h        |  9 ++++---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c     | 20 +++++++---------
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h     | 43 +++++++++++++++++-----------------
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.c | 13 +++++-----
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.h |  6 ++---
 5 files changed, 43 insertions(+), 48 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a')
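A note on the data structure itself: struct nvgpu_mem_sgl is defined
in the common (non-gk20a) half of this series, so only its accessors
are visible in the hunks below. As a rough sketch - the field layout
here is an assumption inferred from nvgpu_mem_sgl_phys(),
nvgpu_mem_sgl_length() and nvgpu_mem_sgl_next(), not part of this
diff - the list looks like:

    /* Assumed layout; next == NULL terminates the list. */
    struct nvgpu_mem_sgl {
            struct nvgpu_mem_sgl *next;
            u64 phys;    /* physical address of this chunk */
            u64 length;  /* length of this chunk in bytes */
    };

Every consumer walks it the same way: start at the head, operate on
one chunk per iteration, and advance with nvgpu_mem_sgl_next() until
the list runs out.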
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 7eee2d51..355228db 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -34,6 +34,7 @@ struct gk20a_debug_output;
 struct nvgpu_clk_pll_debug_data;
 struct nvgpu_nvhost_dev;
 struct nvgpu_cpu_time_correlation_sample;
+struct nvgpu_mem_sgl;
 
 #include
 #include
@@ -70,8 +71,6 @@ struct nvgpu_cpu_time_correlation_sample;
 #endif
 #include "ecc_gk20a.h"
 
-struct page_alloc_chunk;
-
 /* PTIMER_REF_FREQ_HZ corresponds to a period of 32 nanoseconds.
    32 ns is the resolution of ptimer. */
 #define PTIMER_REF_FREQ_HZ 31250000
@@ -701,7 +700,7 @@ struct gpu_ops {
		bool (*support_sparse)(struct gk20a *g);
		u64 (*gmmu_map)(struct vm_gk20a *vm,
				u64 map_offset,
-				struct sg_table *sgt,
+				struct nvgpu_mem_sgl *sgl,
				u64 buffer_offset,
				u64 size,
				int pgsz_idx,
@@ -761,9 +760,9 @@
			size_t size);
	struct {
		u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
-			     struct page_alloc_chunk *chunk, u32 w);
+			     struct nvgpu_mem_sgl *sgl, u32 w);
		void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
-			     struct page_alloc_chunk *chunk);
+			     struct nvgpu_mem_sgl *sgl);
		u32 (*data032_r)(u32 i);
	} pramin;
	struct {
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 97b7aa80..cd34e769 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1151,7 +1151,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
	struct gk20a_fence *gk20a_fence_out = NULL;
	struct gk20a_fence *gk20a_last_fence = NULL;
	struct nvgpu_page_alloc *alloc = NULL;
-	struct page_alloc_chunk *chunk = NULL;
+	struct nvgpu_mem_sgl *sgl = NULL;
	int err = 0;
 
	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
@@ -1159,16 +1159,16 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
-	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-				  page_alloc_chunk, list_entry) {
+	sgl = alloc->sgl;
+	while (sgl) {
		if (gk20a_last_fence)
			gk20a_fence_put(gk20a_last_fence);
 
		err = gk20a_ce_execute_ops(g,
				g->mm.vidmem.ce_ctx_id,
				0,
-				chunk->base,
-				chunk->length,
+				nvgpu_mem_sgl_phys(sgl),
+				nvgpu_mem_sgl_length(sgl),
				0x00000000,
				NVGPU_CE_DST_LOCATION_LOCAL_FB,
				NVGPU_CE_MEMSET,
@@ -1183,6 +1183,7 @@
		}
 
		gk20a_last_fence = gk20a_fence_out;
+		sgl = nvgpu_mem_sgl_next(sgl);
	}
 
	if (gk20a_last_fence) {
@@ -1262,10 +1263,10 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
	return addr;
 }
 
-u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova)
 {
	/* ensure it is not vidmem allocation */
-	WARN_ON(is_vidmem_page_alloc((u64)iova));
+	WARN_ON(is_vidmem_page_alloc(iova));
 
	if (device_is_iommuable(dev_from_gk20a(g)) &&
	    g->ops.mm.get_physical_addr_bits)
@@ -2167,11 +2168,6 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
	return 34;
 }
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
-{
-	return phys;
-}
-
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
						      u32 big_page_size)
 {
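The vidmem clear above is the canonical consumer of the new list: one
CE memset per chunk, keeping only the most recent fence and waiting on
the last one after the loop. One stylistic note: because the while ()
form advances the iterator at the bottom of the body, an early
continue would spin forever; an equivalent for () form keeps the
advance in one place. A minimal sketch, with the CE submission reduced
to a comment:

    struct nvgpu_mem_sgl *sgl;

    for (sgl = alloc->sgl; sgl; sgl = nvgpu_mem_sgl_next(sgl)) {
            /*
             * NVGPU_CE_MEMSET of nvgpu_mem_sgl_length(sgl) bytes
             * starting at nvgpu_mem_sgl_phys(sgl), exactly as in
             * gk20a_gmmu_clear_vidmem_mem() above.
             */
    }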
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index c77bebf8..2fdc1729 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -336,7 +336,6 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 
 int gk20a_mm_suspend(struct gk20a *g);
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags);
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova);
 
 void gk20a_mm_ltc_isr(struct gk20a *g);
@@ -361,29 +360,29 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 }
 
 u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct sg_table *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture);
+			  u64 map_offset,
+			  struct nvgpu_mem_sgl *sgl,
+			  u64 buffer_offset,
+			  u64 size,
+			  int pgsz_idx,
+			  u8 kind_v,
+			  u32 ctag_offset,
+			  u32 flags,
+			  int rw_flag,
+			  bool clear_ctags,
+			  bool sparse,
+			  bool priv,
+			  struct vm_gk20a_mapping_batch *batch,
+			  enum nvgpu_aperture aperture);
 
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
-			u64 vaddr,
-			u64 size,
-			int pgsz_idx,
-			bool va_allocated,
-			int rw_flag,
-			bool sparse,
-			struct vm_gk20a_mapping_batch *batch);
+			     u64 vaddr,
+			     u64 size,
+			     int pgsz_idx,
+			     bool va_allocated,
+			     int rw_flag,
+			     bool sparse,
+			     struct vm_gk20a_mapping_batch *batch);
 
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf);
 void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index 9d19e9e5..8a34a63c 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -26,9 +26,9 @@
 
 /* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct page_alloc_chunk *chunk, u32 w)
+		       struct nvgpu_mem_sgl *sgl, u32 w)
 {
-	u64 bufbase = chunk->base;
+	u64 bufbase = nvgpu_mem_sgl_phys(sgl);
	u64 addr = bufbase + w * sizeof(u32);
	u32 hi = (u32)((addr & ~(u64)0xfffff) >>
		bus_bar0_window_target_bar0_window_base_shift_v());
@@ -40,8 +40,9 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 
	gk20a_dbg(gpu_dbg_mem,
		"0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
-		hi, lo, mem, chunk, bufbase,
-		bufbase + chunk->length, chunk->length);
+		hi, lo, mem, sgl, bufbase,
+		bufbase + nvgpu_mem_sgl_length(sgl),
+		nvgpu_mem_sgl_length(sgl));
 
	WARN_ON(!bufbase);
 
@@ -57,9 +58,9 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 }
 
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct page_alloc_chunk *chunk)
+		       struct nvgpu_mem_sgl *sgl)
 {
-	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk);
+	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl);
 
	nvgpu_spinlock_release(&g->mm.pramin_window_lock);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
index 1a1ac871..fc5ba919 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
@@ -19,10 +19,10 @@
 
 struct gk20a;
 struct nvgpu_mem;
-struct page_alloc_chunk;
+struct nvgpu_mem_sgl;
 
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct page_alloc_chunk *chunk, u32 w);
+		       struct nvgpu_mem_sgl *sgl, u32 w);
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct page_alloc_chunk *chunk);
+		       struct nvgpu_mem_sgl *sgl);
 #endif
-- 
cgit v1.2.2
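For reference, the converted PRAMIN hooks pair up as enter/exit around
a BAR0 window access: gk20a_pramin_enter() programs the window for the
chunk backing word 'w' and returns with mm.pramin_window_lock held,
and gk20a_pramin_exit() drops the lock. A hypothetical caller - the
helper name, the byte-offset interpretation of the return value, and
the data032_r() usage are illustrative assumptions, not code from this
patch:

    /* Hypothetical: read one 32-bit word of 'mem' through PRAMIN. */
    static u32 pramin_read_word(struct gk20a *g, struct nvgpu_mem *mem,
                                struct nvgpu_mem_sgl *sgl, u32 w)
    {
            /* Returns a byte offset into the window; lock now held. */
            u32 byteoff = g->ops.pramin.enter(g, mem, sgl, w);
            u32 val = gk20a_readl(g,
                    g->ops.pramin.data032_r(byteoff / sizeof(u32)));

            g->ops.pramin.exit(g, mem, sgl);
            return val;
    }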