From 0090ee5aca268a3c359f34c74b8c521df3bd8593 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 25 May 2017 16:56:50 -0700
Subject: gpu: nvgpu: nvgpu SGL implementation

The last major item preventing the core MM code in the nvgpu driver
from being platform agnostic is the usage of Linux scattergather tables
and scattergather lists. These data structures are used throughout the
mapping code to handle discontiguous DMA allocations and are also
overloaded to represent VIDMEM allocs.

The notion of a scatter gather table is crucial to a HW device that can
handle discontiguous DMA. The GPU has an MMU which allows the GPU to do
page gathering and present a virtually contiguous buffer to the GPU HW.
As a result it makes sense for the GPU driver to use some sort of
scatter gather concept to maximize memory usage efficiency.

To that end this patch keeps the notion of a scatter gather list but
implements it in the nvgpu common code. It is based heavily on the
Linux SGL concept. It is a singly linked list of blocks, each
representing a chunk of memory. To map or use a DMA allocation SW must
iterate over each block in the SGL.

This patch implements the most basic level of support for this data
structure. There are certainly easy optimizations that could be done to
speed up the current implementation. However, this patch's goal is
simply to divest the core MM code of any last Linux-isms. Speed and
efficiency come next.

Change-Id: Icf44641db22d87fa1d003debbd9f71b605258e42
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1530867
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')

diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 97b7aa80..cd34e769 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1151,7 +1151,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
         struct gk20a_fence *gk20a_fence_out = NULL;
         struct gk20a_fence *gk20a_last_fence = NULL;
         struct nvgpu_page_alloc *alloc = NULL;
-        struct page_alloc_chunk *chunk = NULL;
+        struct nvgpu_mem_sgl *sgl = NULL;
         int err = 0;
 
         if (g->mm.vidmem.ce_ctx_id == (u32)~0)
@@ -1159,16 +1159,16 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
         alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
-        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-                        page_alloc_chunk, list_entry) {
+        sgl = alloc->sgl;
+        while (sgl) {
                 if (gk20a_last_fence)
                         gk20a_fence_put(gk20a_last_fence);
 
                 err = gk20a_ce_execute_ops(g,
                         g->mm.vidmem.ce_ctx_id,
                         0,
-                        chunk->base,
-                        chunk->length,
+                        nvgpu_mem_sgl_phys(sgl),
+                        nvgpu_mem_sgl_length(sgl),
                         0x00000000,
                         NVGPU_CE_DST_LOCATION_LOCAL_FB,
                         NVGPU_CE_MEMSET,
@@ -1183,6 +1183,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
                 }
 
                 gk20a_last_fence = gk20a_fence_out;
+                sgl = nvgpu_mem_sgl_next(sgl);
         }
 
         if (gk20a_last_fence) {
@@ -1262,10 +1263,10 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
         return addr;
 }
 
-u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova)
 {
         /* ensure it is not vidmem allocation */
-        WARN_ON(is_vidmem_page_alloc((u64)iova));
+        WARN_ON(is_vidmem_page_alloc(iova));
 
         if (device_is_iommuable(dev_from_gk20a(g)) &&
                         g->ops.mm.get_physical_addr_bits)
@@ -2167,11 +2168,6 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
         return 34;
 }
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
-{
-        return phys;
-}
-
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
                                                       u32 big_page_size)
 {
-- 
cgit v1.2.2
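
For context on the structure the hunks above consume, here is a minimal sketch
of the nvgpu SGL iteration pattern described in the commit message. The field
layout, accessor bodies, and the nvgpu_mem_sgl_total_bytes() helper are
assumptions inferred from the accessors used in the diff (nvgpu_mem_sgl_phys(),
nvgpu_mem_sgl_length(), nvgpu_mem_sgl_next()); the real definitions live in the
nvgpu common code introduced by this series, not in mm_gk20a.c.

    #include <linux/types.h>        /* u64 */

    /*
     * Hypothetical layout: a singly linked list of blocks, each describing
     * one physically contiguous chunk of an otherwise discontiguous
     * allocation.  The real struct nvgpu_mem_sgl is defined in the nvgpu
     * common code.
     */
    struct nvgpu_mem_sgl {
            struct nvgpu_mem_sgl *next;
            u64 phys;       /* physical base of this chunk */
            u64 length;     /* size of this chunk in bytes */
    };

    static inline struct nvgpu_mem_sgl *nvgpu_mem_sgl_next(struct nvgpu_mem_sgl *sgl)
    {
            return sgl->next;
    }

    static inline u64 nvgpu_mem_sgl_phys(struct nvgpu_mem_sgl *sgl)
    {
            return sgl->phys;
    }

    static inline u64 nvgpu_mem_sgl_length(struct nvgpu_mem_sgl *sgl)
    {
            return sgl->length;
    }

    /*
     * Hypothetical consumer mirroring the while (sgl) loop added to
     * gk20a_gmmu_clear_vidmem_mem(): walk every block of the allocation and
     * operate on its physical base and length.  Summing the lengths is used
     * here only to keep the example self-contained.
     */
    static u64 nvgpu_mem_sgl_total_bytes(struct nvgpu_mem_sgl *sgl)
    {
            u64 bytes = 0;

            while (sgl) {
                    bytes += nvgpu_mem_sgl_length(sgl);
                    sgl = nvgpu_mem_sgl_next(sgl);
            }

            return bytes;
    }

The walk keeps the same chunk-at-a-time shape as the Linux scatterlist
iteration it replaces, but without depending on struct scatterlist, which is
what allows the core MM code to become platform agnostic.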