From f43231f7a5b60cd7ebd3068dde04eaac43361d02 Mon Sep 17 00:00:00 2001
From: Deepak Nibade
Date: Thu, 25 Aug 2016 14:34:55 +0530
Subject: gpu: nvgpu: enable big page support for pci

While mapping the buffer, first check if the buffer is in vidmem;
if so, reinterpret the scatterlist's DMA address as the vidmem
allocation and then walk through each chunk to decide the alignment.

Add a new API gk20a_mm_get_align() which returns the alignment
based on the scatterlist and aperture, and use this API to get the
alignment during mapping.

Enable big page support for pci by unsetting disable_bigpage.

Jira DNVGPU-97

Change-Id: I358dc98fac8103fdf9d2bde758e61b363fea9ae9
Signed-off-by: Deepak Nibade
Reviewed-on: http://git-master/r/1207673
(cherry picked from commit d14d42290eed4aa7a2dd2be25e8e996917a58e82)
Reviewed-on: http://git-master/r/1210959
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 72 +++++++++++++++++++++++++++-----------
 1 file changed, 51 insertions(+), 21 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a')

diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 81fa38bb..08fbfb80 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -2183,6 +2183,51 @@ err_kfree:
 #endif
 }
 
+static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
+		enum gk20a_aperture aperture)
+{
+	u64 align = 0, chunk_align = 0;
+	u64 buf_addr;
+
+	if (aperture == APERTURE_VIDMEM) {
+		struct gk20a_page_alloc *alloc = (struct gk20a_page_alloc *)
+			sg_dma_address(sgl);
+		struct page_alloc_chunk *chunk = NULL;
+
+		list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+			chunk_align = 1ULL << __ffs(chunk->base | chunk->length);
+
+			if (align)
+				align = min(align, chunk_align);
+			else
+				align = chunk_align;
+		}
+
+		return align;
+	}
+
+	buf_addr = (u64)sg_dma_address(sgl);
+
+	if (g->mm.bypass_smmu || buf_addr == DMA_ERROR_CODE || !buf_addr) {
+		while (sgl) {
+			buf_addr = (u64)sg_phys(sgl);
+			chunk_align = 1ULL << __ffs(buf_addr | (u64)sgl->length);
+
+			if (align)
+				align = min(align, chunk_align);
+			else
+				align = chunk_align;
+			sgl = sg_next(sgl);
+		}
+
+		return align;
+	}
+
+	align = 1ULL << __ffs(buf_addr);
+
+	return align;
+}
+
 u64 gk20a_vm_map(struct vm_gk20a *vm,
 			struct dma_buf *dmabuf,
 			u64 offset_align,
@@ -2207,7 +2252,6 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	struct gk20a_comptags comptags;
 	bool clear_ctags = false;
 	struct scatterlist *sgl;
-	u64 buf_addr;
 	u64 ctag_map_win_size = 0;
 	u32 ctag_map_win_ctagline = 0;
 	struct vm_reserved_va_node *va_node = NULL;
@@ -2257,22 +2301,14 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	bfr.kind_v = kind;
 	bfr.size = dmabuf->size;
 	sgl = bfr.sgt->sgl;
-	buf_addr = (u64)sg_dma_address(bfr.sgt->sgl);
-	if (g->mm.bypass_smmu || buf_addr == DMA_ERROR_CODE || !buf_addr) {
-		while (sgl) {
-			u64 align;
-			buf_addr = (u64)sg_phys(sgl);
+	aperture = gk20a_dmabuf_aperture(g, dmabuf);
+	if (aperture == APERTURE_INVALID) {
+		err = -EINVAL;
+		goto clean_up;
+	}
 
-			align = 1ULL << __ffs(buf_addr | (u64)sgl->length);
-			if (bfr.align)
-				bfr.align = min_t(u64, align, bfr.align);
-			else
-				bfr.align = align;
-			sgl = sg_next(sgl);
-		}
-	} else
-		bfr.align = 1ULL << __ffs(buf_addr);
+	bfr.align = gk20a_mm_get_align(g, sgl, aperture);
 
 	bfr.pgsz_idx = -1;
 	mapping_size = mapping_size ?
 				mapping_size : bfr.size;
@@ -2388,12 +2424,6 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	ctag_offset += buffer_offset >>
 		       ilog2(g->ops.fb.compression_page_size(g));
 
-	aperture = gk20a_dmabuf_aperture(g, dmabuf);
-	if (aperture == APERTURE_INVALID) {
-		err = -EINVAL;
-		goto clean_up;
-	}
-
 	/* update gmmu ptes */
 	map_offset = g->ops.mm.gmmu_map(vm, map_offset,
 					bfr.sgt,
-- 
cgit v1.2.2
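
The heart of this change is the per-chunk alignment computation in
gk20a_mm_get_align(): 1ULL << __ffs(base | length) is the largest power of
two that divides both a chunk's start address and its size, and the minimum
of that value over all chunks is the strongest alignment the whole buffer
can guarantee, which then feeds the page-size choice for the mapping. Below
is a minimal stand-alone sketch of that computation in user-space C; the
chunk values are made up for illustration, and lowest_set_bit() merely
stands in for the kernel's __ffs():

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned int lowest_set_bit(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);
}

struct chunk {
	uint64_t base;
	uint64_t length;
};

int main(void)
{
	/* Made-up vidmem allocation split into three chunks. */
	struct chunk chunks[] = {
		{ 0x10000, 0x10000 },	/* 64K-aligned base and size */
		{ 0x24000, 0x04000 },	/* base is only 16K-aligned */
		{ 0x40000, 0x20000 },	/* 128K: limited by the length */
	};
	uint64_t align = 0;
	size_t i;

	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		/* Largest power of two dividing both base and length. */
		uint64_t chunk_align =
			1ULL << lowest_set_bit(chunks[i].base |
					       chunks[i].length);

		/* Keep the weakest alignment seen so far. */
		if (!align || chunk_align < align)
			align = chunk_align;
	}

	printf("buffer alignment: 0x%llx\n", (unsigned long long)align);
	return 0;
}

In this example the middle chunk is only 16K-aligned, so it caps the whole
buffer at 16K even though the other chunks would allow 64K or more; the
program prints "buffer alignment: 0x4000".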