From 65c230fc83325828e152a4479d7ffefbbe784a77 Mon Sep 17 00:00:00 2001
From: Deepak Nibade
Date: Wed, 2 Apr 2014 18:56:55 +0530
Subject: gpu: nvgpu: gk20a: always map ptes for 64 bit arch

On a 64 bit architecture we have plenty of vmalloc space, so we can
keep all the ptes mapped at all times.

But on a 32 bit architecture vmalloc space is limited, and hence we
have to map/unmap ptes when required.

Hence, add new APIs for arm64 and call them when
IS_ENABLED(CONFIG_ARM64) is true.

Bug 1443071

Change-Id: I091d1d6a3883a1b158c5c88baeeff1882ea1fc8b
Signed-off-by: Deepak Nibade
Reviewed-on: http://git-master/r/387642
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 94 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)

(limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')

diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 208e5291..d7f0f8ef 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -475,6 +475,18 @@ static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 	FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
 }
 #else
+/* APIs for 64 bit arch */
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size);
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size);
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size);
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 			    void **handle,
 			    struct sg_table **sgt,
@@ -490,6 +502,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __alloc_gmmu_pages(vm, order, handle, sgt, size);
+
 	*size = len;
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 	pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
@@ -529,6 +544,11 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 	gk20a_dbg_fn("");
 	BUG_ON(sgt == NULL);
 
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__free_gmmu_pages(vm, handle, sgt, order, size);
+		return;
+	}
+
 	iova = sg_dma_address(sgt->sgl);
 
 	gk20a_free_sgtable(&sgt);
@@ -546,6 +566,9 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 	struct page **pages = (struct page **)handle;
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __map_gmmu_pages(handle, sgt, kva, size);
+
 	*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
 	if (!(*kva))
 		return -ENOMEM;
@@ -556,8 +579,79 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 {
 	gk20a_dbg_fn("");
+
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__unmap_gmmu_pages(handle, sgt, va);
+		return;
+	}
+
 	vunmap(va);
 }
+
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size)
+{
+	struct device *d = dev_from_vm(vm);
+	u32 num_pages = 1 << order;
+	u32 len = num_pages * PAGE_SIZE;
+	dma_addr_t iova;
+	void *cpuva;
+	int err = 0;
+
+	*size = len;
+	cpuva = dma_zalloc_coherent(d, len, &iova, GFP_KERNEL);
+	if (!cpuva) {
+		gk20a_err(d, "memory allocation failed\n");
+		goto err_out;
+	}
+
+	err = gk20a_get_sgtable(d, sgt, cpuva, iova, len);
+	if (err) {
+		gk20a_err(d, "sgt allocation failed\n");
+		goto err_free;
+	}
+
+	*handle = cpuva;
+
+	return 0;
+
+err_free:
+	dma_free_coherent(d, len, cpuva, iova);
+	cpuva = NULL;
+	iova = 0;
+err_out:
+	return -ENOMEM;
+}
+
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size)
+{
+	struct device *d = dev_from_vm(vm);
+	u64 iova;
+
+	iova = sg_dma_address(sgt->sgl);
+
+	gk20a_free_sgtable(&sgt);
+
+	dma_free_coherent(d, size, handle, iova);
+	handle = NULL;
+	iova = 0;
+}
+
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size)
+{
+	*kva = handle;
+	return 0;
+}
+
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
+{
+	gk20a_dbg_fn("");
+}
 #endif
 
 /* allocate a phys contig region big enough for a full
-- 
cgit v1.2.2
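
The IS_ENABLED(CONFIG_ARM64) checks in the patch dispatch at compile time:
the macro expands to the integer constant 1 or 0 depending on the Kconfig
option, so the compiler discards the untaken branch while still parsing and
type-checking both paths, which a plain #ifdef would not do. Below is a
minimal userspace sketch of the idiom, simplified from the kernel's
include/linux/kconfig.h to the built-in (=y) case; the map_pages_64bit and
map_pages_32bit helpers are illustrative stand-ins, not functions from the
driver.

/*
 * Userspace sketch of the IS_ENABLED() idiom, simplified from
 * include/linux/kconfig.h (built-in case only; the real macro also
 * handles =m options). CONFIG_ARM64 is defined here by hand; in the
 * kernel it comes from Kconfig.
 */
#include <stdio.h>

#define CONFIG_ARM64 1		/* comment out to take the 32 bit path */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

/* Illustrative stand-ins for the driver's two mapping strategies. */
static int map_pages_64bit(void)
{
	puts("64 bit: reuse the permanent coherent mapping");
	return 0;
}

static int map_pages_32bit(void)
{
	puts("32 bit: vmap() the pages on demand");
	return 0;
}

static int map_gmmu_pages(void)
{
	/*
	 * IS_ENABLED() folds to the constant 1 or 0, so the compiler
	 * drops the dead branch, yet both branches still compile.
	 */
	if (IS_ENABLED(CONFIG_ARM64))
		return map_pages_64bit();
	return map_pages_32bit();
}

int main(void)
{
	return map_gmmu_pages();
}

Because the dead branch is still compiled, breakage in the 32 bit path shows
up even when building for arm64, which is the usual reason kernel code
prefers this pattern over preprocessor conditionals.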
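
The space trade-off that motivates the patch can be made concrete with a
little arithmetic. The sketch below computes the kernel virtual address
space consumed by keeping every PTE mapped; the GPU VA span and PTE size
are assumed, illustrative values, not numbers taken from the gk20a code.

/*
 * Back-of-the-envelope cost of keeping every PTE mapped. All sizes
 * below are assumptions for illustration, not values from the driver.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t gpu_va_span = 128ULL << 30;	/* assumed 128 GiB GPU VA */
	const uint64_t small_page  = 4096;		/* 4 KiB small pages */
	const uint64_t pte_size    = 8;			/* assumed 8 bytes per PTE */

	uint64_t pte_bytes = gpu_va_span / small_page * pte_size;

	/* Prints 256 MiB for the assumed sizes above. */
	printf("PTE memory kept mapped: %llu MiB\n",
	       (unsigned long long)(pte_bytes >> 20));
	return 0;
}

A result in this range would crowd a 32 bit ARM vmalloc arena, which by
default is on the order of a couple hundred MiB, so the 32 bit path must
map and unmap page tables on demand; arm64's kernel VA space is larger by
orders of magnitude, so the same mapping is cheap to keep around
permanently, which is exactly the split the patch encodes.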