From 69e0cd3dfd8f39bc8d3529325001dcacd774f669 Mon Sep 17 00:00:00 2001 From: Deepak Nibade Date: Wed, 27 Aug 2014 18:18:35 +0530 Subject: gpu: nvgpu: manage phys pages at runtime Current implementation is based on config GK20A_PHYS_PAGE_TABLES to have APIs to create/free/map/unmap phys pages Remove this config based implementation and move the APIs so that they are called at runtime based on tegra_platform_is_linsim() In generic APIs, we first check if platform is linsim and if it is then we forward the call to phys page specific APIs Change-Id: I23eb6fa6a46b804441f18fc37e2390d938d62515 Signed-off-by: Deepak Nibade Reviewed-on: http://git-master/r/488843 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom Tested-by: Terje Bergstrom --- drivers/gpu/nvgpu/gk20a/ltc_common.c | 2 +- drivers/gpu/nvgpu/gk20a/ltc_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 28 ++++++++++++++++++++-------- drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 2 +- 4 files changed, 23 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/nvgpu') diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c index b4e3fc75..d3080db1 100644 --- a/drivers/gpu/nvgpu/gk20a/ltc_common.c +++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c @@ -229,7 +229,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) u64 compbit_store_base_iova; u64 compbit_base_post_divide64; - if (IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) + if (tegra_platform_is_linsim()) compbit_store_base_iova = gr->compbit_store.base_iova; else compbit_store_base_iova = NV_MC_SMMU_VADDR_TRANSLATE( diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c index db00fa1a..a966e95c 100644 --- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c @@ -82,7 +82,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) gk20a_dbg_info("max comptag lines : %d", max_comptag_lines); - if 
(IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) + if (tegra_platform_is_linsim()) err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size); else err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size); diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 92095162..654938b2 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -399,8 +399,7 @@ int gk20a_init_mm_support(struct gk20a *g) return err; } -#ifdef CONFIG_GK20A_PHYS_PAGE_TABLES -static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, +static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order, void **handle, struct sg_table **sgt, size_t *size) @@ -443,18 +442,17 @@ err_out: return -ENOMEM; } -void free_gmmu_pages(struct vm_gk20a *vm, void *handle, +void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle, struct sg_table *sgt, u32 order, size_t size) { gk20a_dbg_fn(""); - BUG_ON(sgt == NULL); free_pages((unsigned long)handle, order); sg_free_table(sgt); kfree(sgt); } -int map_gmmu_pages(void *handle, struct sg_table *sgt, +int map_gmmu_phys_pages(void *handle, struct sg_table *sgt, void **va, size_t size) { FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); @@ -462,11 +460,10 @@ int map_gmmu_pages(void *handle, struct sg_table *sgt, return 0; } -void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va) +void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va) { FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); } -#else static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, void **handle, @@ -484,6 +481,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, gk20a_dbg_fn(""); + if (tegra_platform_is_linsim()) + return alloc_gmmu_phys_pages(vm, order, handle, sgt, size); + *size = len; if (IS_ENABLED(CONFIG_ARM64)) { @@ -545,6 +545,11 @@ void free_gmmu_pages(struct vm_gk20a *vm, void *handle, gk20a_dbg_fn(""); BUG_ON(sgt == NULL); + if 
(tegra_platform_is_linsim()) { + free_gmmu_phys_pages(vm, handle, sgt, order, size); + return; + } + iova = sg_dma_address(sgt->sgl); gk20a_free_sgtable(&sgt); @@ -569,6 +574,9 @@ int map_gmmu_pages(void *handle, struct sg_table *sgt, struct page **pages; gk20a_dbg_fn(""); + if (tegra_platform_is_linsim()) + return map_gmmu_phys_pages(handle, sgt, kva, size); + if (IS_ENABLED(CONFIG_ARM64)) { *kva = handle; } else { @@ -585,11 +593,15 @@ void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va) { gk20a_dbg_fn(""); + if (tegra_platform_is_linsim()) { + unmap_gmmu_phys_pages(handle, sgt, va); + return; + } + if (!IS_ENABLED(CONFIG_ARM64)) vunmap(va); va = NULL; } -#endif /* allocate a phys contig region big enough for a full * sized gmmu page table for the given gmmu_page_size. diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c index 759b1d5a..f749279c 100644 --- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c @@ -81,7 +81,7 @@ static int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) gk20a_dbg_info("max comptag lines : %d", max_comptag_lines); - if (IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) + if (tegra_platform_is_linsim()) err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size); else err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size); -- cgit v1.2.2