summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2014-08-27 08:48:35 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:11:00 -0400
commit69e0cd3dfd8f39bc8d3529325001dcacd774f669 (patch)
tree1b7ed0b7d8e38f4a70a79dfab0852e6a2742f21a /drivers
parente36d080b82aa4f14b3ed22bdf405d705d31094db (diff)
gpu: nvgpu: manage phys pages at runtime
Current implementation is based on the config GK20A_PHYS_PAGE_TABLES to have APIs to create/free/map/unmap phys pages. Remove this config-based implementation and move the APIs so that they are called at runtime based on tegra_platform_is_linsim(). In the generic APIs, we first check if the platform is linsim and, if it is, we forward the call to the phys-page-specific APIs. Change-Id: I23eb6fa6a46b804441f18fc37e2390d938d62515 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: http://git-master/r/488843 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/ltc_common.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/ltc_gk20a.c2
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.c28
-rw-r--r--drivers/gpu/nvgpu/gm20b/ltc_gm20b.c2
4 files changed, 23 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index b4e3fc75..d3080db1 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -229,7 +229,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
229 u64 compbit_store_base_iova; 229 u64 compbit_store_base_iova;
230 u64 compbit_base_post_divide64; 230 u64 compbit_base_post_divide64;
231 231
232 if (IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) 232 if (tegra_platform_is_linsim())
233 compbit_store_base_iova = gr->compbit_store.base_iova; 233 compbit_store_base_iova = gr->compbit_store.base_iova;
234 else 234 else
235 compbit_store_base_iova = NV_MC_SMMU_VADDR_TRANSLATE( 235 compbit_store_base_iova = NV_MC_SMMU_VADDR_TRANSLATE(
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index db00fa1a..a966e95c 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -82,7 +82,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
82 gk20a_dbg_info("max comptag lines : %d", 82 gk20a_dbg_info("max comptag lines : %d",
83 max_comptag_lines); 83 max_comptag_lines);
84 84
85 if (IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) 85 if (tegra_platform_is_linsim())
86 err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size); 86 err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size);
87 else 87 else
88 err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size); 88 err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 92095162..654938b2 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -399,8 +399,7 @@ int gk20a_init_mm_support(struct gk20a *g)
399 return err; 399 return err;
400} 400}
401 401
402#ifdef CONFIG_GK20A_PHYS_PAGE_TABLES 402static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
403static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
404 void **handle, 403 void **handle,
405 struct sg_table **sgt, 404 struct sg_table **sgt,
406 size_t *size) 405 size_t *size)
@@ -443,18 +442,17 @@ err_out:
443 return -ENOMEM; 442 return -ENOMEM;
444} 443}
445 444
446void free_gmmu_pages(struct vm_gk20a *vm, void *handle, 445void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
447 struct sg_table *sgt, u32 order, 446 struct sg_table *sgt, u32 order,
448 size_t size) 447 size_t size)
449{ 448{
450 gk20a_dbg_fn(""); 449 gk20a_dbg_fn("");
451 BUG_ON(sgt == NULL);
452 free_pages((unsigned long)handle, order); 450 free_pages((unsigned long)handle, order);
453 sg_free_table(sgt); 451 sg_free_table(sgt);
454 kfree(sgt); 452 kfree(sgt);
455} 453}
456 454
457int map_gmmu_pages(void *handle, struct sg_table *sgt, 455int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
458 void **va, size_t size) 456 void **va, size_t size)
459{ 457{
460 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); 458 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
@@ -462,11 +460,10 @@ int map_gmmu_pages(void *handle, struct sg_table *sgt,
462 return 0; 460 return 0;
463} 461}
464 462
465void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va) 463void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va)
466{ 464{
467 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); 465 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
468} 466}
469#else
470 467
471static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order, 468static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
472 void **handle, 469 void **handle,
@@ -484,6 +481,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
484 481
485 gk20a_dbg_fn(""); 482 gk20a_dbg_fn("");
486 483
484 if (tegra_platform_is_linsim())
485 return alloc_gmmu_phys_pages(vm, order, handle, sgt, size);
486
487 *size = len; 487 *size = len;
488 488
489 if (IS_ENABLED(CONFIG_ARM64)) { 489 if (IS_ENABLED(CONFIG_ARM64)) {
@@ -545,6 +545,11 @@ void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
545 gk20a_dbg_fn(""); 545 gk20a_dbg_fn("");
546 BUG_ON(sgt == NULL); 546 BUG_ON(sgt == NULL);
547 547
548 if (tegra_platform_is_linsim()) {
549 free_gmmu_phys_pages(vm, handle, sgt, order, size);
550 return;
551 }
552
548 iova = sg_dma_address(sgt->sgl); 553 iova = sg_dma_address(sgt->sgl);
549 554
550 gk20a_free_sgtable(&sgt); 555 gk20a_free_sgtable(&sgt);
@@ -569,6 +574,9 @@ int map_gmmu_pages(void *handle, struct sg_table *sgt,
569 struct page **pages; 574 struct page **pages;
570 gk20a_dbg_fn(""); 575 gk20a_dbg_fn("");
571 576
577 if (tegra_platform_is_linsim())
578 return map_gmmu_phys_pages(handle, sgt, kva, size);
579
572 if (IS_ENABLED(CONFIG_ARM64)) { 580 if (IS_ENABLED(CONFIG_ARM64)) {
573 *kva = handle; 581 *kva = handle;
574 } else { 582 } else {
@@ -585,11 +593,15 @@ void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
585{ 593{
586 gk20a_dbg_fn(""); 594 gk20a_dbg_fn("");
587 595
596 if (tegra_platform_is_linsim()) {
597 unmap_gmmu_phys_pages(handle, sgt, va);
598 return;
599 }
600
588 if (!IS_ENABLED(CONFIG_ARM64)) 601 if (!IS_ENABLED(CONFIG_ARM64))
589 vunmap(va); 602 vunmap(va);
590 va = NULL; 603 va = NULL;
591} 604}
592#endif
593 605
594/* allocate a phys contig region big enough for a full 606/* allocate a phys contig region big enough for a full
595 * sized gmmu page table for the given gmmu_page_size. 607 * sized gmmu page table for the given gmmu_page_size.
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 759b1d5a..f749279c 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -81,7 +81,7 @@ static int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
81 gk20a_dbg_info("max comptag lines : %d", 81 gk20a_dbg_info("max comptag lines : %d",
82 max_comptag_lines); 82 max_comptag_lines);
83 83
84 if (IS_ENABLED(CONFIG_GK20A_PHYS_PAGE_TABLES)) 84 if (tegra_platform_is_linsim())
85 err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size); 85 err = gk20a_ltc_alloc_phys_cbc(g, compbit_backing_size);
86 else 86 else
87 err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size); 87 err = gk20a_ltc_alloc_virt_cbc(g, compbit_backing_size);