path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  94
1 file changed, 94 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 208e5291..d7f0f8ef 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -475,6 +475,18 @@ static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 	FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
 }
 #else
+/* APIs for 64 bit arch */
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size);
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size);
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size);
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 			    void **handle,
 			    struct sg_table **sgt,
@@ -490,6 +502,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __alloc_gmmu_pages(vm, order, handle, sgt, size);
+
 	*size = len;
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 	pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
@@ -529,6 +544,11 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 	gk20a_dbg_fn("");
 	BUG_ON(sgt == NULL);
 
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__free_gmmu_pages(vm, handle, sgt, order, size);
+		return;
+	}
+
 	iova = sg_dma_address(sgt->sgl);
 
 	gk20a_free_sgtable(&sgt);
@@ -546,6 +566,9 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 	struct page **pages = (struct page **)handle;
 	gk20a_dbg_fn("");
 
+	if (IS_ENABLED(CONFIG_ARM64))
+		return __map_gmmu_pages(handle, sgt, kva, size);
+
 	*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
 	if (!(*kva))
 		return -ENOMEM;
@@ -556,8 +579,79 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 {
 	gk20a_dbg_fn("");
+
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		__unmap_gmmu_pages(handle, sgt, va);
+		return;
+	}
+
 	vunmap(va);
 }
+
+static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
+			      void **handle,
+			      struct sg_table **sgt,
+			      size_t *size)
+{
+	struct device *d = dev_from_vm(vm);
+	u32 num_pages = 1 << order;
+	u32 len = num_pages * PAGE_SIZE;
+	dma_addr_t iova;
+	void *cpuva;
+	int err = 0;
+
+	*size = len;
+	cpuva = dma_zalloc_coherent(d, len, &iova, GFP_KERNEL);
+	if (!cpuva) {
+		gk20a_err(d, "memory allocation failed\n");
+		goto err_out;
+	}
+
+	err = gk20a_get_sgtable(d, sgt, cpuva, iova, len);
+	if (err) {
+		gk20a_err(d, "sgt allocation failed\n");
+		goto err_free;
+	}
+
+	*handle = cpuva;
+
+	return 0;
+
+err_free:
+	dma_free_coherent(d, len, cpuva, iova);
+	cpuva = NULL;
+	iova = 0;
+err_out:
+	return -ENOMEM;
+}
+
+static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+			      struct sg_table *sgt, u32 order,
+			      size_t size)
+{
+	struct device *d = dev_from_vm(vm);
+	u64 iova;
+
+	iova = sg_dma_address(sgt->sgl);
+
+	gk20a_free_sgtable(&sgt);
+
+	dma_free_coherent(d, size, handle, iova);
+	handle = NULL;
+	iova = 0;
+}
+
+static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
+			    void **kva, size_t size)
+{
+	*kva = handle;
+	return 0;
+}
+
+static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
+{
+	gk20a_dbg_fn("");
+}
 #endif
 
 /* allocate a phys contig region big enough for a full