author	Deepak Nibade <dnibade@nvidia.com>	2014-04-07 08:43:39 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:09:00 -0400
commit	fa6f7f882d42022fb419371116124943c1eb82e1 (patch)
tree	c2437fa52f6a733ff6bcd723ca0738ac29f739f3 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent	f5fe93456f6e47cf0fea0f4c5af8f797e51498c0 (diff)

gpu: nvgpu: gk20a: remove code duplication

Bug 1443071

Change-Id: I225114835a5923061462e238395798b274cadd7b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	177
1 file changed, 61 insertions(+), 116 deletions(-)
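The patch merges the forward-declared 64-bit helpers (__alloc_gmmu_pages, __free_gmmu_pages, __map_gmmu_pages, __unmap_gmmu_pages) into the common alloc/free/map/unmap_gmmu_pages functions, selecting the coherent-DMA path with an IS_ENABLED(CONFIG_ARM64) branch instead of an early call into a duplicated implementation. The following standalone userspace C sketch illustrates that pattern under assumed names; CONFIG_DEMO_COHERENT, the simplified IS_ENABLED() macro, alloc_buf() and free_buf() are invented for the example and are not driver or kernel code.

/*
 * Sketch of the deduplication pattern: one function with a
 * compile-time branch instead of a separate "__" helper per
 * configuration.  All names below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef CONFIG_DEMO_COHERENT
#define CONFIG_DEMO_COHERENT 0      /* build-time switch, playing the role of CONFIG_ARM64 */
#endif
#define IS_ENABLED(option) (option) /* the kernel macro is more elaborate; same idea */

static void *alloc_buf(size_t len)
{
	void *buf;

	if (IS_ENABLED(CONFIG_DEMO_COHERENT)) {
		/* "coherent" path: zeroed allocation, nothing else to do */
		buf = calloc(1, len);
	} else {
		/* fallback path: plain allocation plus an explicit clear */
		buf = malloc(len);
		if (buf)
			memset(buf, 0, len);
	}
	return buf;
}

static void free_buf(void *buf)
{
	free(buf);	/* cleanup is shared by both paths */
}

int main(void)
{
	void *buf = alloc_buf(64);

	if (!buf)
		return 1;
	printf("allocated 64 bytes, first byte = %d\n", ((unsigned char *)buf)[0]);
	free_buf(buf);
	return 0;
}

Because the condition is a compile-time constant, the compiler drops the untaken branch, so the merged function adds no runtime overhead while both variants keep being compiled and type-checked, which is why the kernel generally prefers IS_ENABLED() over #ifdef blocks.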
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index d7f0f8ef..5440b3c2 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -475,17 +475,6 @@ static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 	FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
 }
 #else
-/* APIs for 64 bit arch */
-static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
-			      void **handle,
-			      struct sg_table **sgt,
-			      size_t *size);
-static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
-			      struct sg_table *sgt, u32 order,
-			      size_t size);
-static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
-			    void **kva, size_t size);
-static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
 
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 			    void **handle,
@@ -498,35 +487,55 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	dma_addr_t iova;
 	DEFINE_DMA_ATTRS(attrs);
 	struct page **pages;
+	void *cpuva;
 	int err = 0;
 
 	gk20a_dbg_fn("");
 
-	if (IS_ENABLED(CONFIG_ARM64))
-		return __alloc_gmmu_pages(vm, order, handle, sgt, size);
-
 	*size = len;
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-	pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
-	if (!pages) {
-		gk20a_err(d, "memory allocation failed\n");
-		goto err_out;
-	}
 
-	err = gk20a_get_sgtable_from_pages(d, sgt, pages,
-				iova, len);
-	if (err) {
-		gk20a_err(d, "sgt allocation failed\n");
-		goto err_free;
-	}
-
-	*handle = (void *)pages;
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		cpuva = dma_zalloc_coherent(d, len, &iova, GFP_KERNEL);
+		if (!cpuva) {
+			gk20a_err(d, "memory allocation failed\n");
+			goto err_out;
+		}
+
+		err = gk20a_get_sgtable(d, sgt, cpuva, iova, len);
+		if (err) {
+			gk20a_err(d, "sgt allocation failed\n");
+			goto err_free;
+		}
+
+		*handle = cpuva;
+	} else {
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+		pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
+		if (!pages) {
+			gk20a_err(d, "memory allocation failed\n");
+			goto err_out;
+		}
+
+		err = gk20a_get_sgtable_from_pages(d, sgt, pages,
+					iova, len);
+		if (err) {
+			gk20a_err(d, "sgt allocation failed\n");
+			goto err_free;
+		}
+
+		*handle = (void *)pages;
+	}
 
 	return 0;
 
 err_free:
-	dma_free_attrs(d, len, pages, iova, &attrs);
-	pages = NULL;
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		dma_free_coherent(d, len, handle, iova);
+		cpuva = NULL;
+	} else {
+		dma_free_attrs(d, len, pages, iova, &attrs);
+		pages = NULL;
+	}
 	iova = 0;
 err_out:
 	return -ENOMEM;
@@ -539,23 +548,25 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 	struct device *d = dev_from_vm(vm);
 	u64 iova;
 	DEFINE_DMA_ATTRS(attrs);
-	struct page **pages = (struct page **)handle;
+	struct page **pages;
 
 	gk20a_dbg_fn("");
 	BUG_ON(sgt == NULL);
 
-	if (IS_ENABLED(CONFIG_ARM64)) {
-		__free_gmmu_pages(vm, handle, sgt, order, size);
-		return;
-	}
-
 	iova = sg_dma_address(sgt->sgl);
 
 	gk20a_free_sgtable(&sgt);
 
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-	dma_free_attrs(d, size, pages, iova, &attrs);
-	pages = NULL;
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		dma_free_coherent(d, size, handle, iova);
+	} else {
+		pages = (struct page **)handle;
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+		dma_free_attrs(d, size, pages, iova, &attrs);
+		pages = NULL;
+	}
+
+	handle = NULL;
 	iova = 0;
 }
 
@@ -563,94 +574,28 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 			  void **kva, size_t size)
 {
 	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page **pages = (struct page **)handle;
-	gk20a_dbg_fn("");
-
-	if (IS_ENABLED(CONFIG_ARM64))
-		return __map_gmmu_pages(handle, sgt, kva, size);
-
-	*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
-	if (!(*kva))
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
-{
+	struct page **pages;
 	gk20a_dbg_fn("");
 
 	if (IS_ENABLED(CONFIG_ARM64)) {
-		__unmap_gmmu_pages(handle, sgt, va);
-		return;
-	}
-
-	vunmap(va);
-}
-
-static int __alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
-			      void **handle,
-			      struct sg_table **sgt,
-			      size_t *size)
-{
-	struct device *d = dev_from_vm(vm);
-	u32 num_pages = 1 << order;
-	u32 len = num_pages * PAGE_SIZE;
-	dma_addr_t iova;
-	void *cpuva;
-	int err = 0;
-
-	*size = len;
-	cpuva = dma_zalloc_coherent(d, len, &iova, GFP_KERNEL);
-	if (!cpuva) {
-		gk20a_err(d, "memory allocation failed\n");
-		goto err_out;
-	}
-
-	err = gk20a_get_sgtable(d, sgt, cpuva, iova, len);
-	if (err) {
-		gk20a_err(d, "sgt allocation failed\n");
-		goto err_free;
+		*kva = handle;
+	} else {
+		pages = (struct page **)handle;
+		*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
+		if (!(*kva))
+			return -ENOMEM;
 	}
 
-	*handle = cpuva;
-
-	return 0;
-
-err_free:
-	dma_free_coherent(d, len, cpuva, iova);
-	cpuva = NULL;
-	iova = 0;
-err_out:
-	return -ENOMEM;
-}
-
-static void __free_gmmu_pages(struct vm_gk20a *vm, void *handle,
-			      struct sg_table *sgt, u32 order,
-			      size_t size)
-{
-	struct device *d = dev_from_vm(vm);
-	u64 iova;
-
-	iova = sg_dma_address(sgt->sgl);
-
-	gk20a_free_sgtable(&sgt);
-
-	dma_free_coherent(d, size, handle, iova);
-	handle = NULL;
-	iova = 0;
-}
-
-static int __map_gmmu_pages(void *handle, struct sg_table *sgt,
-			    void **kva, size_t size)
-{
-	*kva = handle;
 	return 0;
 }
 
-static void __unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
+static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 {
 	gk20a_dbg_fn("");
+
+	if (!IS_ENABLED(CONFIG_ARM64))
+		vunmap(va);
+	va = NULL;
 }
 #endif
 