Diffstat (limited to 'arch/arm/plat-omap/iovmm.c')
-rw-r--r--  arch/arm/plat-omap/iovmm.c  115
1 file changed, 67 insertions(+), 48 deletions(-)
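
Note: this patch moves iovmm off the OMAP-private page-table calls (iotlb_init_entry(), iopgtable_store_entry(), iopgtable_clear_entry()) and onto the generic IOMMU API. A struct iommu_domain is threaded through every exported iommu_v*/iommu_k* entry point, and each scatterlist element is mapped with iommu_map() and torn down with iommu_unmap(), with the element size expressed as a page order via get_order(). Below is a minimal sketch of that per-scatterlist pattern; it is not part of the patch, the helper name map_sg_to_domain() is made up for illustration, and it assumes the order-based iommu_map()/iommu_unmap() signatures visible in the diff that follows.

#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <asm/page.h>		/* get_order() */

/* Illustrative helper, not from the patch: map every sg element into an
 * iommu_domain at increasing device addresses, unwinding on failure. */
static int map_sg_to_domain(struct iommu_domain *domain, u32 da_start,
			    const struct sg_table *sgt, int prot)
{
	struct scatterlist *sg;
	u32 da = da_start;
	unsigned int i, j;
	int err;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;

		/* order-based iommu_map(), as used by this patch */
		err = iommu_map(domain, da, sg_phys(sg), get_order(bytes), prot);
		if (err)
			goto unwind;
		da += bytes;
	}
	return 0;

unwind:
	/* undo the elements mapped so far; element i itself never mapped */
	da = da_start;
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length;

		iommu_unmap(domain, da, get_order(bytes));
		da += bytes;
	}
	return err;
}

The error path mirrors the err_out unwind in map_iovm_area() below: already-mapped elements are unmapped again, and failures during the unwind are ignored because one error is already being handled.
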
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 79e7fedb8602..aa2c47893b02 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -15,6 +15,7 @@
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/mach/map.h>
@@ -453,39 +454,38 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
 }
 
 /* create 'da' <-> 'pa' mapping from 'sgt' */
-static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			const struct sg_table *sgt, u32 flags)
 {
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;
+	int order;
 
-	if (!obj || !sgt)
+	if (!domain || !sgt)
		return -EINVAL;
 
	BUG_ON(!sgtable_ok(sgt));
 
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
-		int pgsz;
		size_t bytes;
-		struct iotlb_entry e;
 
		pa = sg_phys(sg);
		bytes = sg->length;
 
		flags &= ~IOVMF_PGSZ_MASK;
-		pgsz = bytes_to_iopgsz(bytes);
-		if (pgsz < 0)
+
+		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;
-		flags |= pgsz;
+
+		order = get_order(bytes);
 
		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);
 
-		iotlb_init_entry(&e, da, pa, flags);
-		err = iopgtable_store_entry(obj, &e);
+		err = iommu_map(domain, da, pa, order, flags);
		if (err)
			goto err_out;
 
@@ -499,9 +499,11 @@ err_out:
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;
 
-		bytes = iopgtable_clear_entry(obj, da);
+		bytes = sg->length;
+		order = get_order(bytes);
 
-		BUG_ON(!iopgsz_ok(bytes));
+		/* ignore failures.. we're already handling one */
+		iommu_unmap(domain, da, order);
 
		da += bytes;
	}
@@ -509,22 +511,31 @@ err_out:
 }
 
 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+			struct iovm_struct *area)
 {
	u32 start;
	size_t total = area->da_end - area->da_start;
+	const struct sg_table *sgt = area->sgt;
+	struct scatterlist *sg;
+	int i, err;
 
+	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
 
	start = area->da_start;
-	while (total > 0) {
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
+		int order;
+
+		bytes = sg->length;
+		order = get_order(bytes);
+
+		err = iommu_unmap(domain, start, order);
+		if (err)
+			break;
 
-		bytes = iopgtable_clear_entry(obj, start);
-		if (bytes == 0)
-			bytes = PAGE_SIZE;
-		else
-			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
-				__func__, start, bytes, area->flags);
+		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);
 
		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
@@ -536,7 +547,8 @@ static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
 }
 
 /* template function for all unmapping */
-static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+			struct iommu *obj, const u32 da,
	    void (*fn)(const void *), u32 flags)
 {
	struct sg_table *sgt = NULL;
@@ -562,7 +574,7 @@ static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
	}
	sgt = (struct sg_table *)area->sgt;
 
-	unmap_iovm_area(obj, area);
+	unmap_iovm_area(domain, obj, area);
 
	fn(area->va);
 
@@ -577,8 +589,9 @@ out:
	return sgt;
 }
 
-static u32 map_iommu_region(struct iommu *obj, u32 da,
-	    const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt, void *va,
+				size_t bytes, u32 flags)
 {
	int err = -ENOMEM;
	struct iovm_struct *new;
@@ -593,7 +606,7 @@ static u32 map_iommu_region(struct iommu *obj, u32 da,
	new->va = va;
	new->sgt = sgt;
 
-	if (map_iovm_area(obj, new, sgt, new->flags))
+	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;
 
	mutex_unlock(&obj->mmap_lock);
@@ -610,10 +623,11 @@ err_alloc_iovma:
	return err;
 }
 
-static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
-	    const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
+static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+				u32 da, const struct sg_table *sgt,
+				void *va, size_t bytes, u32 flags)
 {
-	return map_iommu_region(obj, da, sgt, va, bytes, flags);
+	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
 }
 
 /**
@@ -625,8 +639,8 @@ static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt element must be io page size aligned.
  */
-u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
-		 u32 flags)
+u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+		const struct sg_table *sgt, u32 flags)
 {
	size_t bytes;
	void *va = NULL;
@@ -648,7 +662,7 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);
 
@@ -664,14 +678,16 @@ EXPORT_SYMBOL_GPL(iommu_vmap);
  * Free the iommu virtually contiguous memory area starting at
  * @da, which was returned by 'iommu_vmap()'.
  */
-struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
+struct sg_table *
+iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmalloc()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
-	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
+	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+					IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
@@ -688,7 +704,8 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
  * Allocate @bytes linearly and creates 1-n-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+						size_t bytes, u32 flags)
 {
	void *va;
	struct sg_table *sgt;
@@ -712,7 +729,7 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
	}
	sgtable_fill_vmalloc(sgt, va);
 
-	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;
 
@@ -735,19 +752,20 @@ EXPORT_SYMBOL_GPL(iommu_vmalloc);
  * Frees the iommu virtually continuous memory area starting at
  * @da, as obtained from 'iommu_vmalloc()'.
  */
-void iommu_vfree(struct iommu *obj, const u32 da)
+void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 {
	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, vfree,
+						IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
 }
 EXPORT_SYMBOL_GPL(iommu_vfree);
 
-static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
-	    size_t bytes, u32 flags)
+static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, u32 pa, void *va, size_t bytes, u32 flags)
 {
	struct sg_table *sgt;
 
@@ -757,7 +775,7 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 
	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
-	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
+	da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
@@ -776,8 +794,8 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
  * Creates 1-1-1 mapping and returns @da again, which can be
  * adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-		 u32 flags)
+u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
+		size_t bytes, u32 flags)
 {
	void *va;
 
@@ -793,7 +811,7 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);
 
@@ -809,12 +827,12 @@ EXPORT_SYMBOL_GPL(iommu_kmap);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by'iommu_kmap()'.
  */
-void iommu_kunmap(struct iommu *obj, u32 da)
+void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);
 
-	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
+	sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
@@ -832,7 +850,8 @@ EXPORT_SYMBOL_GPL(iommu_kunmap);
  * Allocate @bytes linearly and creates 1-1-1 mapping and returns
  * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
-u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
+u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+				size_t bytes, u32 flags)
 {
	void *va;
	u32 pa;
@@ -850,7 +869,7 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
 
-	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);
 
@@ -866,11 +885,11 @@ EXPORT_SYMBOL_GPL(iommu_kmalloc);
  * Frees the iommu virtually contiguous memory area starting at
  * @da, which was passed to and was returned by'iommu_kmalloc()'.
  */
-void iommu_kfree(struct iommu *obj, u32 da)
+void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
 {
	struct sg_table *sgt;
 
-	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
+	sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
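
For reference, a hypothetical caller fragment (not part of the patch; the header path is an assumption) illustrating how the reworked prototypes are used: every exported iovmm call now takes the struct iommu_domain ahead of the struct iommu object.

#include <linux/err.h>
#include <plat/iovmm.h>		/* assumed header for the iovmm prototypes */

/* Hypothetical caller, not from this patch: allocate and free a
 * device-visible buffer through the converted iovmm entry points. */
static int iovmm_roundtrip_example(struct iommu_domain *domain,
				   struct iommu *obj, u32 da, size_t bytes,
				   u32 flags)
{
	u32 mapped;

	/* before this patch: iommu_vmalloc(obj, da, bytes, flags) */
	mapped = iommu_vmalloc(domain, obj, da, bytes, flags);
	if (IS_ERR_VALUE(mapped))
		return (int)mapped;

	/* before this patch: iommu_vfree(obj, mapped) */
	iommu_vfree(domain, obj, mapped);
	return 0;
}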