aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMagnus Damm <damm+renesas@opensource.se>2017-05-17 06:07:10 -0400
committerJoerg Roedel <jroedel@suse.de>2017-05-17 09:21:53 -0400
commit3ae47292024f85e82b769044c43f0bd13adcd7e8 (patch)
tree5f20407183ee70c6bfb104c362d6488af9118f63
parent8e73bf659135ee7333d9a762bbdadb2ad794452f (diff)
iommu/ipmmu-vmsa: Add new IOMMU_DOMAIN_DMA ops
Introduce an alternative set of iommu_ops suitable for 64-bit ARM as well as 32-bit ARM when CONFIG_IOMMU_DMA=y. Also adjust the Kconfig to depend on ARM or IOMMU_DMA. Initialize the device from ->xlate() when CONFIG_IOMMU_DMA=y. Signed-off-by: Magnus Damm <damm+renesas@opensource.se> Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/Kconfig1
-rw-r--r--drivers/iommu/ipmmu-vmsa.c164
2 files changed, 156 insertions, 9 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6ee3a25ae731..504ba025a54c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -274,6 +274,7 @@ config EXYNOS_IOMMU_DEBUG
274 274
275config IPMMU_VMSA 275config IPMMU_VMSA
276 bool "Renesas VMSA-compatible IPMMU" 276 bool "Renesas VMSA-compatible IPMMU"
277 depends on ARM || IOMMU_DMA
277 depends on ARM_LPAE 278 depends on ARM_LPAE
278 depends on ARCH_RENESAS || COMPILE_TEST 279 depends on ARCH_RENESAS || COMPILE_TEST
279 select IOMMU_API 280 select IOMMU_API
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index eb5008596051..8b648f6e0e4d 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/bitmap.h> 11#include <linux/bitmap.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/dma-iommu.h>
13#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
14#include <linux/err.h> 15#include <linux/err.h>
15#include <linux/export.h> 16#include <linux/export.h>
@@ -22,8 +23,10 @@
22#include <linux/sizes.h> 23#include <linux/sizes.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24 25
26#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
25#include <asm/dma-iommu.h> 27#include <asm/dma-iommu.h>
26#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#endif
27 30
28#include "io-pgtable.h" 31#include "io-pgtable.h"
29 32
@@ -57,6 +60,8 @@ struct ipmmu_vmsa_archdata {
57 struct ipmmu_vmsa_device *mmu; 60 struct ipmmu_vmsa_device *mmu;
58 unsigned int *utlbs; 61 unsigned int *utlbs;
59 unsigned int num_utlbs; 62 unsigned int num_utlbs;
63 struct device *dev;
64 struct list_head list;
60}; 65};
61 66
62static DEFINE_SPINLOCK(ipmmu_devices_lock); 67static DEFINE_SPINLOCK(ipmmu_devices_lock);
@@ -522,14 +527,6 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
522 return &domain->io_domain; 527 return &domain->io_domain;
523} 528}
524 529
525static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
526{
527 if (type != IOMMU_DOMAIN_UNMANAGED)
528 return NULL;
529
530 return __ipmmu_domain_alloc(type);
531}
532
533static void ipmmu_domain_free(struct iommu_domain *io_domain) 530static void ipmmu_domain_free(struct iommu_domain *io_domain)
534{ 531{
535 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); 532 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -572,7 +569,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
572 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", 569 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
573 dev_name(mmu->dev), dev_name(domain->mmu->dev)); 570 dev_name(mmu->dev), dev_name(domain->mmu->dev));
574 ret = -EINVAL; 571 ret = -EINVAL;
575 } 572 } else
573 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
576 574
577 spin_unlock_irqrestore(&domain->lock, flags); 575 spin_unlock_irqrestore(&domain->lock, flags);
578 576
@@ -708,6 +706,7 @@ static int ipmmu_init_platform_device(struct device *dev)
708 archdata->mmu = mmu; 706 archdata->mmu = mmu;
709 archdata->utlbs = utlbs; 707 archdata->utlbs = utlbs;
710 archdata->num_utlbs = num_utlbs; 708 archdata->num_utlbs = num_utlbs;
709 archdata->dev = dev;
711 dev->archdata.iommu = archdata; 710 dev->archdata.iommu = archdata;
712 return 0; 711 return 0;
713 712
@@ -716,6 +715,16 @@ error:
716 return ret; 715 return ret;
717} 716}
718 717
718#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
719
720static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
721{
722 if (type != IOMMU_DOMAIN_UNMANAGED)
723 return NULL;
724
725 return __ipmmu_domain_alloc(type);
726}
727
719static int ipmmu_add_device(struct device *dev) 728static int ipmmu_add_device(struct device *dev)
720{ 729{
721 struct ipmmu_vmsa_archdata *archdata; 730 struct ipmmu_vmsa_archdata *archdata;
@@ -825,6 +834,141 @@ static const struct iommu_ops ipmmu_ops = {
825 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, 834 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
826}; 835};
827 836
837 #endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */
838
839#ifdef CONFIG_IOMMU_DMA
840
841static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
842static LIST_HEAD(ipmmu_slave_devices);
843
844static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
845{
846 struct iommu_domain *io_domain = NULL;
847
848 switch (type) {
849 case IOMMU_DOMAIN_UNMANAGED:
850 io_domain = __ipmmu_domain_alloc(type);
851 break;
852
853 case IOMMU_DOMAIN_DMA:
854 io_domain = __ipmmu_domain_alloc(type);
855 if (io_domain)
856 iommu_get_dma_cookie(io_domain);
857 break;
858 }
859
860 return io_domain;
861}
862
863static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
864{
865 switch (io_domain->type) {
866 case IOMMU_DOMAIN_DMA:
867 iommu_put_dma_cookie(io_domain);
868 /* fall-through */
869 default:
870 ipmmu_domain_free(io_domain);
871 break;
872 }
873}
874
875static int ipmmu_add_device_dma(struct device *dev)
876{
877 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
878 struct iommu_group *group;
879
880 /* The device has been verified in xlate() */
881 if (!archdata)
882 return -ENODEV;
883
884 group = iommu_group_get_for_dev(dev);
885 if (IS_ERR(group))
886 return PTR_ERR(group);
887
888 spin_lock(&ipmmu_slave_devices_lock);
889 list_add(&archdata->list, &ipmmu_slave_devices);
890 spin_unlock(&ipmmu_slave_devices_lock);
891 return 0;
892}
893
894static void ipmmu_remove_device_dma(struct device *dev)
895{
896 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
897
898 spin_lock(&ipmmu_slave_devices_lock);
899 list_del(&archdata->list);
900 spin_unlock(&ipmmu_slave_devices_lock);
901
902 iommu_group_remove_device(dev);
903}
904
905static struct device *ipmmu_find_sibling_device(struct device *dev)
906{
907 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
908 struct ipmmu_vmsa_archdata *sibling_archdata = NULL;
909 bool found = false;
910
911 spin_lock(&ipmmu_slave_devices_lock);
912
913 list_for_each_entry(sibling_archdata, &ipmmu_slave_devices, list) {
914 if (archdata == sibling_archdata)
915 continue;
916 if (sibling_archdata->mmu == archdata->mmu) {
917 found = true;
918 break;
919 }
920 }
921
922 spin_unlock(&ipmmu_slave_devices_lock);
923
924 return found ? sibling_archdata->dev : NULL;
925}
926
927static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
928{
929 struct iommu_group *group;
930 struct device *sibling;
931
932 sibling = ipmmu_find_sibling_device(dev);
933 if (sibling)
934 group = iommu_group_get(sibling);
935 if (!sibling || IS_ERR(group))
936 group = generic_device_group(dev);
937
938 return group;
939}
940
941static int ipmmu_of_xlate_dma(struct device *dev,
942 struct of_phandle_args *spec)
943{
944 /* If the IPMMU device is disabled in DT then return error
945 * to make sure the of_iommu code does not install ops
946 * even though the iommu device is disabled
947 */
948 if (!of_device_is_available(spec->np))
949 return -ENODEV;
950
951 return ipmmu_init_platform_device(dev);
952}
953
954static const struct iommu_ops ipmmu_ops = {
955 .domain_alloc = ipmmu_domain_alloc_dma,
956 .domain_free = ipmmu_domain_free_dma,
957 .attach_dev = ipmmu_attach_device,
958 .detach_dev = ipmmu_detach_device,
959 .map = ipmmu_map,
960 .unmap = ipmmu_unmap,
961 .map_sg = default_iommu_map_sg,
962 .iova_to_phys = ipmmu_iova_to_phys,
963 .add_device = ipmmu_add_device_dma,
964 .remove_device = ipmmu_remove_device_dma,
965 .device_group = ipmmu_find_group_dma,
966 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
967 .of_xlate = ipmmu_of_xlate_dma,
968};
969
970#endif /* CONFIG_IOMMU_DMA */
971
828/* ----------------------------------------------------------------------------- 972/* -----------------------------------------------------------------------------
829 * Probe/remove and init 973 * Probe/remove and init
830 */ 974 */
@@ -914,7 +1058,9 @@ static int ipmmu_remove(struct platform_device *pdev)
914 list_del(&mmu->list); 1058 list_del(&mmu->list);
915 spin_unlock(&ipmmu_devices_lock); 1059 spin_unlock(&ipmmu_devices_lock);
916 1060
1061#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
917 arm_iommu_release_mapping(mmu->mapping); 1062 arm_iommu_release_mapping(mmu->mapping);
1063#endif
918 1064
919 ipmmu_device_reset(mmu); 1065 ipmmu_device_reset(mmu);
920 1066