author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-09 18:15:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-09 18:15:47 -0400
commit     28b47809b2171a6cfbab839936b24280639c9f85
tree       23c918f66783e269e95680136f80362d9c62070d
parent     4a1e31c68e9f40be32838944931178b0d9ed9162
parent     2c0248d68880fc0e783af1048b3367ee5d4412f0
Merge tag 'iommu-updates-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
- code optimizations for the Intel VT-d driver
- ability to switch off a previously enabled Intel IOMMU
- support for 'struct iommu_device' for OMAP, Rockchip and Mediatek
IOMMUs
- header optimizations for IOMMU core code headers and a few fixes that
became necessary in other parts of the kernel because of that
- ACPI/IORT updates and fixes
- Exynos IOMMU optimizations
- updates for the IOMMU dma-api code to bring it closer to use per-cpu
iova caches
- new command-line option to set default domain type allocated by the
iommu core code
- another command line option to allow the Intel IOMMU switched off in
a tboot environment
- ARM/SMMU: TLB sync optimisations for SMMUv2, Support for using an
IDENTITY domain in conjunction with DMA ops, Support for SMR masking,
Support for 16-bit ASIDs (was previously broken)
- various other small fixes and improvements
* tag 'iommu-updates-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (63 commits)
soc/qbman: Move dma-mapping.h include to qman_priv.h
soc/qbman: Fix implicit header dependency now causing build fails
iommu: Remove trace-events include from iommu.h
iommu: Remove pci.h include from trace/events/iommu.h
arm: dma-mapping: Don't override dma_ops in arch_setup_dma_ops()
ACPI/IORT: Fix CONFIG_IOMMU_API dependency
iommu/vt-d: Don't print the failure message when booting non-kdump kernel
iommu: Move report_iommu_fault() to iommu.c
iommu: Include device.h in iommu.h
x86, iommu/vt-d: Add an option to disable Intel IOMMU force on
iommu/arm-smmu: Return IOVA in iova_to_phys when SMMU is bypassed
iommu/arm-smmu: Correct sid to mask
iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid()
iommu: Make iommu_bus_notifier return NOTIFY_DONE rather than error code
omap3isp: Remove iommu_group related code
iommu/omap: Add iommu-group support
iommu/omap: Make use of 'struct iommu_device'
iommu/omap: Store iommu_dev pointer in arch_data
iommu/omap: Move data structures to omap-iommu.h
iommu/omap: Drop legacy-style device support
...
48 files changed, 1188 insertions, 765 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index e4c9e0e46b95..130e7ecaf9a6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -1578,6 +1578,15 @@ | |||
1578 | extended tables themselves, and also PASID support. With | 1578 | extended tables themselves, and also PASID support. With |
1579 | this option set, extended tables will not be used even | 1579 | this option set, extended tables will not be used even |
1580 | on hardware which claims to support them. | 1580 | on hardware which claims to support them. |
1581 | tboot_noforce [Default Off] | ||
1582 | Do not force the Intel IOMMU enabled under tboot. | ||
1583 | By default, tboot will force Intel IOMMU on, which | ||
1584 | could harm performance of some high-throughput | ||
1585 | devices like 40GBit network cards, even if identity | ||
1586 | mapping is enabled. | ||
1587 | Note that using this option lowers the security | ||
1588 | provided by tboot because it makes the system | ||
1589 | vulnerable to DMA attacks. | ||
1581 | 1590 | ||
1582 | intel_idle.max_cstate= [KNL,HW,ACPI,X86] | 1591 | intel_idle.max_cstate= [KNL,HW,ACPI,X86] |
1583 | 0 disables intel_idle and fall back on acpi_idle. | 1592 | 0 disables intel_idle and fall back on acpi_idle. |
@@ -1644,6 +1653,12 @@ | |||
1644 | nobypass [PPC/POWERNV] | 1653 | nobypass [PPC/POWERNV] |
1645 | Disable IOMMU bypass, using IOMMU for PCI devices. | 1654 | Disable IOMMU bypass, using IOMMU for PCI devices. |
1646 | 1655 | ||
1656 | iommu.passthrough= | ||
1657 | [ARM64] Configure DMA to bypass the IOMMU by default. | ||
1658 | Format: { "0" | "1" } | ||
1659 | 0 - Use IOMMU translation for DMA. | ||
1660 | 1 - Bypass the IOMMU for DMA. | ||
1661 | unset - Use IOMMU translation for DMA. | ||
1647 | 1662 | ||
1648 | io7= [HW] IO7 for Marvel based alpha systems | 1663 | io7= [HW] IO7 for Marvel based alpha systems |
1649 | See comment before marvel_specify_io7 in | 1664 | See comment before marvel_specify_io7 in |
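
The iommu.passthrough= option above is consumed by the IOMMU core rather than by any single driver: it selects whether the default domain the core allocates for each group is an identity (bypass) domain or a translated DMA domain. A minimal sketch of how such an early_param handler can be wired up, with illustrative identifiers rather than lines quoted from this diff:

/* drivers/iommu/iommu.c -- sketch, not a verbatim excerpt */
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        /* "1" -> identity/bypass default domain, "0" or unset -> DMA domain */
        iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
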
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt index 6cdf32d037fc..8a6ffce12af5 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt | |||
@@ -60,6 +60,17 @@ conditions. | |||
60 | aliases of secure registers have to be used during | 60 | aliases of secure registers have to be used during |
61 | SMMU configuration. | 61 | SMMU configuration. |
62 | 62 | ||
63 | - stream-match-mask : For SMMUs supporting stream matching and using | ||
64 | #iommu-cells = <1>, specifies a mask of bits to ignore | ||
65 | when matching stream IDs (e.g. this may be programmed | ||
66 | into the SMRn.MASK field of every stream match register | ||
67 | used). For cases where it is desirable to ignore some | ||
68 | portion of every Stream ID (e.g. for certain MMU-500 | ||
69 | configurations given globally unique input IDs). This | ||
70 | property is not valid for SMMUs using stream indexing, | ||
71 | or using stream matching with #iommu-cells = <2>, and | ||
72 | may be ignored if present in such cases. | ||
73 | |||
63 | ** Deprecated properties: | 74 | ** Deprecated properties: |
64 | 75 | ||
65 | - mmu-masters (deprecated in favour of the generic "iommus" binding) : | 76 | - mmu-masters (deprecated in favour of the generic "iommus" binding) : |
@@ -109,3 +120,20 @@ conditions. | |||
109 | master3 { | 120 | master3 { |
110 | iommus = <&smmu2 1 0x30>; | 121 | iommus = <&smmu2 1 0x30>; |
111 | }; | 122 | }; |
123 | |||
124 | |||
125 | /* ARM MMU-500 with 10-bit stream ID input configuration */ | ||
126 | smmu3: iommu { | ||
127 | compatible = "arm,mmu-500", "arm,smmu-v2"; | ||
128 | ... | ||
129 | #iommu-cells = <1>; | ||
130 | /* always ignore appended 5-bit TBU number */ | ||
131 | stream-match-mask = 0x7c00; | ||
132 | }; | ||
133 | |||
134 | bus { | ||
135 | /* bus whose child devices emit one unique 10-bit stream | ||
136 | ID each, but may master through multiple SMMU TBUs */ | ||
137 | iommu-map = <0 &smmu3 0 0x400>; | ||
138 | ... | ||
139 | }; | ||
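
The stream-match-mask property documented above is global to the SMMU instance: the driver reads it once at probe time and then ORs the value into the SMRn.MASK field of every stream match register it programs. A hypothetical sketch of that flow, assuming an added smmu->global_smr_mask field; the names below are illustrative, not the driver's actual code:

/* probe time: pick up the optional property (absence means no extra masking) */
static void arm_smmu_parse_smr_mask(struct arm_smmu_device *smmu)
{
        u32 mask = 0;

        of_property_read_u32(smmu->dev->of_node, "stream-match-mask", &mask);
        smmu->global_smr_mask = mask;           /* hypothetical field */
}

/* stream mapping time: fold the global mask into every SMR that gets written */
static void arm_smmu_write_smr_sketch(struct arm_smmu_device *smmu, int idx,
                                      u16 id, u16 mask)
{
        u32 reg = SMR_VALID | (id << SMR_ID_SHIFT) |
                  (((u32)mask | smmu->global_smr_mask) << SMR_MASK_SHIFT);

        writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
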
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0268584f1fa0..c742dfd2967b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -2408,6 +2408,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
2408 | const struct dma_map_ops *dma_ops; | 2408 | const struct dma_map_ops *dma_ops; |
2409 | 2409 | ||
2410 | dev->archdata.dma_coherent = coherent; | 2410 | dev->archdata.dma_coherent = coherent; |
2411 | |||
2412 | /* | ||
2413 | * Don't override the dma_ops if they have already been set. Ideally | ||
2414 | * this should be the only location where dma_ops are set, remove this | ||
2415 | * check when all other callers of set_dma_ops will have disappeared. | ||
2416 | */ | ||
2417 | if (dev->dma_ops) | ||
2418 | return; | ||
2419 | |||
2411 | if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) | 2420 | if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) |
2412 | dma_ops = arm_get_iommu_dma_map_ops(coherent); | 2421 | dma_ops = arm_get_iommu_dma_map_ops(coherent); |
2413 | else | 2422 | else |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 4dac4afc95a5..3216e098c058 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/dma-contiguous.h> | 28 | #include <linux/dma-contiguous.h> |
29 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
30 | #include <linux/swiotlb.h> | 30 | #include <linux/swiotlb.h> |
31 | #include <linux/pci.h> | ||
31 | 32 | ||
32 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
33 | 34 | ||
@@ -879,34 +880,26 @@ static const struct dma_map_ops iommu_dma_ops = { | |||
879 | .mapping_error = iommu_dma_mapping_error, | 880 | .mapping_error = iommu_dma_mapping_error, |
880 | }; | 881 | }; |
881 | 882 | ||
882 | /* | 883 | static int __init __iommu_dma_init(void) |
883 | * TODO: Right now __iommu_setup_dma_ops() gets called too early to do | 884 | { |
884 | * everything it needs to - the device is only partially created and the | 885 | return iommu_dma_init(); |
885 | * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we | 886 | } |
886 | * need this delayed attachment dance. Once IOMMU probe ordering is sorted | 887 | arch_initcall(__iommu_dma_init); |
887 | * to move the arch_setup_dma_ops() call later, all the notifier bits below | ||
888 | * become unnecessary, and will go away. | ||
889 | */ | ||
890 | struct iommu_dma_notifier_data { | ||
891 | struct list_head list; | ||
892 | struct device *dev; | ||
893 | const struct iommu_ops *ops; | ||
894 | u64 dma_base; | ||
895 | u64 size; | ||
896 | }; | ||
897 | static LIST_HEAD(iommu_dma_masters); | ||
898 | static DEFINE_MUTEX(iommu_dma_notifier_lock); | ||
899 | 888 | ||
900 | static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, | 889 | static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, |
901 | u64 dma_base, u64 size) | 890 | const struct iommu_ops *ops) |
902 | { | 891 | { |
903 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 892 | struct iommu_domain *domain; |
893 | |||
894 | if (!ops) | ||
895 | return; | ||
904 | 896 | ||
905 | /* | 897 | /* |
906 | * If the IOMMU driver has the DMA domain support that we require, | 898 | * The IOMMU core code allocates the default DMA domain, which the |
907 | * then the IOMMU core will have already configured a group for this | 899 | * underlying IOMMU driver needs to support via the dma-iommu layer. |
908 | * device, and allocated the default domain for that group. | ||
909 | */ | 900 | */ |
901 | domain = iommu_get_domain_for_dev(dev); | ||
902 | |||
910 | if (!domain) | 903 | if (!domain) |
911 | goto out_err; | 904 | goto out_err; |
912 | 905 | ||
@@ -917,109 +910,11 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, | |||
917 | dev->dma_ops = &iommu_dma_ops; | 910 | dev->dma_ops = &iommu_dma_ops; |
918 | } | 911 | } |
919 | 912 | ||
920 | return true; | 913 | return; |
914 | |||
921 | out_err: | 915 | out_err: |
922 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", | 916 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", |
923 | dev_name(dev)); | 917 | dev_name(dev)); |
924 | return false; | ||
925 | } | ||
926 | |||
927 | static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops, | ||
928 | u64 dma_base, u64 size) | ||
929 | { | ||
930 | struct iommu_dma_notifier_data *iommudata; | ||
931 | |||
932 | iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL); | ||
933 | if (!iommudata) | ||
934 | return; | ||
935 | |||
936 | iommudata->dev = dev; | ||
937 | iommudata->ops = ops; | ||
938 | iommudata->dma_base = dma_base; | ||
939 | iommudata->size = size; | ||
940 | |||
941 | mutex_lock(&iommu_dma_notifier_lock); | ||
942 | list_add(&iommudata->list, &iommu_dma_masters); | ||
943 | mutex_unlock(&iommu_dma_notifier_lock); | ||
944 | } | ||
945 | |||
946 | static int __iommu_attach_notifier(struct notifier_block *nb, | ||
947 | unsigned long action, void *data) | ||
948 | { | ||
949 | struct iommu_dma_notifier_data *master, *tmp; | ||
950 | |||
951 | if (action != BUS_NOTIFY_BIND_DRIVER) | ||
952 | return 0; | ||
953 | |||
954 | mutex_lock(&iommu_dma_notifier_lock); | ||
955 | list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) { | ||
956 | if (data == master->dev && do_iommu_attach(master->dev, | ||
957 | master->ops, master->dma_base, master->size)) { | ||
958 | list_del(&master->list); | ||
959 | kfree(master); | ||
960 | break; | ||
961 | } | ||
962 | } | ||
963 | mutex_unlock(&iommu_dma_notifier_lock); | ||
964 | return 0; | ||
965 | } | ||
966 | |||
967 | static int __init register_iommu_dma_ops_notifier(struct bus_type *bus) | ||
968 | { | ||
969 | struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL); | ||
970 | int ret; | ||
971 | |||
972 | if (!nb) | ||
973 | return -ENOMEM; | ||
974 | |||
975 | nb->notifier_call = __iommu_attach_notifier; | ||
976 | |||
977 | ret = bus_register_notifier(bus, nb); | ||
978 | if (ret) { | ||
979 | pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n", | ||
980 | bus->name); | ||
981 | kfree(nb); | ||
982 | } | ||
983 | return ret; | ||
984 | } | ||
985 | |||
986 | static int __init __iommu_dma_init(void) | ||
987 | { | ||
988 | int ret; | ||
989 | |||
990 | ret = iommu_dma_init(); | ||
991 | if (!ret) | ||
992 | ret = register_iommu_dma_ops_notifier(&platform_bus_type); | ||
993 | if (!ret) | ||
994 | ret = register_iommu_dma_ops_notifier(&amba_bustype); | ||
995 | #ifdef CONFIG_PCI | ||
996 | if (!ret) | ||
997 | ret = register_iommu_dma_ops_notifier(&pci_bus_type); | ||
998 | #endif | ||
999 | return ret; | ||
1000 | } | ||
1001 | arch_initcall(__iommu_dma_init); | ||
1002 | |||
1003 | static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | ||
1004 | const struct iommu_ops *ops) | ||
1005 | { | ||
1006 | struct iommu_group *group; | ||
1007 | |||
1008 | if (!ops) | ||
1009 | return; | ||
1010 | /* | ||
1011 | * TODO: As a concession to the future, we're ready to handle being | ||
1012 | * called both early and late (i.e. after bus_add_device). Once all | ||
1013 | * the platform bus code is reworked to call us late and the notifier | ||
1014 | * junk above goes away, move the body of do_iommu_attach here. | ||
1015 | */ | ||
1016 | group = iommu_group_get(dev); | ||
1017 | if (group) { | ||
1018 | do_iommu_attach(dev, ops, dma_base, size); | ||
1019 | iommu_group_put(group); | ||
1020 | } else { | ||
1021 | queue_iommu_attach(dev, ops, dma_base, size); | ||
1022 | } | ||
1023 | } | 918 | } |
1024 | 919 | ||
1025 | void arch_teardown_dma_ops(struct device *dev) | 920 | void arch_teardown_dma_ops(struct device *dev) |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index d4c8011a2293..4b1724059909 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -514,6 +514,9 @@ int tboot_force_iommu(void) | |||
514 | if (!tboot_enabled()) | 514 | if (!tboot_enabled()) |
515 | return 0; | 515 | return 0; |
516 | 516 | ||
517 | if (intel_iommu_tboot_noforce) | ||
518 | return 1; | ||
519 | |||
517 | if (no_iommu || swiotlb || dmar_disabled) | 520 | if (no_iommu || swiotlb || dmar_disabled) |
518 | pr_warning("Forcing Intel-IOMMU to enabled\n"); | 521 | pr_warning("Forcing Intel-IOMMU to enabled\n"); |
519 | 522 | ||
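
tboot_noforce is a sub-option of intel_iommu= (see the documentation hunk above), so the intel_iommu_tboot_noforce flag tested here is set while parsing that parameter in the VT-d driver. A sketch of that parsing, with the message text and loop structure given for illustration rather than quoted from the patch:

/* drivers/iommu/intel-iommu.c -- sketch */
int intel_iommu_tboot_noforce;

static int __init intel_iommu_setup(char *str)
{
        while (str && *str) {
                if (!strncmp(str, "tboot_noforce", 13)) {
                        pr_info("Intel-IOMMU: not forcing IOMMU on after tboot launch\n");
                        intel_iommu_tboot_noforce = 1;
                }
                /* ... handling of the other intel_iommu= sub-options elided ... */
                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
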
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 22e08d272db7..c5fecf97ee2f 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -618,6 +618,46 @@ static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, | |||
618 | return ret; | 618 | return ret; |
619 | } | 619 | } |
620 | 620 | ||
621 | static inline bool iort_iommu_driver_enabled(u8 type) | ||
622 | { | ||
623 | switch (type) { | ||
624 | case ACPI_IORT_NODE_SMMU_V3: | ||
625 | return IS_BUILTIN(CONFIG_ARM_SMMU_V3); | ||
626 | case ACPI_IORT_NODE_SMMU: | ||
627 | return IS_BUILTIN(CONFIG_ARM_SMMU); | ||
628 | default: | ||
629 | pr_warn("IORT node type %u does not describe an SMMU\n", type); | ||
630 | return false; | ||
631 | } | ||
632 | } | ||
633 | |||
634 | #ifdef CONFIG_IOMMU_API | ||
635 | static inline | ||
636 | const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) | ||
637 | { | ||
638 | return (fwspec && fwspec->ops) ? fwspec->ops : NULL; | ||
639 | } | ||
640 | |||
641 | static inline | ||
642 | int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) | ||
643 | { | ||
644 | int err = 0; | ||
645 | |||
646 | if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus && | ||
647 | !dev->iommu_group) | ||
648 | err = ops->add_device(dev); | ||
649 | |||
650 | return err; | ||
651 | } | ||
652 | #else | ||
653 | static inline | ||
654 | const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) | ||
655 | { return NULL; } | ||
656 | static inline | ||
657 | int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) | ||
658 | { return 0; } | ||
659 | #endif | ||
660 | |||
621 | static const struct iommu_ops *iort_iommu_xlate(struct device *dev, | 661 | static const struct iommu_ops *iort_iommu_xlate(struct device *dev, |
622 | struct acpi_iort_node *node, | 662 | struct acpi_iort_node *node, |
623 | u32 streamid) | 663 | u32 streamid) |
@@ -626,14 +666,31 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, | |||
626 | int ret = -ENODEV; | 666 | int ret = -ENODEV; |
627 | struct fwnode_handle *iort_fwnode; | 667 | struct fwnode_handle *iort_fwnode; |
628 | 668 | ||
669 | /* | ||
670 | * If we already translated the fwspec there | ||
671 | * is nothing left to do, return the iommu_ops. | ||
672 | */ | ||
673 | ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); | ||
674 | if (ops) | ||
675 | return ops; | ||
676 | |||
629 | if (node) { | 677 | if (node) { |
630 | iort_fwnode = iort_get_fwnode(node); | 678 | iort_fwnode = iort_get_fwnode(node); |
631 | if (!iort_fwnode) | 679 | if (!iort_fwnode) |
632 | return NULL; | 680 | return NULL; |
633 | 681 | ||
634 | ops = iommu_ops_from_fwnode(iort_fwnode); | 682 | ops = iommu_ops_from_fwnode(iort_fwnode); |
683 | /* | ||
684 | * If the ops look-up fails, this means that either | ||
685 | * the SMMU drivers have not been probed yet or that | ||
686 | * the SMMU drivers are not built in the kernel; | ||
687 | * Depending on whether the SMMU drivers are built-in | ||
688 | * in the kernel or not, defer the IOMMU configuration | ||
689 | * or just abort it. | ||
690 | */ | ||
635 | if (!ops) | 691 | if (!ops) |
636 | return NULL; | 692 | return iort_iommu_driver_enabled(node->type) ? |
693 | ERR_PTR(-EPROBE_DEFER) : NULL; | ||
637 | 694 | ||
638 | ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); | 695 | ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); |
639 | } | 696 | } |
@@ -676,6 +733,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) | |||
676 | struct acpi_iort_node *node, *parent; | 733 | struct acpi_iort_node *node, *parent; |
677 | const struct iommu_ops *ops = NULL; | 734 | const struct iommu_ops *ops = NULL; |
678 | u32 streamid = 0; | 735 | u32 streamid = 0; |
736 | int err; | ||
679 | 737 | ||
680 | if (dev_is_pci(dev)) { | 738 | if (dev_is_pci(dev)) { |
681 | struct pci_bus *bus = to_pci_dev(dev)->bus; | 739 | struct pci_bus *bus = to_pci_dev(dev)->bus; |
@@ -707,6 +765,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) | |||
707 | 765 | ||
708 | while (parent) { | 766 | while (parent) { |
709 | ops = iort_iommu_xlate(dev, parent, streamid); | 767 | ops = iort_iommu_xlate(dev, parent, streamid); |
768 | if (IS_ERR_OR_NULL(ops)) | ||
769 | return ops; | ||
710 | 770 | ||
711 | parent = iort_node_map_platform_id(node, &streamid, | 771 | parent = iort_node_map_platform_id(node, &streamid, |
712 | IORT_IOMMU_TYPE, | 772 | IORT_IOMMU_TYPE, |
@@ -714,6 +774,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) | |||
714 | } | 774 | } |
715 | } | 775 | } |
716 | 776 | ||
777 | /* | ||
778 | * If we have reason to believe the IOMMU driver missed the initial | ||
779 | * add_device callback for dev, replay it to get things in order. | ||
780 | */ | ||
781 | err = iort_add_device_replay(ops, dev); | ||
782 | if (err) | ||
783 | ops = ERR_PTR(err); | ||
784 | |||
717 | return ops; | 785 | return ops; |
718 | } | 786 | } |
719 | 787 | ||
@@ -1052,6 +1120,4 @@ void __init acpi_iort_init(void) | |||
1052 | } | 1120 | } |
1053 | 1121 | ||
1054 | iort_init_platform_devices(); | 1122 | iort_init_platform_devices(); |
1055 | |||
1056 | acpi_probe_device_table(iort); | ||
1057 | } | 1123 | } |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 3e7020751d34..3be1433853bf 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -179,7 +179,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) | |||
179 | struct list_head *physnode_list; | 179 | struct list_head *physnode_list; |
180 | unsigned int node_id; | 180 | unsigned int node_id; |
181 | int retval = -EINVAL; | 181 | int retval = -EINVAL; |
182 | enum dev_dma_attr attr; | ||
183 | 182 | ||
184 | if (has_acpi_companion(dev)) { | 183 | if (has_acpi_companion(dev)) { |
185 | if (acpi_dev) { | 184 | if (acpi_dev) { |
@@ -236,10 +235,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) | |||
236 | if (!has_acpi_companion(dev)) | 235 | if (!has_acpi_companion(dev)) |
237 | ACPI_COMPANION_SET(dev, acpi_dev); | 236 | ACPI_COMPANION_SET(dev, acpi_dev); |
238 | 237 | ||
239 | attr = acpi_get_dma_attr(acpi_dev); | ||
240 | if (attr != DEV_DMA_NOT_SUPPORTED) | ||
241 | acpi_dma_configure(dev, attr); | ||
242 | |||
243 | acpi_physnode_link_name(physical_node_name, node_id); | 238 | acpi_physnode_link_name(physical_node_name, node_id); |
244 | retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 239 | retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
245 | physical_node_name); | 240 | physical_node_name); |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index c26931067415..e39ec7b7cb67 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -1363,20 +1363,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) | |||
1363 | * @dev: The pointer to the device | 1363 | * @dev: The pointer to the device |
1364 | * @attr: device dma attributes | 1364 | * @attr: device dma attributes |
1365 | */ | 1365 | */ |
1366 | void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) | 1366 | int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) |
1367 | { | 1367 | { |
1368 | const struct iommu_ops *iommu; | 1368 | const struct iommu_ops *iommu; |
1369 | u64 size; | ||
1369 | 1370 | ||
1370 | iort_set_dma_mask(dev); | 1371 | iort_set_dma_mask(dev); |
1371 | 1372 | ||
1372 | iommu = iort_iommu_configure(dev); | 1373 | iommu = iort_iommu_configure(dev); |
1374 | if (IS_ERR(iommu)) | ||
1375 | return PTR_ERR(iommu); | ||
1373 | 1376 | ||
1377 | size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); | ||
1374 | /* | 1378 | /* |
1375 | * Assume dma valid range starts at 0 and covers the whole | 1379 | * Assume dma valid range starts at 0 and covers the whole |
1376 | * coherent_dma_mask. | 1380 | * coherent_dma_mask. |
1377 | */ | 1381 | */ |
1378 | arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu, | 1382 | arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT); |
1379 | attr == DEV_DMA_COHERENT); | 1383 | |
1384 | return 0; | ||
1380 | } | 1385 | } |
1381 | EXPORT_SYMBOL_GPL(acpi_dma_configure); | 1386 | EXPORT_SYMBOL_GPL(acpi_dma_configure); |
1382 | 1387 | ||
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a1fbf55c4d3a..4882f06d12df 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | #include <linux/kthread.h> | 24 | #include <linux/kthread.h> |
24 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
@@ -356,6 +357,10 @@ re_probe: | |||
356 | if (ret) | 357 | if (ret) |
357 | goto pinctrl_bind_failed; | 358 | goto pinctrl_bind_failed; |
358 | 359 | ||
360 | ret = dma_configure(dev); | ||
361 | if (ret) | ||
362 | goto dma_failed; | ||
363 | |||
359 | if (driver_sysfs_add(dev)) { | 364 | if (driver_sysfs_add(dev)) { |
360 | printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", | 365 | printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", |
361 | __func__, dev_name(dev)); | 366 | __func__, dev_name(dev)); |
@@ -417,6 +422,8 @@ re_probe: | |||
417 | goto done; | 422 | goto done; |
418 | 423 | ||
419 | probe_failed: | 424 | probe_failed: |
425 | dma_deconfigure(dev); | ||
426 | dma_failed: | ||
420 | if (dev->bus) | 427 | if (dev->bus) |
421 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 428 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
422 | BUS_NOTIFY_DRIVER_NOT_BOUND, dev); | 429 | BUS_NOTIFY_DRIVER_NOT_BOUND, dev); |
@@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) | |||
826 | drv->remove(dev); | 833 | drv->remove(dev); |
827 | 834 | ||
828 | device_links_driver_cleanup(dev); | 835 | device_links_driver_cleanup(dev); |
836 | dma_deconfigure(dev); | ||
837 | |||
829 | devres_release_all(dev); | 838 | devres_release_all(dev); |
830 | dev->driver = NULL; | 839 | dev->driver = NULL; |
831 | dev_set_drvdata(dev, NULL); | 840 | dev_set_drvdata(dev, NULL); |
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 51b7061ff7c0..f3deb6af42ad 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c | |||
@@ -7,9 +7,11 @@ | |||
7 | * This file is released under the GPLv2. | 7 | * This file is released under the GPLv2. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/acpi.h> | ||
10 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
11 | #include <linux/export.h> | 12 | #include <linux/export.h> |
12 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
14 | #include <linux/of_device.h> | ||
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
15 | 17 | ||
@@ -340,3 +342,42 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) | |||
340 | vunmap(cpu_addr); | 342 | vunmap(cpu_addr); |
341 | } | 343 | } |
342 | #endif | 344 | #endif |
345 | |||
346 | /* | ||
347 | * Common configuration to enable DMA API use for a device | ||
348 | */ | ||
349 | #include <linux/pci.h> | ||
350 | |||
351 | int dma_configure(struct device *dev) | ||
352 | { | ||
353 | struct device *bridge = NULL, *dma_dev = dev; | ||
354 | enum dev_dma_attr attr; | ||
355 | int ret = 0; | ||
356 | |||
357 | if (dev_is_pci(dev)) { | ||
358 | bridge = pci_get_host_bridge_device(to_pci_dev(dev)); | ||
359 | dma_dev = bridge; | ||
360 | if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && | ||
361 | dma_dev->parent->of_node) | ||
362 | dma_dev = dma_dev->parent; | ||
363 | } | ||
364 | |||
365 | if (dma_dev->of_node) { | ||
366 | ret = of_dma_configure(dev, dma_dev->of_node); | ||
367 | } else if (has_acpi_companion(dma_dev)) { | ||
368 | attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); | ||
369 | if (attr != DEV_DMA_NOT_SUPPORTED) | ||
370 | ret = acpi_dma_configure(dev, attr); | ||
371 | } | ||
372 | |||
373 | if (bridge) | ||
374 | pci_put_host_bridge_device(bridge); | ||
375 | |||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | void dma_deconfigure(struct device *dev) | ||
380 | { | ||
381 | of_dma_deconfigure(dev); | ||
382 | acpi_dma_deconfigure(dev); | ||
383 | } | ||
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index ef11e770f822..6a72095d6c7a 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <rdma/ib_user_verbs.h> | 35 | #include <rdma/ib_user_verbs.h> |
36 | #include <linux/netdevice.h> | 36 | #include <linux/netdevice.h> |
37 | #include <linux/iommu.h> | 37 | #include <linux/iommu.h> |
38 | #include <linux/pci.h> | ||
38 | #include <net/addrconf.h> | 39 | #include <net/addrconf.h> |
39 | #include <linux/qed/qede_roce.h> | 40 | #include <linux/qed/qede_roce.h> |
40 | #include <linux/qed/qed_chain.h> | 41 | #include <linux/qed/qed_chain.h> |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 063343909b0d..6629c472eafd 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
@@ -696,9 +696,9 @@ out_clear_state: | |||
696 | 696 | ||
697 | out_unregister: | 697 | out_unregister: |
698 | mmu_notifier_unregister(&pasid_state->mn, mm); | 698 | mmu_notifier_unregister(&pasid_state->mn, mm); |
699 | mmput(mm); | ||
699 | 700 | ||
700 | out_free: | 701 | out_free: |
701 | mmput(mm); | ||
702 | free_pasid_state(pasid_state); | 702 | free_pasid_state(pasid_state); |
703 | 703 | ||
704 | out: | 704 | out: |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 591bb96047c9..380969aa60d5 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg { | |||
554 | }; | 554 | }; |
555 | 555 | ||
556 | struct arm_smmu_strtab_ent { | 556 | struct arm_smmu_strtab_ent { |
557 | bool valid; | 557 | /* |
558 | 558 | * An STE is "assigned" if the master emitting the corresponding SID | |
559 | bool bypass; /* Overrides s1/s2 config */ | 559 | * is attached to a domain. The behaviour of an unassigned STE is |
560 | * determined by the disable_bypass parameter, whereas an assigned | ||
561 | * STE behaves according to s1_cfg/s2_cfg, which themselves are | ||
562 | * configured according to the domain type. | ||
563 | */ | ||
564 | bool assigned; | ||
560 | struct arm_smmu_s1_cfg *s1_cfg; | 565 | struct arm_smmu_s1_cfg *s1_cfg; |
561 | struct arm_smmu_s2_cfg *s2_cfg; | 566 | struct arm_smmu_s2_cfg *s2_cfg; |
562 | }; | 567 | }; |
@@ -632,6 +637,7 @@ enum arm_smmu_domain_stage { | |||
632 | ARM_SMMU_DOMAIN_S1 = 0, | 637 | ARM_SMMU_DOMAIN_S1 = 0, |
633 | ARM_SMMU_DOMAIN_S2, | 638 | ARM_SMMU_DOMAIN_S2, |
634 | ARM_SMMU_DOMAIN_NESTED, | 639 | ARM_SMMU_DOMAIN_NESTED, |
640 | ARM_SMMU_DOMAIN_BYPASS, | ||
635 | }; | 641 | }; |
636 | 642 | ||
637 | struct arm_smmu_domain { | 643 | struct arm_smmu_domain { |
@@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1005 | * This is hideously complicated, but we only really care about | 1011 | * This is hideously complicated, but we only really care about |
1006 | * three cases at the moment: | 1012 | * three cases at the moment: |
1007 | * | 1013 | * |
1008 | * 1. Invalid (all zero) -> bypass (init) | 1014 | * 1. Invalid (all zero) -> bypass/fault (init) |
1009 | * 2. Bypass -> translation (attach) | 1015 | * 2. Bypass/fault -> translation/bypass (attach) |
1010 | * 3. Translation -> bypass (detach) | 1016 | * 3. Translation/bypass -> bypass/fault (detach) |
1011 | * | 1017 | * |
1012 | * Given that we can't update the STE atomically and the SMMU | 1018 | * Given that we can't update the STE atomically and the SMMU |
1013 | * doesn't read the thing in a defined order, that leaves us | 1019 | * doesn't read the thing in a defined order, that leaves us |
@@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1046 | } | 1052 | } |
1047 | 1053 | ||
1048 | /* Nuke the existing STE_0 value, as we're going to rewrite it */ | 1054 | /* Nuke the existing STE_0 value, as we're going to rewrite it */ |
1049 | val = ste->valid ? STRTAB_STE_0_V : 0; | 1055 | val = STRTAB_STE_0_V; |
1056 | |||
1057 | /* Bypass/fault */ | ||
1058 | if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) { | ||
1059 | if (!ste->assigned && disable_bypass) | ||
1060 | val |= STRTAB_STE_0_CFG_ABORT; | ||
1061 | else | ||
1062 | val |= STRTAB_STE_0_CFG_BYPASS; | ||
1050 | 1063 | ||
1051 | if (ste->bypass) { | ||
1052 | val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT | ||
1053 | : STRTAB_STE_0_CFG_BYPASS; | ||
1054 | dst[0] = cpu_to_le64(val); | 1064 | dst[0] = cpu_to_le64(val); |
1055 | dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING | 1065 | dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING |
1056 | << STRTAB_STE_1_SHCFG_SHIFT); | 1066 | << STRTAB_STE_1_SHCFG_SHIFT); |
@@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1111 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) | 1121 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) |
1112 | { | 1122 | { |
1113 | unsigned int i; | 1123 | unsigned int i; |
1114 | struct arm_smmu_strtab_ent ste = { | 1124 | struct arm_smmu_strtab_ent ste = { .assigned = false }; |
1115 | .valid = true, | ||
1116 | .bypass = true, | ||
1117 | }; | ||
1118 | 1125 | ||
1119 | for (i = 0; i < nent; ++i) { | 1126 | for (i = 0; i < nent; ++i) { |
1120 | arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste); | 1127 | arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste); |
@@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) | |||
1378 | { | 1385 | { |
1379 | struct arm_smmu_domain *smmu_domain; | 1386 | struct arm_smmu_domain *smmu_domain; |
1380 | 1387 | ||
1381 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | 1388 | if (type != IOMMU_DOMAIN_UNMANAGED && |
1389 | type != IOMMU_DOMAIN_DMA && | ||
1390 | type != IOMMU_DOMAIN_IDENTITY) | ||
1382 | return NULL; | 1391 | return NULL; |
1383 | 1392 | ||
1384 | /* | 1393 | /* |
@@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) | |||
1509 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1518 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1510 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1519 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1511 | 1520 | ||
1521 | if (domain->type == IOMMU_DOMAIN_IDENTITY) { | ||
1522 | smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; | ||
1523 | return 0; | ||
1524 | } | ||
1525 | |||
1512 | /* Restrict the stage to what we can actually support */ | 1526 | /* Restrict the stage to what we can actually support */ |
1513 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) | 1527 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) |
1514 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; | 1528 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; |
@@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) | |||
1579 | return step; | 1593 | return step; |
1580 | } | 1594 | } |
1581 | 1595 | ||
1582 | static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) | 1596 | static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) |
1583 | { | 1597 | { |
1584 | int i; | 1598 | int i; |
1585 | struct arm_smmu_master_data *master = fwspec->iommu_priv; | 1599 | struct arm_smmu_master_data *master = fwspec->iommu_priv; |
@@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) | |||
1591 | 1605 | ||
1592 | arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); | 1606 | arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); |
1593 | } | 1607 | } |
1594 | |||
1595 | return 0; | ||
1596 | } | 1608 | } |
1597 | 1609 | ||
1598 | static void arm_smmu_detach_dev(struct device *dev) | 1610 | static void arm_smmu_detach_dev(struct device *dev) |
1599 | { | 1611 | { |
1600 | struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; | 1612 | struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; |
1601 | 1613 | ||
1602 | master->ste.bypass = true; | 1614 | master->ste.assigned = false; |
1603 | if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0) | 1615 | arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
1604 | dev_warn(dev, "failed to install bypass STE\n"); | ||
1605 | } | 1616 | } |
1606 | 1617 | ||
1607 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1618 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
@@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1620 | ste = &master->ste; | 1631 | ste = &master->ste; |
1621 | 1632 | ||
1622 | /* Already attached to a different domain? */ | 1633 | /* Already attached to a different domain? */ |
1623 | if (!ste->bypass) | 1634 | if (ste->assigned) |
1624 | arm_smmu_detach_dev(dev); | 1635 | arm_smmu_detach_dev(dev); |
1625 | 1636 | ||
1626 | mutex_lock(&smmu_domain->init_mutex); | 1637 | mutex_lock(&smmu_domain->init_mutex); |
@@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1641 | goto out_unlock; | 1652 | goto out_unlock; |
1642 | } | 1653 | } |
1643 | 1654 | ||
1644 | ste->bypass = false; | 1655 | ste->assigned = true; |
1645 | ste->valid = true; | ||
1646 | 1656 | ||
1647 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | 1657 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) { |
1658 | ste->s1_cfg = NULL; | ||
1659 | ste->s2_cfg = NULL; | ||
1660 | } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | ||
1648 | ste->s1_cfg = &smmu_domain->s1_cfg; | 1661 | ste->s1_cfg = &smmu_domain->s1_cfg; |
1649 | ste->s2_cfg = NULL; | 1662 | ste->s2_cfg = NULL; |
1650 | arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); | 1663 | arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); |
@@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1653 | ste->s2_cfg = &smmu_domain->s2_cfg; | 1666 | ste->s2_cfg = &smmu_domain->s2_cfg; |
1654 | } | 1667 | } |
1655 | 1668 | ||
1656 | ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec); | 1669 | arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
1657 | if (ret < 0) | ||
1658 | ste->valid = false; | ||
1659 | |||
1660 | out_unlock: | 1670 | out_unlock: |
1661 | mutex_unlock(&smmu_domain->init_mutex); | 1671 | mutex_unlock(&smmu_domain->init_mutex); |
1662 | return ret; | 1672 | return ret; |
@@ -1704,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) | |||
1704 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1714 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1705 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; | 1715 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
1706 | 1716 | ||
1717 | if (domain->type == IOMMU_DOMAIN_IDENTITY) | ||
1718 | return iova; | ||
1719 | |||
1707 | if (!ops) | 1720 | if (!ops) |
1708 | return 0; | 1721 | return 0; |
1709 | 1722 | ||
@@ -1807,7 +1820,7 @@ static void arm_smmu_remove_device(struct device *dev) | |||
1807 | 1820 | ||
1808 | master = fwspec->iommu_priv; | 1821 | master = fwspec->iommu_priv; |
1809 | smmu = master->smmu; | 1822 | smmu = master->smmu; |
1810 | if (master && master->ste.valid) | 1823 | if (master && master->ste.assigned) |
1811 | arm_smmu_detach_dev(dev); | 1824 | arm_smmu_detach_dev(dev); |
1812 | iommu_group_remove_device(dev); | 1825 | iommu_group_remove_device(dev); |
1813 | iommu_device_unlink(&smmu->iommu, dev); | 1826 | iommu_device_unlink(&smmu->iommu, dev); |
@@ -1837,6 +1850,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | |||
1837 | { | 1850 | { |
1838 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1851 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1839 | 1852 | ||
1853 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
1854 | return -EINVAL; | ||
1855 | |||
1840 | switch (attr) { | 1856 | switch (attr) { |
1841 | case DOMAIN_ATTR_NESTING: | 1857 | case DOMAIN_ATTR_NESTING: |
1842 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); | 1858 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); |
@@ -1852,6 +1868,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | |||
1852 | int ret = 0; | 1868 | int ret = 0; |
1853 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1869 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1854 | 1870 | ||
1871 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
1872 | return -EINVAL; | ||
1873 | |||
1855 | mutex_lock(&smmu_domain->init_mutex); | 1874 | mutex_lock(&smmu_domain->init_mutex); |
1856 | 1875 | ||
1857 | switch (attr) { | 1876 | switch (attr) { |
@@ -1893,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, | |||
1893 | return; | 1912 | return; |
1894 | 1913 | ||
1895 | list_add_tail(®ion->list, head); | 1914 | list_add_tail(®ion->list, head); |
1915 | |||
1916 | iommu_dma_get_resv_regions(dev, head); | ||
1896 | } | 1917 | } |
1897 | 1918 | ||
1898 | static void arm_smmu_put_resv_regions(struct device *dev, | 1919 | static void arm_smmu_put_resv_regions(struct device *dev, |
@@ -2761,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = { | |||
2761 | .probe = arm_smmu_device_probe, | 2782 | .probe = arm_smmu_device_probe, |
2762 | .remove = arm_smmu_device_remove, | 2783 | .remove = arm_smmu_device_remove, |
2763 | }; | 2784 | }; |
2785 | module_platform_driver(arm_smmu_driver); | ||
2764 | 2786 | ||
2765 | static int __init arm_smmu_init(void) | 2787 | IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL); |
2766 | { | ||
2767 | static bool registered; | ||
2768 | int ret = 0; | ||
2769 | |||
2770 | if (!registered) { | ||
2771 | ret = platform_driver_register(&arm_smmu_driver); | ||
2772 | registered = !ret; | ||
2773 | } | ||
2774 | return ret; | ||
2775 | } | ||
2776 | |||
2777 | static void __exit arm_smmu_exit(void) | ||
2778 | { | ||
2779 | return platform_driver_unregister(&arm_smmu_driver); | ||
2780 | } | ||
2781 | |||
2782 | subsys_initcall(arm_smmu_init); | ||
2783 | module_exit(arm_smmu_exit); | ||
2784 | |||
2785 | static int __init arm_smmu_of_init(struct device_node *np) | ||
2786 | { | ||
2787 | int ret = arm_smmu_init(); | ||
2788 | |||
2789 | if (ret) | ||
2790 | return ret; | ||
2791 | |||
2792 | if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) | ||
2793 | return -ENODEV; | ||
2794 | |||
2795 | return 0; | ||
2796 | } | ||
2797 | IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); | ||
2798 | |||
2799 | #ifdef CONFIG_ACPI | ||
2800 | static int __init acpi_smmu_v3_init(struct acpi_table_header *table) | ||
2801 | { | ||
2802 | if (iort_node_match(ACPI_IORT_NODE_SMMU_V3)) | ||
2803 | return arm_smmu_init(); | ||
2804 | |||
2805 | return 0; | ||
2806 | } | ||
2807 | IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init); | ||
2808 | #endif | ||
2809 | 2788 | ||
2810 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); | 2789 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); |
2811 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | 2790 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b493c99e17f7..7ec30b08b3bd 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -162,6 +162,7 @@ | |||
162 | #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 | 162 | #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 |
163 | #define sTLBGSTATUS_GSACTIVE (1 << 0) | 163 | #define sTLBGSTATUS_GSACTIVE (1 << 0) |
164 | #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ | 164 | #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ |
165 | #define TLB_SPIN_COUNT 10 | ||
165 | 166 | ||
166 | /* Stream mapping registers */ | 167 | /* Stream mapping registers */ |
167 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) | 168 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) |
@@ -216,8 +217,7 @@ enum arm_smmu_s2cr_privcfg { | |||
216 | #define CBA2R_VMID_MASK 0xffff | 217 | #define CBA2R_VMID_MASK 0xffff |
217 | 218 | ||
218 | /* Translation context bank */ | 219 | /* Translation context bank */ |
219 | #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) | 220 | #define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) |
220 | #define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift)) | ||
221 | 221 | ||
222 | #define ARM_SMMU_CB_SCTLR 0x0 | 222 | #define ARM_SMMU_CB_SCTLR 0x0 |
223 | #define ARM_SMMU_CB_ACTLR 0x4 | 223 | #define ARM_SMMU_CB_ACTLR 0x4 |
@@ -238,6 +238,8 @@ enum arm_smmu_s2cr_privcfg { | |||
238 | #define ARM_SMMU_CB_S1_TLBIVAL 0x620 | 238 | #define ARM_SMMU_CB_S1_TLBIVAL 0x620 |
239 | #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 | 239 | #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 |
240 | #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 | 240 | #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 |
241 | #define ARM_SMMU_CB_TLBSYNC 0x7f0 | ||
242 | #define ARM_SMMU_CB_TLBSTATUS 0x7f4 | ||
241 | #define ARM_SMMU_CB_ATS1PR 0x800 | 243 | #define ARM_SMMU_CB_ATS1PR 0x800 |
242 | #define ARM_SMMU_CB_ATSR 0x8f0 | 244 | #define ARM_SMMU_CB_ATSR 0x8f0 |
243 | 245 | ||
@@ -344,7 +346,7 @@ struct arm_smmu_device { | |||
344 | struct device *dev; | 346 | struct device *dev; |
345 | 347 | ||
346 | void __iomem *base; | 348 | void __iomem *base; |
347 | unsigned long size; | 349 | void __iomem *cb_base; |
348 | unsigned long pgshift; | 350 | unsigned long pgshift; |
349 | 351 | ||
350 | #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) | 352 | #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) |
@@ -404,18 +406,20 @@ enum arm_smmu_context_fmt { | |||
404 | struct arm_smmu_cfg { | 406 | struct arm_smmu_cfg { |
405 | u8 cbndx; | 407 | u8 cbndx; |
406 | u8 irptndx; | 408 | u8 irptndx; |
409 | union { | ||
410 | u16 asid; | ||
411 | u16 vmid; | ||
412 | }; | ||
407 | u32 cbar; | 413 | u32 cbar; |
408 | enum arm_smmu_context_fmt fmt; | 414 | enum arm_smmu_context_fmt fmt; |
409 | }; | 415 | }; |
410 | #define INVALID_IRPTNDX 0xff | 416 | #define INVALID_IRPTNDX 0xff |
411 | 417 | ||
412 | #define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx) | ||
413 | #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1) | ||
414 | |||
415 | enum arm_smmu_domain_stage { | 418 | enum arm_smmu_domain_stage { |
416 | ARM_SMMU_DOMAIN_S1 = 0, | 419 | ARM_SMMU_DOMAIN_S1 = 0, |
417 | ARM_SMMU_DOMAIN_S2, | 420 | ARM_SMMU_DOMAIN_S2, |
418 | ARM_SMMU_DOMAIN_NESTED, | 421 | ARM_SMMU_DOMAIN_NESTED, |
422 | ARM_SMMU_DOMAIN_BYPASS, | ||
419 | }; | 423 | }; |
420 | 424 | ||
421 | struct arm_smmu_domain { | 425 | struct arm_smmu_domain { |
@@ -569,49 +573,67 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) | |||
569 | } | 573 | } |
570 | 574 | ||
571 | /* Wait for any pending TLB invalidations to complete */ | 575 | /* Wait for any pending TLB invalidations to complete */ |
572 | static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | 576 | static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, |
577 | void __iomem *sync, void __iomem *status) | ||
573 | { | 578 | { |
574 | int count = 0; | 579 | unsigned int spin_cnt, delay; |
575 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 580 | |
576 | 581 | writel_relaxed(0, sync); | |
577 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); | 582 | for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { |
578 | while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) | 583 | for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { |
579 | & sTLBGSTATUS_GSACTIVE) { | 584 | if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) |
580 | cpu_relax(); | 585 | return; |
581 | if (++count == TLB_LOOP_TIMEOUT) { | 586 | cpu_relax(); |
582 | dev_err_ratelimited(smmu->dev, | ||
583 | "TLB sync timed out -- SMMU may be deadlocked\n"); | ||
584 | return; | ||
585 | } | 587 | } |
586 | udelay(1); | 588 | udelay(delay); |
587 | } | 589 | } |
590 | dev_err_ratelimited(smmu->dev, | ||
591 | "TLB sync timed out -- SMMU may be deadlocked\n"); | ||
588 | } | 592 | } |
589 | 593 | ||
590 | static void arm_smmu_tlb_sync(void *cookie) | 594 | static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) |
595 | { | ||
596 | void __iomem *base = ARM_SMMU_GR0(smmu); | ||
597 | |||
598 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, | ||
599 | base + ARM_SMMU_GR0_sTLBGSTATUS); | ||
600 | } | ||
601 | |||
602 | static void arm_smmu_tlb_sync_context(void *cookie) | ||
591 | { | 603 | { |
592 | struct arm_smmu_domain *smmu_domain = cookie; | 604 | struct arm_smmu_domain *smmu_domain = cookie; |
593 | __arm_smmu_tlb_sync(smmu_domain->smmu); | 605 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
606 | void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); | ||
607 | |||
608 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, | ||
609 | base + ARM_SMMU_CB_TLBSTATUS); | ||
594 | } | 610 | } |
595 | 611 | ||
596 | static void arm_smmu_tlb_inv_context(void *cookie) | 612 | static void arm_smmu_tlb_sync_vmid(void *cookie) |
613 | { | ||
614 | struct arm_smmu_domain *smmu_domain = cookie; | ||
615 | |||
616 | arm_smmu_tlb_sync_global(smmu_domain->smmu); | ||
617 | } | ||
618 | |||
619 | static void arm_smmu_tlb_inv_context_s1(void *cookie) | ||
597 | { | 620 | { |
598 | struct arm_smmu_domain *smmu_domain = cookie; | 621 | struct arm_smmu_domain *smmu_domain = cookie; |
599 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 622 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
600 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 623 | void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); |
601 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | ||
602 | void __iomem *base; | ||
603 | 624 | ||
604 | if (stage1) { | 625 | writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); |
605 | base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 626 | arm_smmu_tlb_sync_context(cookie); |
606 | writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg), | 627 | } |
607 | base + ARM_SMMU_CB_S1_TLBIASID); | 628 | |
608 | } else { | 629 | static void arm_smmu_tlb_inv_context_s2(void *cookie) |
609 | base = ARM_SMMU_GR0(smmu); | 630 | { |
610 | writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), | 631 | struct arm_smmu_domain *smmu_domain = cookie; |
611 | base + ARM_SMMU_GR0_TLBIVMID); | 632 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
612 | } | 633 | void __iomem *base = ARM_SMMU_GR0(smmu); |
613 | 634 | ||
614 | __arm_smmu_tlb_sync(smmu); | 635 | writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); |
636 | arm_smmu_tlb_sync_global(smmu); | ||
615 | } | 637 | } |
616 | 638 | ||
617 | static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, | 639 | static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, |
@@ -619,31 +641,28 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, | |||
619 | { | 641 | { |
620 | struct arm_smmu_domain *smmu_domain = cookie; | 642 | struct arm_smmu_domain *smmu_domain = cookie; |
621 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 643 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
622 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
623 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | 644 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
624 | void __iomem *reg; | 645 | void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); |
625 | 646 | ||
626 | if (stage1) { | 647 | if (stage1) { |
627 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | ||
628 | reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; | 648 | reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; |
629 | 649 | ||
630 | if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { | 650 | if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { |
631 | iova &= ~12UL; | 651 | iova &= ~12UL; |
632 | iova |= ARM_SMMU_CB_ASID(smmu, cfg); | 652 | iova |= cfg->asid; |
633 | do { | 653 | do { |
634 | writel_relaxed(iova, reg); | 654 | writel_relaxed(iova, reg); |
635 | iova += granule; | 655 | iova += granule; |
636 | } while (size -= granule); | 656 | } while (size -= granule); |
637 | } else { | 657 | } else { |
638 | iova >>= 12; | 658 | iova >>= 12; |
639 | iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48; | 659 | iova |= (u64)cfg->asid << 48; |
640 | do { | 660 | do { |
641 | writeq_relaxed(iova, reg); | 661 | writeq_relaxed(iova, reg); |
642 | iova += granule >> 12; | 662 | iova += granule >> 12; |
643 | } while (size -= granule); | 663 | } while (size -= granule); |
644 | } | 664 | } |
645 | } else if (smmu->version == ARM_SMMU_V2) { | 665 | } else { |
646 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | ||
647 | reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : | 666 | reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : |
648 | ARM_SMMU_CB_S2_TLBIIPAS2; | 667 | ARM_SMMU_CB_S2_TLBIIPAS2; |
649 | iova >>= 12; | 668 | iova >>= 12; |
@@ -651,16 +670,40 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, | |||
651 | smmu_write_atomic_lq(iova, reg); | 670 | smmu_write_atomic_lq(iova, reg); |
652 | iova += granule >> 12; | 671 | iova += granule >> 12; |
653 | } while (size -= granule); | 672 | } while (size -= granule); |
654 | } else { | ||
655 | reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; | ||
656 | writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg); | ||
657 | } | 673 | } |
658 | } | 674 | } |
659 | 675 | ||
660 | static const struct iommu_gather_ops arm_smmu_gather_ops = { | 676 | /* |
661 | .tlb_flush_all = arm_smmu_tlb_inv_context, | 677 | * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears |
678 | * almost negligible, but the benefit of getting the first one in as far ahead | ||
679 | * of the sync as possible is significant, hence we don't just make this a | ||
680 | * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think. | ||
681 | */ | ||
682 | static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, | ||
683 | size_t granule, bool leaf, void *cookie) | ||
684 | { | ||
685 | struct arm_smmu_domain *smmu_domain = cookie; | ||
686 | void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); | ||
687 | |||
688 | writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); | ||
689 | } | ||
690 | |||
691 | static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { | ||
692 | .tlb_flush_all = arm_smmu_tlb_inv_context_s1, | ||
662 | .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, | 693 | .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, |
663 | .tlb_sync = arm_smmu_tlb_sync, | 694 | .tlb_sync = arm_smmu_tlb_sync_context, |
695 | }; | ||
696 | |||
697 | static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { | ||
698 | .tlb_flush_all = arm_smmu_tlb_inv_context_s2, | ||
699 | .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, | ||
700 | .tlb_sync = arm_smmu_tlb_sync_context, | ||
701 | }; | ||
702 | |||
703 | static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { | ||
704 | .tlb_flush_all = arm_smmu_tlb_inv_context_s2, | ||
705 | .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, | ||
706 | .tlb_sync = arm_smmu_tlb_sync_vmid, | ||
664 | }; | 707 | }; |
665 | 708 | ||
666 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | 709 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
@@ -673,7 +716,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
673 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 716 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
674 | void __iomem *cb_base; | 717 | void __iomem *cb_base; |
675 | 718 | ||
676 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 719 | cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); |
677 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); | 720 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); |
678 | 721 | ||
679 | if (!(fsr & FSR_FAULT)) | 722 | if (!(fsr & FSR_FAULT)) |
@@ -726,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
726 | 769 | ||
727 | gr1_base = ARM_SMMU_GR1(smmu); | 770 | gr1_base = ARM_SMMU_GR1(smmu); |
728 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | 771 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
729 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 772 | cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); |
730 | 773 | ||
731 | if (smmu->version > ARM_SMMU_V1) { | 774 | if (smmu->version > ARM_SMMU_V1) { |
732 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) | 775 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) |
@@ -735,7 +778,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
735 | reg = CBA2R_RW64_32BIT; | 778 | reg = CBA2R_RW64_32BIT; |
736 | /* 16-bit VMIDs live in CBA2R */ | 779 | /* 16-bit VMIDs live in CBA2R */ |
737 | if (smmu->features & ARM_SMMU_FEAT_VMID16) | 780 | if (smmu->features & ARM_SMMU_FEAT_VMID16) |
738 | reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT; | 781 | reg |= cfg->vmid << CBA2R_VMID_SHIFT; |
739 | 782 | ||
740 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); | 783 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); |
741 | } | 784 | } |
@@ -754,34 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
754 | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); | 797 | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); |
755 | } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { | 798 | } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { |
756 | /* 8-bit VMIDs live in CBAR */ | 799 | /* 8-bit VMIDs live in CBAR */ |
757 | reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT; | 800 | reg |= cfg->vmid << CBAR_VMID_SHIFT; |
758 | } | 801 | } |
759 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); | 802 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); |
760 | 803 | ||
761 | /* TTBRs */ | 804 | /* |
762 | if (stage1) { | 805 | * TTBCR |
763 | u16 asid = ARM_SMMU_CB_ASID(smmu, cfg); | 806 | * We must write this before the TTBRs, since it determines the |
764 | 807 | * access behaviour of some fields (in particular, ASID[15:8]). | |
765 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { | 808 | */ |
766 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; | ||
767 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); | ||
768 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; | ||
769 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); | ||
770 | writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); | ||
771 | } else { | ||
772 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | ||
773 | reg64 |= (u64)asid << TTBRn_ASID_SHIFT; | ||
774 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | ||
775 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; | ||
776 | reg64 |= (u64)asid << TTBRn_ASID_SHIFT; | ||
777 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); | ||
778 | } | ||
779 | } else { | ||
780 | reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; | ||
781 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | ||
782 | } | ||
783 | |||
784 | /* TTBCR */ | ||
785 | if (stage1) { | 809 | if (stage1) { |
786 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { | 810 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { |
787 | reg = pgtbl_cfg->arm_v7s_cfg.tcr; | 811 | reg = pgtbl_cfg->arm_v7s_cfg.tcr; |
@@ -800,6 +824,27 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
800 | } | 824 | } |
801 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | 825 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); |
802 | 826 | ||
827 | /* TTBRs */ | ||
828 | if (stage1) { | ||
829 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { | ||
830 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; | ||
831 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); | ||
832 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; | ||
833 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); | ||
834 | writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); | ||
835 | } else { | ||
836 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | ||
837 | reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; | ||
838 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | ||
839 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; | ||
840 | reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT; | ||
841 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); | ||
842 | } | ||
843 | } else { | ||
844 | reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; | ||
845 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | ||
846 | } | ||
847 | |||
803 | /* MAIRs (stage-1 only) */ | 848 | /* MAIRs (stage-1 only) */ |
804 | if (stage1) { | 849 | if (stage1) { |
805 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { | 850 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { |
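With the move above, TTBCR is programmed before the TTBRs so that the ASID field carried in the 64-bit TTBR writes is interpreted as intended. The packing itself is plain bit arithmetic; a small standalone sketch (the shift value mirrors the driver's TTBRn_ASID_SHIFT but is restated here only for illustration):

        #include <stdint.h>
        #include <stdio.h>
        #include <inttypes.h>

        #define TTBRN_ASID_SHIFT 48     /* illustrative restatement of the driver constant */

        static uint64_t make_ttbr(uint64_t table_base, uint16_t asid)
        {
                /* Translation table base in the low bits, ASID tagged in the upper field. */
                return table_base | ((uint64_t)asid << TTBRN_ASID_SHIFT);
        }

        int main(void)
        {
                printf("TTBR0 = 0x%016" PRIx64 "\n", make_ttbr(0x80004000ULL, 0x2a));
                return 0;
        }
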
@@ -833,11 +878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
833 | enum io_pgtable_fmt fmt; | 878 | enum io_pgtable_fmt fmt; |
834 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 879 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
835 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 880 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
881 | const struct iommu_gather_ops *tlb_ops; | ||
836 | 882 | ||
837 | mutex_lock(&smmu_domain->init_mutex); | 883 | mutex_lock(&smmu_domain->init_mutex); |
838 | if (smmu_domain->smmu) | 884 | if (smmu_domain->smmu) |
839 | goto out_unlock; | 885 | goto out_unlock; |
840 | 886 | ||
887 | if (domain->type == IOMMU_DOMAIN_IDENTITY) { | ||
888 | smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; | ||
889 | smmu_domain->smmu = smmu; | ||
890 | goto out_unlock; | ||
891 | } | ||
892 | |||
841 | /* | 893 | /* |
842 | * Mapping the requested stage onto what we support is surprisingly | 894 | * Mapping the requested stage onto what we support is surprisingly |
843 | * complicated, mainly because the spec allows S1+S2 SMMUs without | 895 | * complicated, mainly because the spec allows S1+S2 SMMUs without |
@@ -904,6 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
904 | ias = min(ias, 32UL); | 956 | ias = min(ias, 32UL); |
905 | oas = min(oas, 32UL); | 957 | oas = min(oas, 32UL); |
906 | } | 958 | } |
959 | tlb_ops = &arm_smmu_s1_tlb_ops; | ||
907 | break; | 960 | break; |
908 | case ARM_SMMU_DOMAIN_NESTED: | 961 | case ARM_SMMU_DOMAIN_NESTED: |
909 | /* | 962 | /* |
@@ -922,12 +975,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
922 | ias = min(ias, 40UL); | 975 | ias = min(ias, 40UL); |
923 | oas = min(oas, 40UL); | 976 | oas = min(oas, 40UL); |
924 | } | 977 | } |
978 | if (smmu->version == ARM_SMMU_V2) | ||
979 | tlb_ops = &arm_smmu_s2_tlb_ops_v2; | ||
980 | else | ||
981 | tlb_ops = &arm_smmu_s2_tlb_ops_v1; | ||
925 | break; | 982 | break; |
926 | default: | 983 | default: |
927 | ret = -EINVAL; | 984 | ret = -EINVAL; |
928 | goto out_unlock; | 985 | goto out_unlock; |
929 | } | 986 | } |
930 | |||
931 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | 987 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, |
932 | smmu->num_context_banks); | 988 | smmu->num_context_banks); |
933 | if (ret < 0) | 989 | if (ret < 0) |
@@ -941,11 +997,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
941 | cfg->irptndx = cfg->cbndx; | 997 | cfg->irptndx = cfg->cbndx; |
942 | } | 998 | } |
943 | 999 | ||
1000 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) | ||
1001 | cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base; | ||
1002 | else | ||
1003 | cfg->asid = cfg->cbndx + smmu->cavium_id_base; | ||
1004 | |||
944 | pgtbl_cfg = (struct io_pgtable_cfg) { | 1005 | pgtbl_cfg = (struct io_pgtable_cfg) { |
945 | .pgsize_bitmap = smmu->pgsize_bitmap, | 1006 | .pgsize_bitmap = smmu->pgsize_bitmap, |
946 | .ias = ias, | 1007 | .ias = ias, |
947 | .oas = oas, | 1008 | .oas = oas, |
948 | .tlb = &arm_smmu_gather_ops, | 1009 | .tlb = tlb_ops, |
949 | .iommu_dev = smmu->dev, | 1010 | .iommu_dev = smmu->dev, |
950 | }; | 1011 | }; |
951 | 1012 | ||
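The new cfg->vmid/cfg->asid assignment above derives the identifier straight from the context bank index plus the Cavium erratum offset, with VMID 0 left reserved so stage-2 contexts start at cbndx + 1. A minimal sketch of that numbering, with the offset value in the example being purely hypothetical:

        #include <stdio.h>

        /* Stage-2 VMIDs skip 0 (reserved); stage-1 ASIDs map 1:1 to context banks.
         * cavium_id_base is 0 except on SMMUs affected by Cavium erratum 27704. */
        static int context_id(int cbndx, int cavium_id_base, int stage2)
        {
                return stage2 ? cbndx + 1 + cavium_id_base
                              : cbndx + cavium_id_base;
        }

        int main(void)
        {
                printf("cb 0, stage 1 -> ASID %d\n", context_id(0, 0, 0));
                printf("cb 0, stage 2 -> VMID %d\n", context_id(0, 0, 1));
                printf("cb 3, stage 2, offset 128 -> VMID %d\n", context_id(3, 128, 1));
                return 0;
        }
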
@@ -998,14 +1059,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
998 | void __iomem *cb_base; | 1059 | void __iomem *cb_base; |
999 | int irq; | 1060 | int irq; |
1000 | 1061 | ||
1001 | if (!smmu) | 1062 | if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) |
1002 | return; | 1063 | return; |
1003 | 1064 | ||
1004 | /* | 1065 | /* |
1005 | * Disable the context bank and free the page tables before freeing | 1066 | * Disable the context bank and free the page tables before freeing |
1006 | * it. | 1067 | * it. |
1007 | */ | 1068 | */ |
1008 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 1069 | cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); |
1009 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | 1070 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
1010 | 1071 | ||
1011 | if (cfg->irptndx != INVALID_IRPTNDX) { | 1072 | if (cfg->irptndx != INVALID_IRPTNDX) { |
@@ -1021,7 +1082,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) | |||
1021 | { | 1082 | { |
1022 | struct arm_smmu_domain *smmu_domain; | 1083 | struct arm_smmu_domain *smmu_domain; |
1023 | 1084 | ||
1024 | if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) | 1085 | if (type != IOMMU_DOMAIN_UNMANAGED && |
1086 | type != IOMMU_DOMAIN_DMA && | ||
1087 | type != IOMMU_DOMAIN_IDENTITY) | ||
1025 | return NULL; | 1088 | return NULL; |
1026 | /* | 1089 | /* |
1027 | * Allocate the domain and initialise some of its data structures. | 1090 | * Allocate the domain and initialise some of its data structures. |
@@ -1250,10 +1313,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | |||
1250 | { | 1313 | { |
1251 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1314 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1252 | struct arm_smmu_s2cr *s2cr = smmu->s2crs; | 1315 | struct arm_smmu_s2cr *s2cr = smmu->s2crs; |
1253 | enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; | ||
1254 | u8 cbndx = smmu_domain->cfg.cbndx; | 1316 | u8 cbndx = smmu_domain->cfg.cbndx; |
1317 | enum arm_smmu_s2cr_type type; | ||
1255 | int i, idx; | 1318 | int i, idx; |
1256 | 1319 | ||
1320 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) | ||
1321 | type = S2CR_TYPE_BYPASS; | ||
1322 | else | ||
1323 | type = S2CR_TYPE_TRANS; | ||
1324 | |||
1257 | for_each_cfg_sme(fwspec, i, idx) { | 1325 | for_each_cfg_sme(fwspec, i, idx) { |
1258 | if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) | 1326 | if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) |
1259 | continue; | 1327 | continue; |
@@ -1356,7 +1424,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, | |||
1356 | u64 phys; | 1424 | u64 phys; |
1357 | unsigned long va; | 1425 | unsigned long va; |
1358 | 1426 | ||
1359 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 1427 | cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); |
1360 | 1428 | ||
1361 | /* ATS1 registers can only be written atomically */ | 1429 | /* ATS1 registers can only be written atomically */ |
1362 | va = iova & ~0xfffUL; | 1430 | va = iova & ~0xfffUL; |
@@ -1391,6 +1459,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
1391 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1459 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1392 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; | 1460 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; |
1393 | 1461 | ||
1462 | if (domain->type == IOMMU_DOMAIN_IDENTITY) | ||
1463 | return iova; | ||
1464 | |||
1394 | if (!ops) | 1465 | if (!ops) |
1395 | return 0; | 1466 | return 0; |
1396 | 1467 | ||
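Taken together, the IOMMU_DOMAIN_IDENTITY hunks form one small state machine: domain_alloc() accepts the new type, init skips context-bank and page-table setup and marks the domain ARM_SMMU_DOMAIN_BYPASS, attach programs the S2CRs as bypass rather than translate, and iova_to_phys() becomes the identity function. A compact userspace sketch of that flow, with illustrative names standing in for the driver's structures:

        #include <stdint.h>
        #include <stdio.h>

        enum domain_type  { DOMAIN_DMA, DOMAIN_UNMANAGED, DOMAIN_IDENTITY };
        enum domain_stage { STAGE_S1, STAGE_S2, STAGE_BYPASS };
        enum s2cr_type    { S2CR_TRANS, S2CR_BYPASS };

        struct demo_domain {
                enum domain_type  type;
                enum domain_stage stage;
                int has_pgtable;
        };

        static void init_domain(struct demo_domain *d)
        {
                if (d->type == DOMAIN_IDENTITY) {
                        d->stage = STAGE_BYPASS;        /* no context bank, no tables */
                        d->has_pgtable = 0;
                        return;
                }
                d->stage = STAGE_S1;                    /* normal translating path */
                d->has_pgtable = 1;
        }

        static enum s2cr_type attach_type(const struct demo_domain *d)
        {
                return d->stage == STAGE_BYPASS ? S2CR_BYPASS : S2CR_TRANS;
        }

        static uint64_t iova_to_phys(const struct demo_domain *d, uint64_t iova)
        {
                if (d->type == DOMAIN_IDENTITY)
                        return iova;                    /* 1:1, nothing to walk */
                return 0;                               /* would walk the page table here */
        }

        int main(void)
        {
                struct demo_domain d = { .type = DOMAIN_IDENTITY };

                init_domain(&d);
                printf("S2CR type: %s\n",
                       attach_type(&d) == S2CR_BYPASS ? "bypass" : "translate");
                printf("iova_to_phys(0x1000) = 0x%llx\n",
                       (unsigned long long)iova_to_phys(&d, 0x1000));
                return 0;
        }
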
@@ -1467,7 +1538,7 @@ static int arm_smmu_add_device(struct device *dev) | |||
1467 | } | 1538 | } |
1468 | if (mask & ~smmu->smr_mask_mask) { | 1539 | if (mask & ~smmu->smr_mask_mask) { |
1469 | dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", | 1540 | dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", |
1470 | sid, smmu->smr_mask_mask); | 1541 | mask, smmu->smr_mask_mask); |
1471 | goto out_free; | 1542 | goto out_free; |
1472 | } | 1543 | } |
1473 | } | 1544 | } |
@@ -1549,6 +1620,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | |||
1549 | { | 1620 | { |
1550 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1621 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1551 | 1622 | ||
1623 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
1624 | return -EINVAL; | ||
1625 | |||
1552 | switch (attr) { | 1626 | switch (attr) { |
1553 | case DOMAIN_ATTR_NESTING: | 1627 | case DOMAIN_ATTR_NESTING: |
1554 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); | 1628 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); |
@@ -1564,6 +1638,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | |||
1564 | int ret = 0; | 1638 | int ret = 0; |
1565 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1639 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1566 | 1640 | ||
1641 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
1642 | return -EINVAL; | ||
1643 | |||
1567 | mutex_lock(&smmu_domain->init_mutex); | 1644 | mutex_lock(&smmu_domain->init_mutex); |
1568 | 1645 | ||
1569 | switch (attr) { | 1646 | switch (attr) { |
@@ -1590,13 +1667,15 @@ out_unlock: | |||
1590 | 1667 | ||
1591 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) | 1668 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) |
1592 | { | 1669 | { |
1593 | u32 fwid = 0; | 1670 | u32 mask, fwid = 0; |
1594 | 1671 | ||
1595 | if (args->args_count > 0) | 1672 | if (args->args_count > 0) |
1596 | fwid |= (u16)args->args[0]; | 1673 | fwid |= (u16)args->args[0]; |
1597 | 1674 | ||
1598 | if (args->args_count > 1) | 1675 | if (args->args_count > 1) |
1599 | fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; | 1676 | fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; |
1677 | else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) | ||
1678 | fwid |= (u16)mask << SMR_MASK_SHIFT; | ||
1600 | 1679 | ||
1601 | return iommu_fwspec_add_ids(dev, &fwid, 1); | 1680 | return iommu_fwspec_add_ids(dev, &fwid, 1); |
1602 | } | 1681 | } |
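The of_xlate change above lets a "stream-match-mask" property supply the SMR mask when a single-cell binding only encodes the stream ID; both halves end up packed into one 32-bit fwid. A sketch of that packing (SMR_MASK_SHIFT of 16 matches the driver; the other names are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define SMR_MASK_SHIFT 16       /* stream ID in the low half, SMR mask in the high half */

        static uint32_t make_fwid(uint16_t sid, uint16_t mask)
        {
                return (uint32_t)sid | ((uint32_t)mask << SMR_MASK_SHIFT);
        }

        int main(void)
        {
                /* e.g. SID 0x42 with a property-supplied mask of 0x7f80 */
                printf("fwid = 0x%08x\n", make_fwid(0x42, 0x7f80));
                return 0;
        }
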
@@ -1613,6 +1692,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, | |||
1613 | return; | 1692 | return; |
1614 | 1693 | ||
1615 | list_add_tail(®ion->list, head); | 1694 | list_add_tail(®ion->list, head); |
1695 | |||
1696 | iommu_dma_get_resv_regions(dev, head); | ||
1616 | } | 1697 | } |
1617 | 1698 | ||
1618 | static void arm_smmu_put_resv_regions(struct device *dev, | 1699 | static void arm_smmu_put_resv_regions(struct device *dev, |
@@ -1683,7 +1764,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1683 | 1764 | ||
1684 | /* Make sure all context banks are disabled and clear CB_FSR */ | 1765 | /* Make sure all context banks are disabled and clear CB_FSR */ |
1685 | for (i = 0; i < smmu->num_context_banks; ++i) { | 1766 | for (i = 0; i < smmu->num_context_banks; ++i) { |
1686 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); | 1767 | cb_base = ARM_SMMU_CB(smmu, i); |
1687 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | 1768 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
1688 | writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); | 1769 | writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); |
1689 | /* | 1770 | /* |
@@ -1729,7 +1810,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1729 | reg |= sCR0_EXIDENABLE; | 1810 | reg |= sCR0_EXIDENABLE; |
1730 | 1811 | ||
1731 | /* Push the button */ | 1812 | /* Push the button */ |
1732 | __arm_smmu_tlb_sync(smmu); | 1813 | arm_smmu_tlb_sync_global(smmu); |
1733 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); | 1814 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
1734 | } | 1815 | } |
1735 | 1816 | ||
@@ -1863,11 +1944,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1863 | 1944 | ||
1864 | /* Check for size mismatch of SMMU address space from mapped region */ | 1945 | /* Check for size mismatch of SMMU address space from mapped region */ |
1865 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | 1946 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); |
1866 | size *= 2 << smmu->pgshift; | 1947 | size <<= smmu->pgshift; |
1867 | if (smmu->size != size) | 1948 | if (smmu->cb_base != gr0_base + size) |
1868 | dev_warn(smmu->dev, | 1949 | dev_warn(smmu->dev, |
1869 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", | 1950 | "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", |
1870 | size, smmu->size); | 1951 | size * 2, (smmu->cb_base - gr0_base) * 2); |
1871 | 1952 | ||
1872 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; | 1953 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; |
1873 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; | 1954 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; |
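The warning above now checks the mapped region against smmu->cb_base rather than a stored size: ID1.NUMPAGENDXB encodes the page count as 2^(n+1), the global register space fills the first half of the region and the context banks the second, so cb_base should sit exactly that many bytes past the global base. A small sketch of the arithmetic (field values chosen only for illustration):

        #include <stdio.h>

        /* ID1 encodes log2(pages) - 1; each page is 1 << pgshift bytes.  The global
         * space is the first half of the region, the context banks the second, so
         * the whole mapping is twice this size. */
        static unsigned long global_space_bytes(unsigned int numpagendxb, unsigned int pgshift)
        {
                unsigned long size = 1UL << (numpagendxb + 1);

                return size << pgshift;
        }

        int main(void)
        {
                unsigned long size = global_space_bytes(3, 12); /* 16 x 4K pages */

                printf("global space %lu KiB, context banks at +0x%lx, region %lu KiB\n",
                       size / 1024, size, 2 * size / 1024);
                return 0;
        }
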
@@ -1887,6 +1968,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1887 | atomic_add_return(smmu->num_context_banks, | 1968 | atomic_add_return(smmu->num_context_banks, |
1888 | &cavium_smmu_context_count); | 1969 | &cavium_smmu_context_count); |
1889 | smmu->cavium_id_base -= smmu->num_context_banks; | 1970 | smmu->cavium_id_base -= smmu->num_context_banks; |
1971 | dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); | ||
1890 | } | 1972 | } |
1891 | 1973 | ||
1892 | /* ID2 */ | 1974 | /* ID2 */ |
@@ -2075,6 +2157,23 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, | |||
2075 | return 0; | 2157 | return 0; |
2076 | } | 2158 | } |
2077 | 2159 | ||
2160 | static void arm_smmu_bus_init(void) | ||
2161 | { | ||
2162 | /* Oh, for a proper bus abstraction */ | ||
2163 | if (!iommu_present(&platform_bus_type)) | ||
2164 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
2165 | #ifdef CONFIG_ARM_AMBA | ||
2166 | if (!iommu_present(&amba_bustype)) | ||
2167 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
2168 | #endif | ||
2169 | #ifdef CONFIG_PCI | ||
2170 | if (!iommu_present(&pci_bus_type)) { | ||
2171 | pci_request_acs(); | ||
2172 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | ||
2173 | } | ||
2174 | #endif | ||
2175 | } | ||
2176 | |||
2078 | static int arm_smmu_device_probe(struct platform_device *pdev) | 2177 | static int arm_smmu_device_probe(struct platform_device *pdev) |
2079 | { | 2178 | { |
2080 | struct resource *res; | 2179 | struct resource *res; |
@@ -2103,7 +2202,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2103 | smmu->base = devm_ioremap_resource(dev, res); | 2202 | smmu->base = devm_ioremap_resource(dev, res); |
2104 | if (IS_ERR(smmu->base)) | 2203 | if (IS_ERR(smmu->base)) |
2105 | return PTR_ERR(smmu->base); | 2204 | return PTR_ERR(smmu->base); |
2106 | smmu->size = resource_size(res); | 2205 | smmu->cb_base = smmu->base + resource_size(res) / 2; |
2107 | 2206 | ||
2108 | num_irqs = 0; | 2207 | num_irqs = 0; |
2109 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { | 2208 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { |
@@ -2180,21 +2279,30 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2180 | arm_smmu_device_reset(smmu); | 2279 | arm_smmu_device_reset(smmu); |
2181 | arm_smmu_test_smr_masks(smmu); | 2280 | arm_smmu_test_smr_masks(smmu); |
2182 | 2281 | ||
2183 | /* Oh, for a proper bus abstraction */ | 2282 | /* |
2184 | if (!iommu_present(&platform_bus_type)) | 2283 | * For ACPI and generic DT bindings, an SMMU will be probed before |
2185 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | 2284 | * any device which might need it, so we want the bus ops in place |
2186 | #ifdef CONFIG_ARM_AMBA | 2285 | * ready to handle default domain setup as soon as any SMMU exists. |
2187 | if (!iommu_present(&amba_bustype)) | 2286 | */ |
2188 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | 2287 | if (!using_legacy_binding) |
2189 | #endif | 2288 | arm_smmu_bus_init(); |
2190 | #ifdef CONFIG_PCI | 2289 | |
2191 | if (!iommu_present(&pci_bus_type)) { | 2290 | return 0; |
2192 | pci_request_acs(); | 2291 | } |
2193 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | 2292 | |
2194 | } | 2293 | /* |
2195 | #endif | 2294 | * With the legacy DT binding in play, though, we have no guarantees about |
2295 | * probe order, but then we're also not doing default domains, so we can | ||
2296 | * delay setting bus ops until we're sure every possible SMMU is ready, | ||
2297 | * and that way ensure that no add_device() calls get missed. | ||
2298 | */ | ||
2299 | static int arm_smmu_legacy_bus_init(void) | ||
2300 | { | ||
2301 | if (using_legacy_binding) | ||
2302 | arm_smmu_bus_init(); | ||
2196 | return 0; | 2303 | return 0; |
2197 | } | 2304 | } |
2305 | device_initcall_sync(arm_smmu_legacy_bus_init); | ||
2198 | 2306 | ||
2199 | static int arm_smmu_device_remove(struct platform_device *pdev) | 2307 | static int arm_smmu_device_remove(struct platform_device *pdev) |
2200 | { | 2308 | { |
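The probe-time vs initcall split above exists because the two binding styles give different ordering guarantees: with generic DT/ACPI bindings the SMMU probes before its masters, so the bus ops can be installed immediately, while the legacy mmu-masters binding offers no such guarantee and installation is deferred to a late initcall once every SMMU has had a chance to probe. A sketch of the same gate, with an ordinary function standing in for device_initcall_sync():

        #include <stdbool.h>
        #include <stdio.h>

        static bool using_legacy_binding;
        static bool bus_ops_installed;

        static void bus_init(void)
        {
                if (!bus_ops_installed) {
                        bus_ops_installed = true;
                        puts("bus ops installed");
                }
        }

        static void on_probe(void)
        {
                if (!using_legacy_binding)
                        bus_init();             /* generic bindings: safe immediately */
        }

        static void late_init(void)             /* stand-in for the initcall hook */
        {
                if (using_legacy_binding)
                        bus_init();             /* legacy binding: wait for all probes */
        }

        int main(void)
        {
                using_legacy_binding = true;
                on_probe();                     /* nothing yet */
                on_probe();
                late_init();                    /* now install exactly once */
                return 0;
        }
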
@@ -2219,56 +2327,14 @@ static struct platform_driver arm_smmu_driver = { | |||
2219 | .probe = arm_smmu_device_probe, | 2327 | .probe = arm_smmu_device_probe, |
2220 | .remove = arm_smmu_device_remove, | 2328 | .remove = arm_smmu_device_remove, |
2221 | }; | 2329 | }; |
2222 | 2330 | module_platform_driver(arm_smmu_driver); | |
2223 | static int __init arm_smmu_init(void) | 2331 | |
2224 | { | 2332 | IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL); |
2225 | static bool registered; | 2333 | IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL); |
2226 | int ret = 0; | 2334 | IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL); |
2227 | 2335 | IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL); | |
2228 | if (!registered) { | 2336 | IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL); |
2229 | ret = platform_driver_register(&arm_smmu_driver); | 2337 | IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL); |
2230 | registered = !ret; | ||
2231 | } | ||
2232 | return ret; | ||
2233 | } | ||
2234 | |||
2235 | static void __exit arm_smmu_exit(void) | ||
2236 | { | ||
2237 | return platform_driver_unregister(&arm_smmu_driver); | ||
2238 | } | ||
2239 | |||
2240 | subsys_initcall(arm_smmu_init); | ||
2241 | module_exit(arm_smmu_exit); | ||
2242 | |||
2243 | static int __init arm_smmu_of_init(struct device_node *np) | ||
2244 | { | ||
2245 | int ret = arm_smmu_init(); | ||
2246 | |||
2247 | if (ret) | ||
2248 | return ret; | ||
2249 | |||
2250 | if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) | ||
2251 | return -ENODEV; | ||
2252 | |||
2253 | return 0; | ||
2254 | } | ||
2255 | IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); | ||
2256 | IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); | ||
2257 | IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); | ||
2258 | IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); | ||
2259 | IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); | ||
2260 | IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); | ||
2261 | |||
2262 | #ifdef CONFIG_ACPI | ||
2263 | static int __init arm_smmu_acpi_init(struct acpi_table_header *table) | ||
2264 | { | ||
2265 | if (iort_node_match(ACPI_IORT_NODE_SMMU)) | ||
2266 | return arm_smmu_init(); | ||
2267 | |||
2268 | return 0; | ||
2269 | } | ||
2270 | IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init); | ||
2271 | #endif | ||
2272 | 2338 | ||
2273 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | 2339 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); |
2274 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | 2340 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 48d36ce59efb..8348f366ddd1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) | |||
61 | return PAGE_SIZE; | 61 | return PAGE_SIZE; |
62 | } | 62 | } |
63 | 63 | ||
64 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) | ||
65 | { | ||
66 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | ||
67 | |||
68 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) | ||
69 | return &cookie->iovad; | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) | 64 | static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) |
74 | { | 65 | { |
75 | struct iommu_dma_cookie *cookie; | 66 | struct iommu_dma_cookie *cookie; |
@@ -167,22 +158,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
167 | } | 158 | } |
168 | EXPORT_SYMBOL(iommu_put_dma_cookie); | 159 | EXPORT_SYMBOL(iommu_put_dma_cookie); |
169 | 160 | ||
170 | static void iova_reserve_pci_windows(struct pci_dev *dev, | 161 | /** |
171 | struct iova_domain *iovad) | 162 | * iommu_dma_get_resv_regions - Reserved region driver helper |
163 | * @dev: Device from iommu_get_resv_regions() | ||
164 | * @list: Reserved region list from iommu_get_resv_regions() | ||
165 | * | ||
166 | * IOMMU drivers can use this to implement their .get_resv_regions callback | ||
167 | * for general non-IOMMU-specific reservations. Currently, this covers host | ||
168 | * bridge windows for PCI devices. | ||
169 | */ | ||
170 | void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) | ||
172 | { | 171 | { |
173 | struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); | 172 | struct pci_host_bridge *bridge; |
174 | struct resource_entry *window; | 173 | struct resource_entry *window; |
175 | unsigned long lo, hi; | ||
176 | 174 | ||
175 | if (!dev_is_pci(dev)) | ||
176 | return; | ||
177 | |||
178 | bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); | ||
177 | resource_list_for_each_entry(window, &bridge->windows) { | 179 | resource_list_for_each_entry(window, &bridge->windows) { |
178 | if (resource_type(window->res) != IORESOURCE_MEM && | 180 | struct iommu_resv_region *region; |
179 | resource_type(window->res) != IORESOURCE_IO) | 181 | phys_addr_t start; |
182 | size_t length; | ||
183 | |||
184 | if (resource_type(window->res) != IORESOURCE_MEM) | ||
185 | continue; | ||
186 | |||
187 | start = window->res->start - window->offset; | ||
188 | length = window->res->end - window->res->start + 1; | ||
189 | region = iommu_alloc_resv_region(start, length, 0, | ||
190 | IOMMU_RESV_RESERVED); | ||
191 | if (!region) | ||
192 | return; | ||
193 | |||
194 | list_add_tail(®ion->list, list); | ||
195 | } | ||
196 | } | ||
197 | EXPORT_SYMBOL(iommu_dma_get_resv_regions); | ||
198 | |||
199 | static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, | ||
200 | phys_addr_t start, phys_addr_t end) | ||
201 | { | ||
202 | struct iova_domain *iovad = &cookie->iovad; | ||
203 | struct iommu_dma_msi_page *msi_page; | ||
204 | int i, num_pages; | ||
205 | |||
206 | start -= iova_offset(iovad, start); | ||
207 | num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); | ||
208 | |||
209 | msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); | ||
210 | if (!msi_page) | ||
211 | return -ENOMEM; | ||
212 | |||
213 | for (i = 0; i < num_pages; i++) { | ||
214 | msi_page[i].phys = start; | ||
215 | msi_page[i].iova = start; | ||
216 | INIT_LIST_HEAD(&msi_page[i].list); | ||
217 | list_add(&msi_page[i].list, &cookie->msi_page_list); | ||
218 | start += iovad->granule; | ||
219 | } | ||
220 | |||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static int iova_reserve_iommu_regions(struct device *dev, | ||
225 | struct iommu_domain *domain) | ||
226 | { | ||
227 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | ||
228 | struct iova_domain *iovad = &cookie->iovad; | ||
229 | struct iommu_resv_region *region; | ||
230 | LIST_HEAD(resv_regions); | ||
231 | int ret = 0; | ||
232 | |||
233 | iommu_get_resv_regions(dev, &resv_regions); | ||
234 | list_for_each_entry(region, &resv_regions, list) { | ||
235 | unsigned long lo, hi; | ||
236 | |||
237 | /* We ARE the software that manages these! */ | ||
238 | if (region->type == IOMMU_RESV_SW_MSI) | ||
180 | continue; | 239 | continue; |
181 | 240 | ||
182 | lo = iova_pfn(iovad, window->res->start - window->offset); | 241 | lo = iova_pfn(iovad, region->start); |
183 | hi = iova_pfn(iovad, window->res->end - window->offset); | 242 | hi = iova_pfn(iovad, region->start + region->length - 1); |
184 | reserve_iova(iovad, lo, hi); | 243 | reserve_iova(iovad, lo, hi); |
244 | |||
245 | if (region->type == IOMMU_RESV_MSI) | ||
246 | ret = cookie_init_hw_msi_region(cookie, region->start, | ||
247 | region->start + region->length); | ||
248 | if (ret) | ||
249 | break; | ||
185 | } | 250 | } |
251 | iommu_put_resv_regions(dev, &resv_regions); | ||
252 | |||
253 | return ret; | ||
186 | } | 254 | } |
187 | 255 | ||
188 | /** | 256 | /** |
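The iommu_dma_get_resv_regions() helper introduced above generalises the old PCI-window handling: host bridge memory windows are reported as reserved regions, and iova_reserve_iommu_regions() carves them out of the IOVA space (pre-populating MSI pages for IOMMU_RESV_MSI ranges). The window-to-region conversion is simple start/length arithmetic on the bridge resources; a sketch with made-up window values:

        #include <stdint.h>
        #include <stdio.h>

        struct resv_region {
                uint64_t start;
                uint64_t length;
        };

        /* A bridge window is reported in CPU (resource) terms plus a bus offset;
         * the reserved region must cover the bus-side addresses the IOMMU sees. */
        static struct resv_region window_to_region(uint64_t res_start, uint64_t res_end,
                                                   uint64_t offset)
        {
                struct resv_region r = {
                        .start  = res_start - offset,
                        .length = res_end - res_start + 1,
                };
                return r;
        }

        int main(void)
        {
                /* hypothetical 256 MiB window at 0xa0000000 with no translation offset */
                struct resv_region r = window_to_region(0xa0000000, 0xafffffff, 0);

                printf("reserve IOVA [0x%llx, 0x%llx)\n",
                       (unsigned long long)r.start,
                       (unsigned long long)(r.start + r.length));
                return 0;
        }
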
@@ -203,7 +271,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
203 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | 271 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
204 | struct iova_domain *iovad = &cookie->iovad; | 272 | struct iova_domain *iovad = &cookie->iovad; |
205 | unsigned long order, base_pfn, end_pfn; | 273 | unsigned long order, base_pfn, end_pfn; |
206 | bool pci = dev && dev_is_pci(dev); | ||
207 | 274 | ||
208 | if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) | 275 | if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) |
209 | return -EINVAL; | 276 | return -EINVAL; |
@@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
233 | * leave the cache limit at the top of their range to save an rb_last() | 300 | * leave the cache limit at the top of their range to save an rb_last() |
234 | * traversal on every allocation. | 301 | * traversal on every allocation. |
235 | */ | 302 | */ |
236 | if (pci) | 303 | if (dev && dev_is_pci(dev)) |
237 | end_pfn &= DMA_BIT_MASK(32) >> order; | 304 | end_pfn &= DMA_BIT_MASK(32) >> order; |
238 | 305 | ||
239 | /* start_pfn is always nonzero for an already-initialised domain */ | 306 | /* start_pfn is always nonzero for an already-initialised domain */ |
@@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
248 | * area cache limit down for the benefit of the smaller one. | 315 | * area cache limit down for the benefit of the smaller one. |
249 | */ | 316 | */ |
250 | iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); | 317 | iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); |
251 | } else { | 318 | |
252 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); | 319 | return 0; |
253 | if (pci) | ||
254 | iova_reserve_pci_windows(to_pci_dev(dev), iovad); | ||
255 | } | 320 | } |
256 | return 0; | 321 | |
322 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); | ||
323 | if (!dev) | ||
324 | return 0; | ||
325 | |||
326 | return iova_reserve_iommu_regions(dev, domain); | ||
257 | } | 327 | } |
258 | EXPORT_SYMBOL(iommu_dma_init_domain); | 328 | EXPORT_SYMBOL(iommu_dma_init_domain); |
259 | 329 | ||
@@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent, | |||
286 | } | 356 | } |
287 | } | 357 | } |
288 | 358 | ||
289 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, | 359 | static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, |
290 | dma_addr_t dma_limit, struct device *dev) | 360 | size_t size, dma_addr_t dma_limit, struct device *dev) |
291 | { | 361 | { |
292 | struct iova_domain *iovad = cookie_iovad(domain); | 362 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
293 | unsigned long shift = iova_shift(iovad); | 363 | struct iova_domain *iovad = &cookie->iovad; |
294 | unsigned long length = iova_align(iovad, size) >> shift; | 364 | unsigned long shift, iova_len, iova = 0; |
295 | struct iova *iova = NULL; | 365 | |
366 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) { | ||
367 | cookie->msi_iova += size; | ||
368 | return cookie->msi_iova - size; | ||
369 | } | ||
370 | |||
371 | shift = iova_shift(iovad); | ||
372 | iova_len = size >> shift; | ||
373 | /* | ||
374 | * Freeing non-power-of-two-sized allocations back into the IOVA caches | ||
375 | * will come back to bite us badly, so we have to waste a bit of space | ||
376 | * rounding up anything cacheable to make sure that can't happen. The | ||
377 | * order of the unadjusted size will still match upon freeing. | ||
378 | */ | ||
379 | if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) | ||
380 | iova_len = roundup_pow_of_two(iova_len); | ||
296 | 381 | ||
297 | if (domain->geometry.force_aperture) | 382 | if (domain->geometry.force_aperture) |
298 | dma_limit = min(dma_limit, domain->geometry.aperture_end); | 383 | dma_limit = min(dma_limit, domain->geometry.aperture_end); |
299 | 384 | ||
300 | /* Try to get PCI devices a SAC address */ | 385 | /* Try to get PCI devices a SAC address */ |
301 | if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) | 386 | if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) |
302 | iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift, | 387 | iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift); |
303 | true); | 388 | |
304 | /* | ||
305 | * Enforce size-alignment to be safe - there could perhaps be an | ||
306 | * attribute to control this per-device, or at least per-domain... | ||
307 | */ | ||
308 | if (!iova) | 389 | if (!iova) |
309 | iova = alloc_iova(iovad, length, dma_limit >> shift, true); | 390 | iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift); |
310 | 391 | ||
311 | return iova; | 392 | return (dma_addr_t)iova << shift; |
312 | } | 393 | } |
313 | 394 | ||
314 | /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ | 395 | static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, |
315 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) | 396 | dma_addr_t iova, size_t size) |
316 | { | 397 | { |
317 | struct iova_domain *iovad = cookie_iovad(domain); | 398 | struct iova_domain *iovad = &cookie->iovad; |
318 | unsigned long shift = iova_shift(iovad); | 399 | unsigned long shift = iova_shift(iovad); |
319 | unsigned long pfn = dma_addr >> shift; | ||
320 | struct iova *iova = find_iova(iovad, pfn); | ||
321 | size_t size; | ||
322 | 400 | ||
323 | if (WARN_ON(!iova)) | 401 | /* The MSI case is only ever cleaning up its most recent allocation */ |
324 | return; | 402 | if (cookie->type == IOMMU_DMA_MSI_COOKIE) |
403 | cookie->msi_iova -= size; | ||
404 | else | ||
405 | free_iova_fast(iovad, iova >> shift, size >> shift); | ||
406 | } | ||
407 | |||
408 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, | ||
409 | size_t size) | ||
410 | { | ||
411 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | ||
412 | struct iova_domain *iovad = &cookie->iovad; | ||
413 | size_t iova_off = iova_offset(iovad, dma_addr); | ||
414 | |||
415 | dma_addr -= iova_off; | ||
416 | size = iova_align(iovad, size + iova_off); | ||
325 | 417 | ||
326 | size = iova_size(iova) << shift; | 418 | WARN_ON(iommu_unmap(domain, dma_addr, size) != size); |
327 | size -= iommu_unmap(domain, pfn << shift, size); | 419 | iommu_dma_free_iova(cookie, dma_addr, size); |
328 | /* ...and if we can't, then something is horribly, horribly wrong */ | ||
329 | WARN_ON(size > 0); | ||
330 | __free_iova(iovad, iova); | ||
331 | } | 420 | } |
332 | 421 | ||
333 | static void __iommu_dma_free_pages(struct page **pages, int count) | 422 | static void __iommu_dma_free_pages(struct page **pages, int count) |
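iommu_dma_alloc_iova() above switches to alloc_iova_fast(), and as the in-code comment explains, freeing a non-power-of-two length back into the IOVA caches would misbehave, so cacheable lengths are rounded up at allocation and the matching free lands in the same bucket. A sketch of that rounding with an explicit bit-twiddling helper (the real code uses roundup_pow_of_two() and IOVA_RANGE_CACHE_MAX_SIZE; the constant here is only a stand-in):

        #include <stdio.h>

        #define RANGE_CACHE_MAX_ORDER 6         /* illustrative stand-in for IOVA_RANGE_CACHE_MAX_SIZE */

        static unsigned long roundup_pow2(unsigned long x)
        {
                unsigned long p = 1;

                while (p < x)
                        p <<= 1;
                return p;
        }

        static unsigned long cacheable_iova_len(unsigned long iova_len)
        {
                /* Only lengths small enough to be cached need rounding. */
                if (iova_len < (1UL << (RANGE_CACHE_MAX_ORDER - 1)))
                        iova_len = roundup_pow2(iova_len);
                return iova_len;
        }

        int main(void)
        {
                printf("%lu -> %lu pages\n", 5UL,  cacheable_iova_len(5));   /* rounds to 8 */
                printf("%lu -> %lu pages\n", 40UL, cacheable_iova_len(40));  /* too big to cache */
                return 0;
        }
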
@@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, | |||
409 | void iommu_dma_free(struct device *dev, struct page **pages, size_t size, | 498 | void iommu_dma_free(struct device *dev, struct page **pages, size_t size, |
410 | dma_addr_t *handle) | 499 | dma_addr_t *handle) |
411 | { | 500 | { |
412 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle); | 501 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); |
413 | __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); | 502 | __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); |
414 | *handle = DMA_ERROR_CODE; | 503 | *handle = DMA_ERROR_CODE; |
415 | } | 504 | } |
@@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
437 | void (*flush_page)(struct device *, const void *, phys_addr_t)) | 526 | void (*flush_page)(struct device *, const void *, phys_addr_t)) |
438 | { | 527 | { |
439 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 528 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
440 | struct iova_domain *iovad = cookie_iovad(domain); | 529 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
441 | struct iova *iova; | 530 | struct iova_domain *iovad = &cookie->iovad; |
442 | struct page **pages; | 531 | struct page **pages; |
443 | struct sg_table sgt; | 532 | struct sg_table sgt; |
444 | dma_addr_t dma_addr; | 533 | dma_addr_t iova; |
445 | unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; | 534 | unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; |
446 | 535 | ||
447 | *handle = DMA_ERROR_CODE; | 536 | *handle = DMA_ERROR_CODE; |
@@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
461 | if (!pages) | 550 | if (!pages) |
462 | return NULL; | 551 | return NULL; |
463 | 552 | ||
464 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev); | 553 | size = iova_align(iovad, size); |
554 | iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); | ||
465 | if (!iova) | 555 | if (!iova) |
466 | goto out_free_pages; | 556 | goto out_free_pages; |
467 | 557 | ||
468 | size = iova_align(iovad, size); | ||
469 | if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) | 558 | if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) |
470 | goto out_free_iova; | 559 | goto out_free_iova; |
471 | 560 | ||
@@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
481 | sg_miter_stop(&miter); | 570 | sg_miter_stop(&miter); |
482 | } | 571 | } |
483 | 572 | ||
484 | dma_addr = iova_dma_addr(iovad, iova); | 573 | if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot) |
485 | if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot) | ||
486 | < size) | 574 | < size) |
487 | goto out_free_sg; | 575 | goto out_free_sg; |
488 | 576 | ||
489 | *handle = dma_addr; | 577 | *handle = iova; |
490 | sg_free_table(&sgt); | 578 | sg_free_table(&sgt); |
491 | return pages; | 579 | return pages; |
492 | 580 | ||
493 | out_free_sg: | 581 | out_free_sg: |
494 | sg_free_table(&sgt); | 582 | sg_free_table(&sgt); |
495 | out_free_iova: | 583 | out_free_iova: |
496 | __free_iova(iovad, iova); | 584 | iommu_dma_free_iova(cookie, iova, size); |
497 | out_free_pages: | 585 | out_free_pages: |
498 | __iommu_dma_free_pages(pages, count); | 586 | __iommu_dma_free_pages(pages, count); |
499 | return NULL; | 587 | return NULL; |
@@ -527,22 +615,22 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) | |||
527 | static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, | 615 | static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, |
528 | size_t size, int prot) | 616 | size_t size, int prot) |
529 | { | 617 | { |
530 | dma_addr_t dma_addr; | ||
531 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 618 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
532 | struct iova_domain *iovad = cookie_iovad(domain); | 619 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
620 | struct iova_domain *iovad = &cookie->iovad; | ||
533 | size_t iova_off = iova_offset(iovad, phys); | 621 | size_t iova_off = iova_offset(iovad, phys); |
534 | size_t len = iova_align(iovad, size + iova_off); | 622 | dma_addr_t iova; |
535 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev); | ||
536 | 623 | ||
624 | size = iova_align(iovad, size + iova_off); | ||
625 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); | ||
537 | if (!iova) | 626 | if (!iova) |
538 | return DMA_ERROR_CODE; | 627 | return DMA_ERROR_CODE; |
539 | 628 | ||
540 | dma_addr = iova_dma_addr(iovad, iova); | 629 | if (iommu_map(domain, iova, phys - iova_off, size, prot)) { |
541 | if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) { | 630 | iommu_dma_free_iova(cookie, iova, size); |
542 | __free_iova(iovad, iova); | ||
543 | return DMA_ERROR_CODE; | 631 | return DMA_ERROR_CODE; |
544 | } | 632 | } |
545 | return dma_addr + iova_off; | 633 | return iova + iova_off; |
546 | } | 634 | } |
547 | 635 | ||
548 | dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | 636 | dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, |
@@ -554,7 +642,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
554 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, | 642 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, |
555 | enum dma_data_direction dir, unsigned long attrs) | 643 | enum dma_data_direction dir, unsigned long attrs) |
556 | { | 644 | { |
557 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); | 645 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); |
558 | } | 646 | } |
559 | 647 | ||
560 | /* | 648 | /* |
@@ -643,10 +731,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
643 | int nents, int prot) | 731 | int nents, int prot) |
644 | { | 732 | { |
645 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 733 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
646 | struct iova_domain *iovad = cookie_iovad(domain); | 734 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
647 | struct iova *iova; | 735 | struct iova_domain *iovad = &cookie->iovad; |
648 | struct scatterlist *s, *prev = NULL; | 736 | struct scatterlist *s, *prev = NULL; |
649 | dma_addr_t dma_addr; | 737 | dma_addr_t iova; |
650 | size_t iova_len = 0; | 738 | size_t iova_len = 0; |
651 | unsigned long mask = dma_get_seg_boundary(dev); | 739 | unsigned long mask = dma_get_seg_boundary(dev); |
652 | int i; | 740 | int i; |
@@ -690,7 +778,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
690 | prev = s; | 778 | prev = s; |
691 | } | 779 | } |
692 | 780 | ||
693 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev); | 781 | iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); |
694 | if (!iova) | 782 | if (!iova) |
695 | goto out_restore_sg; | 783 | goto out_restore_sg; |
696 | 784 | ||
@@ -698,14 +786,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
698 | * We'll leave any physical concatenation to the IOMMU driver's | 786 | * We'll leave any physical concatenation to the IOMMU driver's |
699 | * implementation - it knows better than we do. | 787 | * implementation - it knows better than we do. |
700 | */ | 788 | */ |
701 | dma_addr = iova_dma_addr(iovad, iova); | 789 | if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) |
702 | if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len) | ||
703 | goto out_free_iova; | 790 | goto out_free_iova; |
704 | 791 | ||
705 | return __finalise_sg(dev, sg, nents, dma_addr); | 792 | return __finalise_sg(dev, sg, nents, iova); |
706 | 793 | ||
707 | out_free_iova: | 794 | out_free_iova: |
708 | __free_iova(iovad, iova); | 795 | iommu_dma_free_iova(cookie, iova, iova_len); |
709 | out_restore_sg: | 796 | out_restore_sg: |
710 | __invalidate_sg(sg, nents); | 797 | __invalidate_sg(sg, nents); |
711 | return 0; | 798 | return 0; |
@@ -714,11 +801,21 @@ out_restore_sg: | |||
714 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 801 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
715 | enum dma_data_direction dir, unsigned long attrs) | 802 | enum dma_data_direction dir, unsigned long attrs) |
716 | { | 803 | { |
804 | dma_addr_t start, end; | ||
805 | struct scatterlist *tmp; | ||
806 | int i; | ||
717 | /* | 807 | /* |
718 | * The scatterlist segments are mapped into a single | 808 | * The scatterlist segments are mapped into a single |
719 | * contiguous IOVA allocation, so this is incredibly easy. | 809 | * contiguous IOVA allocation, so this is incredibly easy. |
720 | */ | 810 | */ |
721 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg)); | 811 | start = sg_dma_address(sg); |
812 | for_each_sg(sg_next(sg), tmp, nents - 1, i) { | ||
813 | if (sg_dma_len(tmp) == 0) | ||
814 | break; | ||
815 | sg = tmp; | ||
816 | } | ||
817 | end = sg_dma_address(sg) + sg_dma_len(sg); | ||
818 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start); | ||
722 | } | 819 | } |
723 | 820 | ||
724 | dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, | 821 | dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, |
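Since __iommu_dma_unmap() now needs a size, iommu_dma_unmap_sg() above recovers the extent of the single contiguous IOVA allocation by taking the DMA address of the first segment and walking to the last segment that was actually mapped (a zero dma_len marks entries left unused by __finalise_sg()). The same walk over a plain array, as a sketch:

        #include <stdint.h>
        #include <stdio.h>

        struct seg {
                uint64_t dma_addr;
                uint32_t dma_len;       /* 0 for unused entries */
        };

        static uint64_t mapped_extent(const struct seg *sg, int nents, uint64_t *start)
        {
                const struct seg *last = &sg[0];
                int i;

                *start = sg[0].dma_addr;
                for (i = 1; i < nents; i++) {
                        if (sg[i].dma_len == 0)
                                break;
                        last = &sg[i];
                }
                return last->dma_addr + last->dma_len - *start;
        }

        int main(void)
        {
                struct seg sg[] = {
                        { 0x100000, 0x1000 },
                        { 0x101000, 0x2000 },
                        { 0, 0 },               /* unused */
                };
                uint64_t start, len = mapped_extent(sg, 3, &start);

                printf("unmap [0x%llx, +0x%llx)\n",
                       (unsigned long long)start, (unsigned long long)len);
                return 0;
        }
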
@@ -731,7 +828,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, | |||
731 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | 828 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
732 | size_t size, enum dma_data_direction dir, unsigned long attrs) | 829 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
733 | { | 830 | { |
734 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); | 831 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); |
735 | } | 832 | } |
736 | 833 | ||
737 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 834 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
@@ -744,8 +841,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | |||
744 | { | 841 | { |
745 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | 842 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
746 | struct iommu_dma_msi_page *msi_page; | 843 | struct iommu_dma_msi_page *msi_page; |
747 | struct iova_domain *iovad = cookie_iovad(domain); | 844 | dma_addr_t iova; |
748 | struct iova *iova; | ||
749 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | 845 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
750 | size_t size = cookie_msi_granule(cookie); | 846 | size_t size = cookie_msi_granule(cookie); |
751 | 847 | ||
@@ -758,29 +854,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | |||
758 | if (!msi_page) | 854 | if (!msi_page) |
759 | return NULL; | 855 | return NULL; |
760 | 856 | ||
761 | msi_page->phys = msi_addr; | 857 | iova = __iommu_dma_map(dev, msi_addr, size, prot); |
762 | if (iovad) { | 858 | if (iommu_dma_mapping_error(dev, iova)) |
763 | iova = __alloc_iova(domain, size, dma_get_mask(dev), dev); | 859 | goto out_free_page; |
764 | if (!iova) | ||
765 | goto out_free_page; | ||
766 | msi_page->iova = iova_dma_addr(iovad, iova); | ||
767 | } else { | ||
768 | msi_page->iova = cookie->msi_iova; | ||
769 | cookie->msi_iova += size; | ||
770 | } | ||
771 | |||
772 | if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) | ||
773 | goto out_free_iova; | ||
774 | 860 | ||
775 | INIT_LIST_HEAD(&msi_page->list); | 861 | INIT_LIST_HEAD(&msi_page->list); |
862 | msi_page->phys = msi_addr; | ||
863 | msi_page->iova = iova; | ||
776 | list_add(&msi_page->list, &cookie->msi_page_list); | 864 | list_add(&msi_page->list, &cookie->msi_page_list); |
777 | return msi_page; | 865 | return msi_page; |
778 | 866 | ||
779 | out_free_iova: | ||
780 | if (iovad) | ||
781 | __free_iova(iovad, iova); | ||
782 | else | ||
783 | cookie->msi_iova -= size; | ||
784 | out_free_page: | 867 | out_free_page: |
785 | kfree(msi_page); | 868 | kfree(msi_page); |
786 | return NULL; | 869 | return NULL; |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 36e3f430d265..cbf7763d8091 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) | |||
311 | ((void *)drhd) + drhd->header.length, | 311 | ((void *)drhd) + drhd->header.length, |
312 | dmaru->segment, | 312 | dmaru->segment, |
313 | dmaru->devices, dmaru->devices_cnt); | 313 | dmaru->devices, dmaru->devices_cnt); |
314 | if (ret != 0) | 314 | if (ret) |
315 | break; | 315 | break; |
316 | } | 316 | } |
317 | if (ret >= 0) | 317 | if (ret >= 0) |
@@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) | |||
391 | { | 391 | { |
392 | struct acpi_dmar_hardware_unit *drhd; | 392 | struct acpi_dmar_hardware_unit *drhd; |
393 | struct dmar_drhd_unit *dmaru; | 393 | struct dmar_drhd_unit *dmaru; |
394 | int ret = 0; | 394 | int ret; |
395 | 395 | ||
396 | drhd = (struct acpi_dmar_hardware_unit *)header; | 396 | drhd = (struct acpi_dmar_hardware_unit *)header; |
397 | dmaru = dmar_find_dmaru(drhd); | 397 | dmaru = dmar_find_dmaru(drhd); |
@@ -551,17 +551,16 @@ static int __init dmar_table_detect(void) | |||
551 | status = AE_NOT_FOUND; | 551 | status = AE_NOT_FOUND; |
552 | } | 552 | } |
553 | 553 | ||
554 | return (ACPI_SUCCESS(status) ? 1 : 0); | 554 | return ACPI_SUCCESS(status) ? 0 : -ENOENT; |
555 | } | 555 | } |
556 | 556 | ||
557 | static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, | 557 | static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, |
558 | size_t len, struct dmar_res_callback *cb) | 558 | size_t len, struct dmar_res_callback *cb) |
559 | { | 559 | { |
560 | int ret = 0; | ||
561 | struct acpi_dmar_header *iter, *next; | 560 | struct acpi_dmar_header *iter, *next; |
562 | struct acpi_dmar_header *end = ((void *)start) + len; | 561 | struct acpi_dmar_header *end = ((void *)start) + len; |
563 | 562 | ||
564 | for (iter = start; iter < end && ret == 0; iter = next) { | 563 | for (iter = start; iter < end; iter = next) { |
565 | next = (void *)iter + iter->length; | 564 | next = (void *)iter + iter->length; |
566 | if (iter->length == 0) { | 565 | if (iter->length == 0) { |
567 | /* Avoid looping forever on bad ACPI tables */ | 566 | /* Avoid looping forever on bad ACPI tables */ |
@@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, | |||
570 | } else if (next > end) { | 569 | } else if (next > end) { |
571 | /* Avoid passing table end */ | 570 | /* Avoid passing table end */ |
572 | pr_warn(FW_BUG "Record passes table end\n"); | 571 | pr_warn(FW_BUG "Record passes table end\n"); |
573 | ret = -EINVAL; | 572 | return -EINVAL; |
574 | break; | ||
575 | } | 573 | } |
576 | 574 | ||
577 | if (cb->print_entry) | 575 | if (cb->print_entry) |
@@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, | |||
582 | pr_debug("Unknown DMAR structure type %d\n", | 580 | pr_debug("Unknown DMAR structure type %d\n", |
583 | iter->type); | 581 | iter->type); |
584 | } else if (cb->cb[iter->type]) { | 582 | } else if (cb->cb[iter->type]) { |
583 | int ret; | ||
584 | |||
585 | ret = cb->cb[iter->type](iter, cb->arg[iter->type]); | 585 | ret = cb->cb[iter->type](iter, cb->arg[iter->type]); |
586 | if (ret) | ||
587 | return ret; | ||
586 | } else if (!cb->ignore_unhandled) { | 588 | } else if (!cb->ignore_unhandled) { |
587 | pr_warn("No handler for DMAR structure type %d\n", | 589 | pr_warn("No handler for DMAR structure type %d\n", |
588 | iter->type); | 590 | iter->type); |
589 | ret = -EINVAL; | 591 | return -EINVAL; |
590 | } | 592 | } |
591 | } | 593 | } |
592 | 594 | ||
593 | return ret; | 595 | return 0; |
594 | } | 596 | } |
595 | 597 | ||
596 | static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, | 598 | static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, |
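The dmar_walk_remapping_entries() change above drops the accumulated ret variable in favour of returning at the first error, which keeps the loop condition simple and each failure path explicit. The shape of that refactor, reduced to a sketch with a made-up handler:

        #include <stdio.h>

        static int handle(int type)
        {
                return type == 3 ? -1 : 0;      /* pretend type 3 is unhandled */
        }

        static int walk(const int *types, int n)
        {
                int i;

                for (i = 0; i < n; i++) {
                        int ret = handle(types[i]);

                        if (ret)
                                return ret;     /* fail fast instead of carrying ret */
                }
                return 0;
        }

        int main(void)
        {
                int table[] = { 0, 1, 3, 2 };

                printf("walk -> %d\n", walk(table, 4));
                return 0;
        }
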
@@ -607,8 +609,8 @@ static int __init | |||
607 | parse_dmar_table(void) | 609 | parse_dmar_table(void) |
608 | { | 610 | { |
609 | struct acpi_table_dmar *dmar; | 611 | struct acpi_table_dmar *dmar; |
610 | int ret = 0; | ||
611 | int drhd_count = 0; | 612 | int drhd_count = 0; |
613 | int ret; | ||
612 | struct dmar_res_callback cb = { | 614 | struct dmar_res_callback cb = { |
613 | .print_entry = true, | 615 | .print_entry = true, |
614 | .ignore_unhandled = true, | 616 | .ignore_unhandled = true, |
@@ -891,17 +893,17 @@ int __init detect_intel_iommu(void) | |||
891 | 893 | ||
892 | down_write(&dmar_global_lock); | 894 | down_write(&dmar_global_lock); |
893 | ret = dmar_table_detect(); | 895 | ret = dmar_table_detect(); |
894 | if (ret) | 896 | if (!ret) |
895 | ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, | 897 | ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, |
896 | &validate_drhd_cb); | 898 | &validate_drhd_cb); |
897 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { | 899 | if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { |
898 | iommu_detected = 1; | 900 | iommu_detected = 1; |
899 | /* Make sure ACS will be enabled */ | 901 | /* Make sure ACS will be enabled */ |
900 | pci_request_acs(); | 902 | pci_request_acs(); |
901 | } | 903 | } |
902 | 904 | ||
903 | #ifdef CONFIG_X86 | 905 | #ifdef CONFIG_X86 |
904 | if (ret) | 906 | if (!ret) |
905 | x86_init.iommu.iommu_init = intel_iommu_init; | 907 | x86_init.iommu.iommu_init = intel_iommu_init; |
906 | #endif | 908 | #endif |
907 | 909 | ||
@@ -911,10 +913,9 @@ int __init detect_intel_iommu(void) | |||
911 | } | 913 | } |
912 | up_write(&dmar_global_lock); | 914 | up_write(&dmar_global_lock); |
913 | 915 | ||
914 | return ret ? 1 : -ENODEV; | 916 | return ret ? ret : 1; |
915 | } | 917 | } |
916 | 918 | ||
917 | |||
918 | static void unmap_iommu(struct intel_iommu *iommu) | 919 | static void unmap_iommu(struct intel_iommu *iommu) |
919 | { | 920 | { |
920 | iounmap(iommu->reg); | 921 | iounmap(iommu->reg); |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index c01bfcdb2383..2395478dde75 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova) | |||
171 | #define REG_V5_PT_BASE_PFN 0x00C | 171 | #define REG_V5_PT_BASE_PFN 0x00C |
172 | #define REG_V5_MMU_FLUSH_ALL 0x010 | 172 | #define REG_V5_MMU_FLUSH_ALL 0x010 |
173 | #define REG_V5_MMU_FLUSH_ENTRY 0x014 | 173 | #define REG_V5_MMU_FLUSH_ENTRY 0x014 |
174 | #define REG_V5_MMU_FLUSH_RANGE 0x018 | ||
175 | #define REG_V5_MMU_FLUSH_START 0x020 | ||
176 | #define REG_V5_MMU_FLUSH_END 0x024 | ||
174 | #define REG_V5_INT_STATUS 0x060 | 177 | #define REG_V5_INT_STATUS 0x060 |
175 | #define REG_V5_INT_CLEAR 0x064 | 178 | #define REG_V5_INT_CLEAR 0x064 |
176 | #define REG_V5_FAULT_AR_VA 0x070 | 179 | #define REG_V5_FAULT_AR_VA 0x070 |
@@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, | |||
319 | { | 322 | { |
320 | unsigned int i; | 323 | unsigned int i; |
321 | 324 | ||
322 | for (i = 0; i < num_inv; i++) { | 325 | if (MMU_MAJ_VER(data->version) < 5) { |
323 | if (MMU_MAJ_VER(data->version) < 5) | 326 | for (i = 0; i < num_inv; i++) { |
324 | writel((iova & SPAGE_MASK) | 1, | 327 | writel((iova & SPAGE_MASK) | 1, |
325 | data->sfrbase + REG_MMU_FLUSH_ENTRY); | 328 | data->sfrbase + REG_MMU_FLUSH_ENTRY); |
326 | else | 329 | iova += SPAGE_SIZE; |
330 | } | ||
331 | } else { | ||
332 | if (num_inv == 1) { | ||
327 | writel((iova & SPAGE_MASK) | 1, | 333 | writel((iova & SPAGE_MASK) | 1, |
328 | data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); | 334 | data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); |
329 | iova += SPAGE_SIZE; | 335 | } else { |
336 | writel((iova & SPAGE_MASK), | ||
337 | data->sfrbase + REG_V5_MMU_FLUSH_START); | ||
338 | writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, | ||
339 | data->sfrbase + REG_V5_MMU_FLUSH_END); | ||
340 | writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE); | ||
341 | } | ||
330 | } | 342 | } |
331 | } | 343 | } |
332 | 344 | ||
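The Exynos change above uses the v5 SYSMMU's FLUSH_START/FLUSH_END/FLUSH_RANGE registers for multi-page invalidations, so a run of N pages costs three register writes instead of N, while v1-v4 (and single-page v5 flushes) keep the per-entry write. A sketch of the decision and of how the inclusive end address is derived:

        #include <stdint.h>
        #include <stdio.h>

        #define SPAGE_SHIFT 12
        #define SPAGE_SIZE  (1UL << SPAGE_SHIFT)
        #define SPAGE_MASK  (~(SPAGE_SIZE - 1))

        static void flush_tlb(int major_ver, uint64_t iova, unsigned int num_pages)
        {
                if (major_ver < 5 || num_pages == 1) {
                        unsigned int i;

                        for (i = 0; i < num_pages; i++, iova += SPAGE_SIZE)
                                printf("FLUSH_ENTRY 0x%llx\n",
                                       (unsigned long long)((iova & SPAGE_MASK) | 1));
                } else {
                        uint64_t start = iova & SPAGE_MASK;
                        uint64_t end   = start + (uint64_t)(num_pages - 1) * SPAGE_SIZE;

                        printf("FLUSH_START 0x%llx\n", (unsigned long long)start);
                        printf("FLUSH_END   0x%llx\n", (unsigned long long)end);
                        printf("FLUSH_RANGE 1\n");
                }
        }

        int main(void)
        {
                flush_tlb(3, 0x40000000, 2);    /* v3: two per-entry writes */
                flush_tlb(5, 0x40000000, 4);    /* v5: one ranged flush */
                return 0;
        }
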
@@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) | |||
747 | goto err_counter; | 759 | goto err_counter; |
748 | 760 | ||
749 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ | 761 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ |
750 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { | 762 | for (i = 0; i < NUM_LV1ENTRIES; i++) |
751 | domain->pgtable[i + 0] = ZERO_LV2LINK; | 763 | domain->pgtable[i] = ZERO_LV2LINK; |
752 | domain->pgtable[i + 1] = ZERO_LV2LINK; | ||
753 | domain->pgtable[i + 2] = ZERO_LV2LINK; | ||
754 | domain->pgtable[i + 3] = ZERO_LV2LINK; | ||
755 | domain->pgtable[i + 4] = ZERO_LV2LINK; | ||
756 | domain->pgtable[i + 5] = ZERO_LV2LINK; | ||
757 | domain->pgtable[i + 6] = ZERO_LV2LINK; | ||
758 | domain->pgtable[i + 7] = ZERO_LV2LINK; | ||
759 | } | ||
760 | 764 | ||
761 | handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, | 765 | handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, |
762 | DMA_TO_DEVICE); | 766 | DMA_TO_DEVICE); |
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index aab723f91f12..c3434f29c967 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define __FSL_PAMU_H | 20 | #define __FSL_PAMU_H |
21 | 21 | ||
22 | #include <linux/iommu.h> | 22 | #include <linux/iommu.h> |
23 | #include <linux/pci.h> | ||
23 | 24 | ||
24 | #include <asm/fsl_pamu_stash.h> | 25 | #include <asm/fsl_pamu_stash.h> |
25 | 26 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d412a313a372..90ab0115d78e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -183,6 +183,7 @@ static int rwbf_quirk; | |||
183 | * (used when kernel is launched w/ TXT) | 183 | * (used when kernel is launched w/ TXT) |
184 | */ | 184 | */ |
185 | static int force_on = 0; | 185 | static int force_on = 0; |
186 | int intel_iommu_tboot_noforce; | ||
186 | 187 | ||
187 | /* | 188 | /* |
188 | * 0: Present | 189 | * 0: Present |
@@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str) | |||
607 | "Intel-IOMMU: enable pre-production PASID support\n"); | 608 | "Intel-IOMMU: enable pre-production PASID support\n"); |
608 | intel_iommu_pasid28 = 1; | 609 | intel_iommu_pasid28 = 1; |
609 | iommu_identity_mapping |= IDENTMAP_GFX; | 610 | iommu_identity_mapping |= IDENTMAP_GFX; |
611 | } else if (!strncmp(str, "tboot_noforce", 13)) { | ||
612 | printk(KERN_INFO | ||
613 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); | ||
614 | intel_iommu_tboot_noforce = 1; | ||
610 | } | 615 | } |
611 | 616 | ||
612 | str += strcspn(str, ","); | 617 | str += strcspn(str, ","); |
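The handler above walks a comma-separated option string: each token is matched with strncmp(), then strcspn() jumps to the next separator. A self-contained sketch of the same parsing loop with placeholder option names (this is not the real handler, just the pattern it follows):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int __init example_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;

        while (*str) {
                if (!strncmp(str, "foo", 3))
                        pr_info("example: option foo set\n");
                else if (!strncmp(str, "bar", 3))
                        pr_info("example: option bar set\n");

                str += strcspn(str, ",");       /* skip to the next separator */
                while (*str == ',')             /* and past any run of commas */
                        str++;
        }
        return 0;
}
__setup("example_iommu=", example_iommu_setup);

With the new token in place, booting with something like intel_iommu=tboot_noforce sets intel_iommu_tboot_noforce, which the tboot force-on logic elsewhere in this series consults before insisting on enabling the IOMMU.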
@@ -4730,6 +4735,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu) | |||
4730 | return 0; | 4735 | return 0; |
4731 | } | 4736 | } |
4732 | 4737 | ||
4738 | static void intel_disable_iommus(void) | ||
4739 | { | ||
4740 | struct intel_iommu *iommu = NULL; | ||
4741 | struct dmar_drhd_unit *drhd; | ||
4742 | |||
4743 | for_each_iommu(iommu, drhd) | ||
4744 | iommu_disable_translation(iommu); | ||
4745 | } | ||
4746 | |||
4733 | static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) | 4747 | static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) |
4734 | { | 4748 | { |
4735 | return container_of(dev, struct intel_iommu, iommu.dev); | 4749 | return container_of(dev, struct intel_iommu, iommu.dev); |
@@ -4840,8 +4854,28 @@ int __init intel_iommu_init(void) | |||
4840 | goto out_free_dmar; | 4854 | goto out_free_dmar; |
4841 | } | 4855 | } |
4842 | 4856 | ||
4843 | if (no_iommu || dmar_disabled) | 4857 | if (no_iommu || dmar_disabled) { |
4858 | /* | ||
4859 | * We exit the function here to ensure IOMMU's remapping and | ||
4860 | * mempool aren't setup, which means that the IOMMU's PMRs | ||
4861 | * won't be disabled via the call to init_dmars(). So disable | ||
4862 | * it explicitly here. The PMRs were setup by tboot prior to | ||
4863 | * calling SENTER, but the kernel is expected to reset/tear | ||
4864 | * down the PMRs. | ||
4865 | */ | ||
4866 | if (intel_iommu_tboot_noforce) { | ||
4867 | for_each_iommu(iommu, drhd) | ||
4868 | iommu_disable_protect_mem_regions(iommu); | ||
4869 | } | ||
4870 | |||
4871 | /* | ||
4872 | * Make sure the IOMMUs are switched off, even when we | ||
4873 | * boot into a kexec kernel and the previous kernel left | ||
4874 | * them enabled | ||
4875 | */ | ||
4876 | intel_disable_iommus(); | ||
4844 | goto out_free_dmar; | 4877 | goto out_free_dmar; |
4878 | } | ||
4845 | 4879 | ||
4846 | if (list_empty(&dmar_rmrr_units)) | 4880 | if (list_empty(&dmar_rmrr_units)) |
4847 | pr_info("No RMRR found\n"); | 4881 | pr_info("No RMRR found\n"); |
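Two things happen in the no_iommu/dmar_disabled early exit above: if the kernel was launched through tboot with tboot_noforce, the protected memory regions (PMRs) that tboot programmed are explicitly torn down, and any translation a previous (kexec'd) kernel left enabled is switched off. At the register level, disabling a PMR amounts to clearing the enable bit and polling for the status bit to drop; a heavily simplified sketch (the real iommu_disable_protect_mem_regions() also checks the PLMR/PHMR capability bits and holds the register lock, and the register/flag names here are assumed from the VT-d driver headers):

static void example_disable_pmr(struct intel_iommu *iommu)
{
        u32 pmen;

        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;          /* clear "enable protected memory" */
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for hardware to report the protected regions as disabled */
        while (readl(iommu->reg + DMAR_PMEN_REG) & DMA_PMEN_PRS)
                cpu_relax();
}

Without this, a tboot launch that skips init_dmars() would leave the PMRs in place and silently block DMA to the protected ranges.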
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ac596928f6b4..a190cbd76ef7 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu) | |||
408 | size_t size; | 408 | size_t size; |
409 | u64 irta; | 409 | u64 irta; |
410 | 410 | ||
411 | if (!is_kdump_kernel()) { | ||
412 | pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", | ||
413 | iommu->name); | ||
414 | clear_ir_pre_enabled(iommu); | ||
415 | iommu_disable_irq_remapping(iommu); | ||
416 | return -EINVAL; | ||
417 | } | ||
418 | |||
419 | /* Check whether the old ir-table has the same size as ours */ | 411 | /* Check whether the old ir-table has the same size as ours */ |
420 | irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); | 412 | irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); |
421 | if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK) | 413 | if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK) |
@@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) | |||
567 | init_ir_status(iommu); | 559 | init_ir_status(iommu); |
568 | 560 | ||
569 | if (ir_pre_enabled(iommu)) { | 561 | if (ir_pre_enabled(iommu)) { |
570 | if (iommu_load_old_irte(iommu)) | 562 | if (!is_kdump_kernel()) { |
563 | pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", | ||
564 | iommu->name); | ||
565 | clear_ir_pre_enabled(iommu); | ||
566 | iommu_disable_irq_remapping(iommu); | ||
567 | } else if (iommu_load_old_irte(iommu)) | ||
571 | pr_err("Failed to copy IR table for %s from previous kernel\n", | 568 | pr_err("Failed to copy IR table for %s from previous kernel\n", |
572 | iommu->name); | 569 | iommu->name); |
573 | else | 570 | else |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f9bc6ebb8140..6e5df5e0a3bd 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -74,7 +74,7 @@ | |||
74 | 74 | ||
75 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 75 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
76 | #define ARM_LPAE_BLOCK_SIZE(l,d) \ | 76 | #define ARM_LPAE_BLOCK_SIZE(l,d) \ |
77 | (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ | 77 | (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \ |
78 | ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) | 78 | ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) |
79 | 79 | ||
80 | /* Page table bits */ | 80 | /* Page table bits */ |
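The one-character change above fixes a shift-width bug: ARM_LPAE_BLOCK_SIZE() can produce shift counts beyond 31 on 64-bit configurations, and `1 <<` is an int shift, which is undefined behaviour once the count reaches the width of int. Worked through for a 4 KiB granule (8-byte PTEs, 9 bits per level, 4 levels):

/*
 * shift at level l = ilog2(sizeof(pte)) + (MAX_LEVELS - l) * bits_per_level
 *                  = 3 + (4 - l) * 9
 *
 * level 1 block: shift = 30  ->  1 GiB,   still fits in a 32-bit int
 * level 0 block: shift = 39  ->  512 GiB, needs a 64-bit constant
 */
unsigned long long bad  = 1    << 39;   /* undefined behaviour on a 32-bit int */
unsigned long long good = 1ULL << 39;   /* 0x8000000000, as intended */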
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3b67144dead2..cf7ca7e70777 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | static struct kset *iommu_group_kset; | 37 | static struct kset *iommu_group_kset; |
38 | static DEFINE_IDA(iommu_group_ida); | 38 | static DEFINE_IDA(iommu_group_ida); |
39 | static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; | ||
39 | 40 | ||
40 | struct iommu_callback_data { | 41 | struct iommu_callback_data { |
41 | const struct iommu_ops *ops; | 42 | const struct iommu_ops *ops; |
@@ -112,6 +113,18 @@ static int __iommu_attach_group(struct iommu_domain *domain, | |||
112 | static void __iommu_detach_group(struct iommu_domain *domain, | 113 | static void __iommu_detach_group(struct iommu_domain *domain, |
113 | struct iommu_group *group); | 114 | struct iommu_group *group); |
114 | 115 | ||
116 | static int __init iommu_set_def_domain_type(char *str) | ||
117 | { | ||
118 | bool pt; | ||
119 | |||
120 | if (!str || strtobool(str, &pt)) | ||
121 | return -EINVAL; | ||
122 | |||
123 | iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; | ||
124 | return 0; | ||
125 | } | ||
126 | early_param("iommu.passthrough", iommu_set_def_domain_type); | ||
127 | |||
115 | static ssize_t iommu_group_attr_show(struct kobject *kobj, | 128 | static ssize_t iommu_group_attr_show(struct kobject *kobj, |
116 | struct attribute *__attr, char *buf) | 129 | struct attribute *__attr, char *buf) |
117 | { | 130 | { |
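iommu.passthrough= is parsed with strtobool(), so the usual boolean spellings are accepted, and it only changes which domain type the core asks drivers for by default; a driver still has to implement IOMMU_DOMAIN_IDENTITY for the request to succeed (see the fallback added further down). Typical command lines, shown as a usage illustration rather than text from the patch:

iommu.passthrough=1     # default domains become IOMMU_DOMAIN_IDENTITY: DMA bypasses translation
iommu.passthrough=0     # default domains stay IOMMU_DOMAIN_DMA: DMA goes through the IOMMU DMA API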
@@ -1015,10 +1028,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) | |||
1015 | * IOMMU driver. | 1028 | * IOMMU driver. |
1016 | */ | 1029 | */ |
1017 | if (!group->default_domain) { | 1030 | if (!group->default_domain) { |
1018 | group->default_domain = __iommu_domain_alloc(dev->bus, | 1031 | struct iommu_domain *dom; |
1019 | IOMMU_DOMAIN_DMA); | 1032 | |
1033 | dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); | ||
1034 | if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { | ||
1035 | dev_warn(dev, | ||
1036 | "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", | ||
1037 | iommu_def_domain_type); | ||
1038 | dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); | ||
1039 | } | ||
1040 | |||
1041 | group->default_domain = dom; | ||
1020 | if (!group->domain) | 1042 | if (!group->domain) |
1021 | group->domain = group->default_domain; | 1043 | group->domain = dom; |
1022 | } | 1044 | } |
1023 | 1045 | ||
1024 | ret = iommu_group_add_device(group, dev); | 1046 | ret = iommu_group_add_device(group, dev); |
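The fallback above matters because __iommu_domain_alloc() simply forwards the requested type to the driver's domain_alloc callback, and a driver with no identity-map support will return NULL for IOMMU_DOMAIN_IDENTITY. A hedged sketch of such a callback (driver and structure names are hypothetical):

struct foo_domain {
        struct iommu_domain domain;
        /* driver page-table state would live here */
};

/* Only translated domains exist in this hypothetical driver, so asking for
 * IOMMU_DOMAIN_IDENTITY returns NULL and the core falls back to
 * IOMMU_DOMAIN_DMA exactly as in the hunk above.
 */
static struct iommu_domain *foo_domain_alloc(unsigned type)
{
        struct foo_domain *dom;

        if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        /* page-table allocation elided */
        return &dom->domain;
}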
@@ -1083,8 +1105,12 @@ static int iommu_bus_notifier(struct notifier_block *nb, | |||
1083 | * result in ADD/DEL notifiers to group->notifier | 1105 | * result in ADD/DEL notifiers to group->notifier |
1084 | */ | 1106 | */ |
1085 | if (action == BUS_NOTIFY_ADD_DEVICE) { | 1107 | if (action == BUS_NOTIFY_ADD_DEVICE) { |
1086 | if (ops->add_device) | 1108 | if (ops->add_device) { |
1087 | return ops->add_device(dev); | 1109 | int ret; |
1110 | |||
1111 | ret = ops->add_device(dev); | ||
1112 | return (ret) ? NOTIFY_DONE : NOTIFY_OK; | ||
1113 | } | ||
1088 | } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { | 1114 | } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { |
1089 | if (ops->remove_device && dev->iommu_group) { | 1115 | if (ops->remove_device && dev->iommu_group) { |
1090 | ops->remove_device(dev); | 1116 | ops->remove_device(dev); |
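Bus notifier callbacks are expected to answer in notifier-chain terms rather than raw errnos: a negative return can be misread by the chain core, which only understands the NOTIFY_* encoding. The hunk maps an add_device() failure to NOTIFY_DONE ("nothing done, keep notifying"), which deliberately keeps one device's failure from being treated as a fatal chain error. The alternative encoding, shown here as a sketch for contrast rather than as what the patch does, would preserve the errno for callers that use notifier_to_errno():

ret = ops->add_device(dev);
if (ret)
        return notifier_from_errno(ret);        /* wraps the errno with NOTIFY_STOP_MASK */
return NOTIFY_OK;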
@@ -1652,6 +1678,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) | |||
1652 | } | 1678 | } |
1653 | EXPORT_SYMBOL_GPL(iommu_domain_window_disable); | 1679 | EXPORT_SYMBOL_GPL(iommu_domain_window_disable); |
1654 | 1680 | ||
1681 | /** | ||
1682 | * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework | ||
1683 | * @domain: the iommu domain where the fault has happened | ||
1684 | * @dev: the device where the fault has happened | ||
1685 | * @iova: the faulting address | ||
1686 | * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) | ||
1687 | * | ||
1688 | * This function should be called by the low-level IOMMU implementations | ||
1689 | * whenever IOMMU faults happen, to allow high-level users, that are | ||
1690 | * interested in such events, to know about them. | ||
1691 | * | ||
1692 | * This event may be useful for several possible use cases: | ||
1693 | * - mere logging of the event | ||
1694 | * - dynamic TLB/PTE loading | ||
1695 | * - if restarting of the faulting device is required | ||
1696 | * | ||
1697 | * Returns 0 on success and an appropriate error code otherwise (if dynamic | ||
1698 | * PTE/TLB loading will one day be supported, implementations will be able | ||
1699 | * to tell whether it succeeded or not according to this return value). | ||
1700 | * | ||
1701 | * Specifically, -ENOSYS is returned if a fault handler isn't installed | ||
1702 | * (though fault handlers can also return -ENOSYS, in case they want to | ||
1703 | * elicit the default behavior of the IOMMU drivers). | ||
1704 | */ | ||
1705 | int report_iommu_fault(struct iommu_domain *domain, struct device *dev, | ||
1706 | unsigned long iova, int flags) | ||
1707 | { | ||
1708 | int ret = -ENOSYS; | ||
1709 | |||
1710 | /* | ||
1711 | * if upper layers showed interest and installed a fault handler, | ||
1712 | * invoke it. | ||
1713 | */ | ||
1714 | if (domain->handler) | ||
1715 | ret = domain->handler(domain, dev, iova, flags, | ||
1716 | domain->handler_token); | ||
1717 | |||
1718 | trace_io_page_fault(dev, iova, flags); | ||
1719 | return ret; | ||
1720 | } | ||
1721 | EXPORT_SYMBOL_GPL(report_iommu_fault); | ||
1722 | |||
1655 | static int __init iommu_init(void) | 1723 | static int __init iommu_init(void) |
1656 | { | 1724 | { |
1657 | iommu_group_kset = kset_create_and_add("iommu_groups", | 1725 | iommu_group_kset = kset_create_and_add("iommu_groups", |
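report_iommu_fault() is the producer half of the fault API; the consumer installs a handler on the domain with iommu_set_fault_handler(). A hedged sketch of both ends from a client's perspective (handler and function names are hypothetical; decoding the fault from the interrupt belongs to the hardware driver):

#include <linux/device.h>
#include <linux/iommu.h>

/* Consumer: log faults and return -ENOSYS to keep the driver's default behaviour. */
static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
                            unsigned long iova, int flags, void *token)
{
        dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n", iova, flags);
        return -ENOSYS;
}

static void my_domain_setup(struct iommu_domain *domain)
{
        iommu_set_fault_handler(domain, my_fault_handler, NULL);
}

/* Producer: the hardware driver's IRQ path reports what it decoded. */
static void hw_fault_path(struct iommu_domain *domain, struct device *dev,
                          unsigned long fault_iova)
{
        if (report_iommu_fault(domain, dev, fault_iova, IOMMU_FAULT_READ))
                ;       /* unhandled (-ENOSYS or handler error): driver-specific recovery */
}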
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index e80a4105ac2a..5c88ba70e4e0 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -166,7 +166,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, | |||
166 | break; /* found a free slot */ | 166 | break; /* found a free slot */ |
167 | } | 167 | } |
168 | adjust_limit_pfn: | 168 | adjust_limit_pfn: |
169 | limit_pfn = curr_iova->pfn_lo - 1; | 169 | limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; |
170 | move_left: | 170 | move_left: |
171 | prev = curr; | 171 | prev = curr; |
172 | curr = rb_prev(curr); | 172 | curr = rb_prev(curr); |
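The change above guards an unsigned underflow: limit_pfn and pfn_lo are unsigned longs, so when an existing reservation starts at PFN 0, `pfn_lo - 1` wraps around and the walk continues against a "limit" that is really the top of the address space. Worked out:

unsigned long pfn_lo = 0;
unsigned long naive  = pfn_lo - 1;                      /* wraps to ULONG_MAX */
unsigned long fixed  = pfn_lo ? pfn_lo - 1 : 0;         /* clamps to 0 instead */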
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 19e010083408..a27ef570c328 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c | |||
@@ -431,9 +431,10 @@ err_release_mapping: | |||
431 | 431 | ||
432 | static int mtk_iommu_add_device(struct device *dev) | 432 | static int mtk_iommu_add_device(struct device *dev) |
433 | { | 433 | { |
434 | struct iommu_group *group; | ||
435 | struct of_phandle_args iommu_spec; | 434 | struct of_phandle_args iommu_spec; |
436 | struct of_phandle_iterator it; | 435 | struct of_phandle_iterator it; |
436 | struct mtk_iommu_data *data; | ||
437 | struct iommu_group *group; | ||
437 | int err; | 438 | int err; |
438 | 439 | ||
439 | of_for_each_phandle(&it, err, dev->of_node, "iommus", | 440 | of_for_each_phandle(&it, err, dev->of_node, "iommus", |
@@ -450,6 +451,9 @@ static int mtk_iommu_add_device(struct device *dev) | |||
450 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) | 451 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
451 | return -ENODEV; /* Not a iommu client device */ | 452 | return -ENODEV; /* Not a iommu client device */ |
452 | 453 | ||
454 | data = dev->iommu_fwspec->iommu_priv; | ||
455 | iommu_device_link(&data->iommu, dev); | ||
456 | |||
453 | group = iommu_group_get_for_dev(dev); | 457 | group = iommu_group_get_for_dev(dev); |
454 | if (IS_ERR(group)) | 458 | if (IS_ERR(group)) |
455 | return PTR_ERR(group); | 459 | return PTR_ERR(group); |
@@ -460,9 +464,14 @@ static int mtk_iommu_add_device(struct device *dev) | |||
460 | 464 | ||
461 | static void mtk_iommu_remove_device(struct device *dev) | 465 | static void mtk_iommu_remove_device(struct device *dev) |
462 | { | 466 | { |
467 | struct mtk_iommu_data *data; | ||
468 | |||
463 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) | 469 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
464 | return; | 470 | return; |
465 | 471 | ||
472 | data = dev->iommu_fwspec->iommu_priv; | ||
473 | iommu_device_unlink(&data->iommu, dev); | ||
474 | |||
466 | iommu_group_remove_device(dev); | 475 | iommu_group_remove_device(dev); |
467 | iommu_fwspec_free(dev); | 476 | iommu_fwspec_free(dev); |
468 | } | 477 | } |
@@ -627,6 +636,17 @@ static int mtk_iommu_probe(struct platform_device *pdev) | |||
627 | if (ret) | 636 | if (ret) |
628 | return ret; | 637 | return ret; |
629 | 638 | ||
639 | ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, | ||
640 | dev_name(&pdev->dev)); | ||
641 | if (ret) | ||
642 | return ret; | ||
643 | |||
644 | iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); | ||
645 | |||
646 | ret = iommu_device_register(&data->iommu); | ||
647 | if (ret) | ||
648 | return ret; | ||
649 | |||
630 | if (!iommu_present(&platform_bus_type)) | 650 | if (!iommu_present(&platform_bus_type)) |
631 | bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); | 651 | bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); |
632 | 652 | ||
@@ -637,6 +657,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) | |||
637 | { | 657 | { |
638 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); | 658 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); |
639 | 659 | ||
660 | iommu_device_sysfs_remove(&data->iommu); | ||
661 | iommu_device_unregister(&data->iommu); | ||
662 | |||
640 | if (iommu_present(&platform_bus_type)) | 663 | if (iommu_present(&platform_bus_type)) |
641 | bus_set_iommu(&platform_bus_type, NULL); | 664 | bus_set_iommu(&platform_bus_type, NULL); |
642 | 665 | ||
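The MediaTek changes follow the 'struct iommu_device' pattern this series rolls out across drivers: register the IOMMU instance (sysfs node plus ops) once at probe time, then link and unlink each client device from add_device()/remove_device() so the topology is visible under /sys/class/iommu. A condensed sketch of the probe-side sequence; the hunk above returns early on failure, while this version also unwinds the sysfs node (helper name is illustrative):

static int example_register_iommu(struct mtk_iommu_data *data,
                                  struct device *dev)
{
        int err;

        err = iommu_device_sysfs_add(&data->iommu, dev, NULL, dev_name(dev));
        if (err)
                return err;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);

        err = iommu_device_register(&data->iommu);
        if (err)
                iommu_device_sysfs_remove(&data->iommu);        /* unwind sysfs */

        return err;
}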
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 2683e9fc0dcf..9f44ee8ea1bc 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
@@ -96,6 +96,49 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, | |||
96 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(of_get_dma_window); | 97 | EXPORT_SYMBOL_GPL(of_get_dma_window); |
98 | 98 | ||
99 | static bool of_iommu_driver_present(struct device_node *np) | ||
100 | { | ||
101 | /* | ||
102 | * If the IOMMU still isn't ready by the time we reach init, assume | ||
103 | * it never will be. We don't want to defer indefinitely, nor attempt | ||
104 | * to dereference __iommu_of_table after it's been freed. | ||
105 | */ | ||
106 | if (system_state > SYSTEM_BOOTING) | ||
107 | return false; | ||
108 | |||
109 | return of_match_node(&__iommu_of_table, np); | ||
110 | } | ||
111 | |||
112 | static const struct iommu_ops | ||
113 | *of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) | ||
114 | { | ||
115 | const struct iommu_ops *ops; | ||
116 | struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; | ||
117 | int err; | ||
118 | |||
119 | ops = iommu_ops_from_fwnode(fwnode); | ||
120 | if ((ops && !ops->of_xlate) || | ||
121 | (!ops && !of_iommu_driver_present(iommu_spec->np))) | ||
122 | return NULL; | ||
123 | |||
124 | err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); | ||
125 | if (err) | ||
126 | return ERR_PTR(err); | ||
127 | /* | ||
128 | * The otherwise-empty fwspec handily serves to indicate the specific | ||
129 | * IOMMU device we're waiting for, which will be useful if we ever get | ||
130 | * a proper probe-ordering dependency mechanism in future. | ||
131 | */ | ||
132 | if (!ops) | ||
133 | return ERR_PTR(-EPROBE_DEFER); | ||
134 | |||
135 | err = ops->of_xlate(dev, iommu_spec); | ||
136 | if (err) | ||
137 | return ERR_PTR(err); | ||
138 | |||
139 | return ops; | ||
140 | } | ||
141 | |||
99 | static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) | 142 | static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) |
100 | { | 143 | { |
101 | struct of_phandle_args *iommu_spec = data; | 144 | struct of_phandle_args *iommu_spec = data; |
@@ -105,10 +148,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) | |||
105 | } | 148 | } |
106 | 149 | ||
107 | static const struct iommu_ops | 150 | static const struct iommu_ops |
108 | *of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) | 151 | *of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np) |
109 | { | 152 | { |
110 | const struct iommu_ops *ops; | 153 | const struct iommu_ops *ops; |
111 | struct of_phandle_args iommu_spec; | 154 | struct of_phandle_args iommu_spec; |
155 | int err; | ||
112 | 156 | ||
113 | /* | 157 | /* |
114 | * Start by tracing the RID alias down the PCI topology as | 158 | * Start by tracing the RID alias down the PCI topology as |
@@ -123,56 +167,76 @@ static const struct iommu_ops | |||
123 | * bus into the system beyond, and which IOMMU it ends up at. | 167 | * bus into the system beyond, and which IOMMU it ends up at. |
124 | */ | 168 | */ |
125 | iommu_spec.np = NULL; | 169 | iommu_spec.np = NULL; |
126 | if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", | 170 | err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", |
127 | "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) | 171 | "iommu-map-mask", &iommu_spec.np, |
128 | return NULL; | 172 | iommu_spec.args); |
173 | if (err) | ||
174 | return err == -ENODEV ? NULL : ERR_PTR(err); | ||
129 | 175 | ||
130 | ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); | 176 | ops = of_iommu_xlate(&pdev->dev, &iommu_spec); |
131 | if (!ops || !ops->of_xlate || | ||
132 | iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || | ||
133 | ops->of_xlate(&pdev->dev, &iommu_spec)) | ||
134 | ops = NULL; | ||
135 | 177 | ||
136 | of_node_put(iommu_spec.np); | 178 | of_node_put(iommu_spec.np); |
137 | return ops; | 179 | return ops; |
138 | } | 180 | } |
139 | 181 | ||
140 | const struct iommu_ops *of_iommu_configure(struct device *dev, | 182 | static const struct iommu_ops |
141 | struct device_node *master_np) | 183 | *of_platform_iommu_init(struct device *dev, struct device_node *np) |
142 | { | 184 | { |
143 | struct of_phandle_args iommu_spec; | 185 | struct of_phandle_args iommu_spec; |
144 | struct device_node *np; | ||
145 | const struct iommu_ops *ops = NULL; | 186 | const struct iommu_ops *ops = NULL; |
146 | int idx = 0; | 187 | int idx = 0; |
147 | 188 | ||
148 | if (dev_is_pci(dev)) | ||
149 | return of_pci_iommu_configure(to_pci_dev(dev), master_np); | ||
150 | |||
151 | /* | 189 | /* |
152 | * We don't currently walk up the tree looking for a parent IOMMU. | 190 | * We don't currently walk up the tree looking for a parent IOMMU. |
153 | * See the `Notes:' section of | 191 | * See the `Notes:' section of |
154 | * Documentation/devicetree/bindings/iommu/iommu.txt | 192 | * Documentation/devicetree/bindings/iommu/iommu.txt |
155 | */ | 193 | */ |
156 | while (!of_parse_phandle_with_args(master_np, "iommus", | 194 | while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", |
157 | "#iommu-cells", idx, | 195 | idx, &iommu_spec)) { |
158 | &iommu_spec)) { | 196 | ops = of_iommu_xlate(dev, &iommu_spec); |
159 | np = iommu_spec.np; | 197 | of_node_put(iommu_spec.np); |
160 | ops = iommu_ops_from_fwnode(&np->fwnode); | ||
161 | |||
162 | if (!ops || !ops->of_xlate || | ||
163 | iommu_fwspec_init(dev, &np->fwnode, ops) || | ||
164 | ops->of_xlate(dev, &iommu_spec)) | ||
165 | goto err_put_node; | ||
166 | |||
167 | of_node_put(np); | ||
168 | idx++; | 198 | idx++; |
199 | if (IS_ERR_OR_NULL(ops)) | ||
200 | break; | ||
169 | } | 201 | } |
170 | 202 | ||
171 | return ops; | 203 | return ops; |
204 | } | ||
205 | |||
206 | const struct iommu_ops *of_iommu_configure(struct device *dev, | ||
207 | struct device_node *master_np) | ||
208 | { | ||
209 | const struct iommu_ops *ops; | ||
210 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
211 | |||
212 | if (!master_np) | ||
213 | return NULL; | ||
214 | |||
215 | if (fwspec) { | ||
216 | if (fwspec->ops) | ||
217 | return fwspec->ops; | ||
218 | |||
219 | /* In the deferred case, start again from scratch */ | ||
220 | iommu_fwspec_free(dev); | ||
221 | } | ||
172 | 222 | ||
173 | err_put_node: | 223 | if (dev_is_pci(dev)) |
174 | of_node_put(np); | 224 | ops = of_pci_iommu_init(to_pci_dev(dev), master_np); |
175 | return NULL; | 225 | else |
226 | ops = of_platform_iommu_init(dev, master_np); | ||
227 | /* | ||
228 | * If we have reason to believe the IOMMU driver missed the initial | ||
229 | * add_device callback for dev, replay it to get things in order. | ||
230 | */ | ||
231 | if (!IS_ERR_OR_NULL(ops) && ops->add_device && | ||
232 | dev->bus && !dev->iommu_group) { | ||
233 | int err = ops->add_device(dev); | ||
234 | |||
235 | if (err) | ||
236 | ops = ERR_PTR(err); | ||
237 | } | ||
238 | |||
239 | return ops; | ||
176 | } | 240 | } |
177 | 241 | ||
178 | static int __init of_iommu_init(void) | 242 | static int __init of_iommu_init(void) |
@@ -183,7 +247,7 @@ static int __init of_iommu_init(void) | |||
183 | for_each_matching_node_and_match(np, matches, &match) { | 247 | for_each_matching_node_and_match(np, matches, &match) { |
184 | const of_iommu_init_fn init_fn = match->data; | 248 | const of_iommu_init_fn init_fn = match->data; |
185 | 249 | ||
186 | if (init_fn(np)) | 250 | if (init_fn && init_fn(np)) |
187 | pr_err("Failed to initialise IOMMU %s\n", | 251 | pr_err("Failed to initialise IOMMU %s\n", |
188 | of_node_full_name(np)); | 252 | of_node_full_name(np)); |
189 | } | 253 | } |
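of_iommu_xlate() above is now the single place where an "iommus"/"iommu-map" reference becomes a set of ops: when the referenced IOMMU driver has registered, its of_xlate() callback is invoked; when it is built in but has not probed yet, -EPROBE_DEFER is returned so the client device is retried later. The driver-side half of that contract is an of_xlate callback that records which instance and stream the phandle arguments name; a hedged sketch (binding and driver names hypothetical):

/* Hypothetical of_xlate: args[0] carries a master/stream ID in this binding. */
static int foo_iommu_of_xlate(struct device *dev,
                              struct of_phandle_args *args)
{
        u32 id;

        if (args->args_count != 1)
                return -EINVAL;

        id = args->args[0];
        return iommu_fwspec_add_ids(dev, &id, 1);
}

Because of_iommu_configure() now also replays ops->add_device() for masters created before the IOMMU probed, a previously deferred client ends up with its fwspec, group and per-device driver state in place by the time its own probe runs.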
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e2583cce2cc1..95dfca36ccb9 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -36,28 +36,14 @@ | |||
36 | #include "omap-iopgtable.h" | 36 | #include "omap-iopgtable.h" |
37 | #include "omap-iommu.h" | 37 | #include "omap-iommu.h" |
38 | 38 | ||
39 | static const struct iommu_ops omap_iommu_ops; | ||
40 | |||
39 | #define to_iommu(dev) \ | 41 | #define to_iommu(dev) \ |
40 | ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) | 42 | ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) |
41 | 43 | ||
42 | /* bitmap of the page sizes currently supported */ | 44 | /* bitmap of the page sizes currently supported */ |
43 | #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) | 45 | #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) |
44 | 46 | ||
45 | /** | ||
46 | * struct omap_iommu_domain - omap iommu domain | ||
47 | * @pgtable: the page table | ||
48 | * @iommu_dev: an omap iommu device attached to this domain. only a single | ||
49 | * iommu device can be attached for now. | ||
50 | * @dev: Device using this domain. | ||
51 | * @lock: domain lock, should be taken when attaching/detaching | ||
52 | */ | ||
53 | struct omap_iommu_domain { | ||
54 | u32 *pgtable; | ||
55 | struct omap_iommu *iommu_dev; | ||
56 | struct device *dev; | ||
57 | spinlock_t lock; | ||
58 | struct iommu_domain domain; | ||
59 | }; | ||
60 | |||
61 | #define MMU_LOCK_BASE_SHIFT 10 | 47 | #define MMU_LOCK_BASE_SHIFT 10 |
62 | #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) | 48 | #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) |
63 | #define MMU_LOCK_BASE(x) \ | 49 | #define MMU_LOCK_BASE(x) \ |
@@ -818,33 +804,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
818 | return IRQ_NONE; | 804 | return IRQ_NONE; |
819 | } | 805 | } |
820 | 806 | ||
821 | static int device_match_by_alias(struct device *dev, void *data) | ||
822 | { | ||
823 | struct omap_iommu *obj = to_iommu(dev); | ||
824 | const char *name = data; | ||
825 | |||
826 | pr_debug("%s: %s %s\n", __func__, obj->name, name); | ||
827 | |||
828 | return strcmp(obj->name, name) == 0; | ||
829 | } | ||
830 | |||
831 | /** | 807 | /** |
832 | * omap_iommu_attach() - attach iommu device to an iommu domain | 808 | * omap_iommu_attach() - attach iommu device to an iommu domain |
833 | * @name: name of target omap iommu device | 809 | * @obj: target omap iommu device |
834 | * @iopgd: page table | 810 | * @iopgd: page table |
835 | **/ | 811 | **/ |
836 | static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | 812 | static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) |
837 | { | 813 | { |
838 | int err; | 814 | int err; |
839 | struct device *dev; | ||
840 | struct omap_iommu *obj; | ||
841 | |||
842 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, | ||
843 | device_match_by_alias); | ||
844 | if (!dev) | ||
845 | return ERR_PTR(-ENODEV); | ||
846 | |||
847 | obj = to_iommu(dev); | ||
848 | 815 | ||
849 | spin_lock(&obj->iommu_lock); | 816 | spin_lock(&obj->iommu_lock); |
850 | 817 | ||
@@ -857,11 +824,13 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
857 | spin_unlock(&obj->iommu_lock); | 824 | spin_unlock(&obj->iommu_lock); |
858 | 825 | ||
859 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | 826 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
860 | return obj; | 827 | |
828 | return 0; | ||
861 | 829 | ||
862 | err_enable: | 830 | err_enable: |
863 | spin_unlock(&obj->iommu_lock); | 831 | spin_unlock(&obj->iommu_lock); |
864 | return ERR_PTR(err); | 832 | |
833 | return err; | ||
865 | } | 834 | } |
866 | 835 | ||
867 | /** | 836 | /** |
@@ -928,28 +897,26 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
928 | int irq; | 897 | int irq; |
929 | struct omap_iommu *obj; | 898 | struct omap_iommu *obj; |
930 | struct resource *res; | 899 | struct resource *res; |
931 | struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); | ||
932 | struct device_node *of = pdev->dev.of_node; | 900 | struct device_node *of = pdev->dev.of_node; |
933 | 901 | ||
902 | if (!of) { | ||
903 | pr_err("%s: only DT-based devices are supported\n", __func__); | ||
904 | return -ENODEV; | ||
905 | } | ||
906 | |||
934 | obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | 907 | obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); |
935 | if (!obj) | 908 | if (!obj) |
936 | return -ENOMEM; | 909 | return -ENOMEM; |
937 | 910 | ||
938 | if (of) { | 911 | obj->name = dev_name(&pdev->dev); |
939 | obj->name = dev_name(&pdev->dev); | 912 | obj->nr_tlb_entries = 32; |
940 | obj->nr_tlb_entries = 32; | 913 | err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); |
941 | err = of_property_read_u32(of, "ti,#tlb-entries", | 914 | if (err && err != -EINVAL) |
942 | &obj->nr_tlb_entries); | 915 | return err; |
943 | if (err && err != -EINVAL) | 916 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) |
944 | return err; | 917 | return -EINVAL; |
945 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) | 918 | if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) |
946 | return -EINVAL; | 919 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; |
947 | if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) | ||
948 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; | ||
949 | } else { | ||
950 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | ||
951 | obj->name = pdata->name; | ||
952 | } | ||
953 | 920 | ||
954 | obj->dev = &pdev->dev; | 921 | obj->dev = &pdev->dev; |
955 | obj->ctx = (void *)obj + sizeof(*obj); | 922 | obj->ctx = (void *)obj + sizeof(*obj); |
@@ -976,19 +943,46 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
976 | return err; | 943 | return err; |
977 | platform_set_drvdata(pdev, obj); | 944 | platform_set_drvdata(pdev, obj); |
978 | 945 | ||
946 | obj->group = iommu_group_alloc(); | ||
947 | if (IS_ERR(obj->group)) | ||
948 | return PTR_ERR(obj->group); | ||
949 | |||
950 | err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name); | ||
951 | if (err) | ||
952 | goto out_group; | ||
953 | |||
954 | iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); | ||
955 | |||
956 | err = iommu_device_register(&obj->iommu); | ||
957 | if (err) | ||
958 | goto out_sysfs; | ||
959 | |||
979 | pm_runtime_irq_safe(obj->dev); | 960 | pm_runtime_irq_safe(obj->dev); |
980 | pm_runtime_enable(obj->dev); | 961 | pm_runtime_enable(obj->dev); |
981 | 962 | ||
982 | omap_iommu_debugfs_add(obj); | 963 | omap_iommu_debugfs_add(obj); |
983 | 964 | ||
984 | dev_info(&pdev->dev, "%s registered\n", obj->name); | 965 | dev_info(&pdev->dev, "%s registered\n", obj->name); |
966 | |||
985 | return 0; | 967 | return 0; |
968 | |||
969 | out_sysfs: | ||
970 | iommu_device_sysfs_remove(&obj->iommu); | ||
971 | out_group: | ||
972 | iommu_group_put(obj->group); | ||
973 | return err; | ||
986 | } | 974 | } |
987 | 975 | ||
988 | static int omap_iommu_remove(struct platform_device *pdev) | 976 | static int omap_iommu_remove(struct platform_device *pdev) |
989 | { | 977 | { |
990 | struct omap_iommu *obj = platform_get_drvdata(pdev); | 978 | struct omap_iommu *obj = platform_get_drvdata(pdev); |
991 | 979 | ||
980 | iommu_group_put(obj->group); | ||
981 | obj->group = NULL; | ||
982 | |||
983 | iommu_device_sysfs_remove(&obj->iommu); | ||
984 | iommu_device_unregister(&obj->iommu); | ||
985 | |||
992 | omap_iommu_debugfs_remove(obj); | 986 | omap_iommu_debugfs_remove(obj); |
993 | 987 | ||
994 | pm_runtime_disable(obj->dev); | 988 | pm_runtime_disable(obj->dev); |
@@ -1077,11 +1071,11 @@ static int | |||
1077 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1071 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
1078 | { | 1072 | { |
1079 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); | 1073 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
1080 | struct omap_iommu *oiommu; | ||
1081 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | 1074 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; |
1075 | struct omap_iommu *oiommu; | ||
1082 | int ret = 0; | 1076 | int ret = 0; |
1083 | 1077 | ||
1084 | if (!arch_data || !arch_data->name) { | 1078 | if (!arch_data || !arch_data->iommu_dev) { |
1085 | dev_err(dev, "device doesn't have an associated iommu\n"); | 1079 | dev_err(dev, "device doesn't have an associated iommu\n"); |
1086 | return -EINVAL; | 1080 | return -EINVAL; |
1087 | } | 1081 | } |
@@ -1095,15 +1089,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1095 | goto out; | 1089 | goto out; |
1096 | } | 1090 | } |
1097 | 1091 | ||
1092 | oiommu = arch_data->iommu_dev; | ||
1093 | |||
1098 | /* get a handle to and enable the omap iommu */ | 1094 | /* get a handle to and enable the omap iommu */ |
1099 | oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable); | 1095 | ret = omap_iommu_attach(oiommu, omap_domain->pgtable); |
1100 | if (IS_ERR(oiommu)) { | 1096 | if (ret) { |
1101 | ret = PTR_ERR(oiommu); | ||
1102 | dev_err(dev, "can't get omap iommu: %d\n", ret); | 1097 | dev_err(dev, "can't get omap iommu: %d\n", ret); |
1103 | goto out; | 1098 | goto out; |
1104 | } | 1099 | } |
1105 | 1100 | ||
1106 | omap_domain->iommu_dev = arch_data->iommu_dev = oiommu; | 1101 | omap_domain->iommu_dev = oiommu; |
1107 | omap_domain->dev = dev; | 1102 | omap_domain->dev = dev; |
1108 | oiommu->domain = domain; | 1103 | oiommu->domain = domain; |
1109 | 1104 | ||
@@ -1116,7 +1111,6 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, | |||
1116 | struct device *dev) | 1111 | struct device *dev) |
1117 | { | 1112 | { |
1118 | struct omap_iommu *oiommu = dev_to_omap_iommu(dev); | 1113 | struct omap_iommu *oiommu = dev_to_omap_iommu(dev); |
1119 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | ||
1120 | 1114 | ||
1121 | /* only a single device is supported per domain for now */ | 1115 | /* only a single device is supported per domain for now */ |
1122 | if (omap_domain->iommu_dev != oiommu) { | 1116 | if (omap_domain->iommu_dev != oiommu) { |
@@ -1128,7 +1122,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, | |||
1128 | 1122 | ||
1129 | omap_iommu_detach(oiommu); | 1123 | omap_iommu_detach(oiommu); |
1130 | 1124 | ||
1131 | omap_domain->iommu_dev = arch_data->iommu_dev = NULL; | 1125 | omap_domain->iommu_dev = NULL; |
1132 | omap_domain->dev = NULL; | 1126 | omap_domain->dev = NULL; |
1133 | oiommu->domain = NULL; | 1127 | oiommu->domain = NULL; |
1134 | } | 1128 | } |
@@ -1232,8 +1226,11 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | |||
1232 | static int omap_iommu_add_device(struct device *dev) | 1226 | static int omap_iommu_add_device(struct device *dev) |
1233 | { | 1227 | { |
1234 | struct omap_iommu_arch_data *arch_data; | 1228 | struct omap_iommu_arch_data *arch_data; |
1229 | struct omap_iommu *oiommu; | ||
1230 | struct iommu_group *group; | ||
1235 | struct device_node *np; | 1231 | struct device_node *np; |
1236 | struct platform_device *pdev; | 1232 | struct platform_device *pdev; |
1233 | int ret; | ||
1237 | 1234 | ||
1238 | /* | 1235 | /* |
1239 | * Allocate the archdata iommu structure for DT-based devices. | 1236 | * Allocate the archdata iommu structure for DT-based devices. |
@@ -1254,15 +1251,41 @@ static int omap_iommu_add_device(struct device *dev) | |||
1254 | return -EINVAL; | 1251 | return -EINVAL; |
1255 | } | 1252 | } |
1256 | 1253 | ||
1254 | oiommu = platform_get_drvdata(pdev); | ||
1255 | if (!oiommu) { | ||
1256 | of_node_put(np); | ||
1257 | return -EINVAL; | ||
1258 | } | ||
1259 | |||
1257 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); | 1260 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); |
1258 | if (!arch_data) { | 1261 | if (!arch_data) { |
1259 | of_node_put(np); | 1262 | of_node_put(np); |
1260 | return -ENOMEM; | 1263 | return -ENOMEM; |
1261 | } | 1264 | } |
1262 | 1265 | ||
1263 | arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL); | 1266 | ret = iommu_device_link(&oiommu->iommu, dev); |
1267 | if (ret) { | ||
1268 | kfree(arch_data); | ||
1269 | of_node_put(np); | ||
1270 | return ret; | ||
1271 | } | ||
1272 | |||
1273 | arch_data->iommu_dev = oiommu; | ||
1264 | dev->archdata.iommu = arch_data; | 1274 | dev->archdata.iommu = arch_data; |
1265 | 1275 | ||
1276 | /* | ||
1277 | * IOMMU group initialization calls into omap_iommu_device_group, which | ||
1278 | * needs a valid dev->archdata.iommu pointer | ||
1279 | */ | ||
1280 | group = iommu_group_get_for_dev(dev); | ||
1281 | if (IS_ERR(group)) { | ||
1282 | iommu_device_unlink(&oiommu->iommu, dev); | ||
1283 | dev->archdata.iommu = NULL; | ||
1284 | kfree(arch_data); | ||
1285 | return PTR_ERR(group); | ||
1286 | } | ||
1287 | iommu_group_put(group); | ||
1288 | |||
1266 | of_node_put(np); | 1289 | of_node_put(np); |
1267 | 1290 | ||
1268 | return 0; | 1291 | return 0; |
@@ -1275,8 +1298,23 @@ static void omap_iommu_remove_device(struct device *dev) | |||
1275 | if (!dev->of_node || !arch_data) | 1298 | if (!dev->of_node || !arch_data) |
1276 | return; | 1299 | return; |
1277 | 1300 | ||
1278 | kfree(arch_data->name); | 1301 | iommu_device_unlink(&arch_data->iommu_dev->iommu, dev); |
1302 | iommu_group_remove_device(dev); | ||
1303 | |||
1304 | dev->archdata.iommu = NULL; | ||
1279 | kfree(arch_data); | 1305 | kfree(arch_data); |
1306 | |||
1307 | } | ||
1308 | |||
1309 | static struct iommu_group *omap_iommu_device_group(struct device *dev) | ||
1310 | { | ||
1311 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | ||
1312 | struct iommu_group *group = NULL; | ||
1313 | |||
1314 | if (arch_data->iommu_dev) | ||
1315 | group = arch_data->iommu_dev->group; | ||
1316 | |||
1317 | return group; | ||
1280 | } | 1318 | } |
1281 | 1319 | ||
1282 | static const struct iommu_ops omap_iommu_ops = { | 1320 | static const struct iommu_ops omap_iommu_ops = { |
@@ -1290,6 +1328,7 @@ static const struct iommu_ops omap_iommu_ops = { | |||
1290 | .iova_to_phys = omap_iommu_iova_to_phys, | 1328 | .iova_to_phys = omap_iommu_iova_to_phys, |
1291 | .add_device = omap_iommu_add_device, | 1329 | .add_device = omap_iommu_add_device, |
1292 | .remove_device = omap_iommu_remove_device, | 1330 | .remove_device = omap_iommu_remove_device, |
1331 | .device_group = omap_iommu_device_group, | ||
1293 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, | 1332 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, |
1294 | }; | 1333 | }; |
1295 | 1334 | ||
@@ -1299,6 +1338,7 @@ static int __init omap_iommu_init(void) | |||
1299 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1338 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1300 | size_t align = 1 << 10; /* L2 pagetable alignement */ | 1339 | size_t align = 1 << 10; /* L2 pagetable alignement */ |
1301 | struct device_node *np; | 1340 | struct device_node *np; |
1341 | int ret; | ||
1302 | 1342 | ||
1303 | np = of_find_matching_node(NULL, omap_iommu_of_match); | 1343 | np = of_find_matching_node(NULL, omap_iommu_of_match); |
1304 | if (!np) | 1344 | if (!np) |
@@ -1312,11 +1352,25 @@ static int __init omap_iommu_init(void) | |||
1312 | return -ENOMEM; | 1352 | return -ENOMEM; |
1313 | iopte_cachep = p; | 1353 | iopte_cachep = p; |
1314 | 1354 | ||
1315 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | ||
1316 | |||
1317 | omap_iommu_debugfs_init(); | 1355 | omap_iommu_debugfs_init(); |
1318 | 1356 | ||
1319 | return platform_driver_register(&omap_iommu_driver); | 1357 | ret = platform_driver_register(&omap_iommu_driver); |
1358 | if (ret) { | ||
1359 | pr_err("%s: failed to register driver\n", __func__); | ||
1360 | goto fail_driver; | ||
1361 | } | ||
1362 | |||
1363 | ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | ||
1364 | if (ret) | ||
1365 | goto fail_bus; | ||
1366 | |||
1367 | return 0; | ||
1368 | |||
1369 | fail_bus: | ||
1370 | platform_driver_unregister(&omap_iommu_driver); | ||
1371 | fail_driver: | ||
1372 | kmem_cache_destroy(iopte_cachep); | ||
1373 | return ret; | ||
1320 | } | 1374 | } |
1321 | subsys_initcall(omap_iommu_init); | 1375 | subsys_initcall(omap_iommu_init); |
1322 | /* must be ready before omap3isp is probed */ | 1376 | /* must be ready before omap3isp is probed */ |
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 59628e5017b4..6e70515e6038 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define _OMAP_IOMMU_H | 14 | #define _OMAP_IOMMU_H |
15 | 15 | ||
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/iommu.h> | ||
17 | 18 | ||
18 | #define for_each_iotlb_cr(obj, n, __i, cr) \ | 19 | #define for_each_iotlb_cr(obj, n, __i, cr) \ |
19 | for (__i = 0; \ | 20 | for (__i = 0; \ |
@@ -27,6 +28,23 @@ struct iotlb_entry { | |||
27 | u32 endian, elsz, mixed; | 28 | u32 endian, elsz, mixed; |
28 | }; | 29 | }; |
29 | 30 | ||
31 | /** | ||
32 | * struct omap_iommu_domain - omap iommu domain | ||
33 | * @pgtable: the page table | ||
34 | * @iommu_dev: an omap iommu device attached to this domain. only a single | ||
35 | * iommu device can be attached for now. | ||
36 | * @dev: Device using this domain. | ||
37 | * @lock: domain lock, should be taken when attaching/detaching | ||
38 | * @domain: generic domain handle used by iommu core code | ||
39 | */ | ||
40 | struct omap_iommu_domain { | ||
41 | u32 *pgtable; | ||
42 | struct omap_iommu *iommu_dev; | ||
43 | struct device *dev; | ||
44 | spinlock_t lock; | ||
45 | struct iommu_domain domain; | ||
46 | }; | ||
47 | |||
30 | struct omap_iommu { | 48 | struct omap_iommu { |
31 | const char *name; | 49 | const char *name; |
32 | void __iomem *regbase; | 50 | void __iomem *regbase; |
@@ -50,6 +68,22 @@ struct omap_iommu { | |||
50 | 68 | ||
51 | int has_bus_err_back; | 69 | int has_bus_err_back; |
52 | u32 id; | 70 | u32 id; |
71 | |||
72 | struct iommu_device iommu; | ||
73 | struct iommu_group *group; | ||
74 | }; | ||
75 | |||
76 | /** | ||
77 | * struct omap_iommu_arch_data - omap iommu private data | ||
78 | * @iommu_dev: handle of the iommu device | ||
79 | * | ||
80 | * This is an omap iommu private data object, which binds an iommu user | ||
81 | * to its iommu device. This object should be placed at the iommu user's | ||
82 | * dev_archdata so generic IOMMU API can be used without having to | ||
83 | * utilize omap-specific plumbing anymore. | ||
84 | */ | ||
85 | struct omap_iommu_arch_data { | ||
86 | struct omap_iommu *iommu_dev; | ||
53 | }; | 87 | }; |
54 | 88 | ||
55 | struct cr_regs { | 89 | struct cr_regs { |
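With omap_iommu_arch_data reduced to a single iommu_dev pointer, the old name-based driver_find_device() lookup is gone: omap_iommu_add_device() resolves the "iommus" phandle to the IOMMU's platform device once and caches the omap_iommu pointer, and attach_dev() simply dereferences it. A small sketch of how a client path reaches its IOMMU now, mirroring the attach_dev hunk (the helper name is illustrative):

static struct omap_iommu *example_get_omap_iommu(struct device *dev)
{
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

        /* filled in by omap_iommu_add_device() from the "iommus" phandle */
        if (!arch_data || !arch_data->iommu_dev)
                return NULL;

        return arch_data->iommu_dev;
}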
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 9afcbf79f0b0..4ba48a26b389 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/dma-iommu.h> | 10 | #include <linux/dma-iommu.h> |
11 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
@@ -90,6 +91,7 @@ struct rk_iommu { | |||
90 | void __iomem **bases; | 91 | void __iomem **bases; |
91 | int num_mmu; | 92 | int num_mmu; |
92 | int irq; | 93 | int irq; |
94 | struct iommu_device iommu; | ||
93 | struct list_head node; /* entry in rk_iommu_domain.iommus */ | 95 | struct list_head node; /* entry in rk_iommu_domain.iommus */ |
94 | struct iommu_domain *domain; /* domain to which iommu is attached */ | 96 | struct iommu_domain *domain; /* domain to which iommu is attached */ |
95 | }; | 97 | }; |
@@ -1032,6 +1034,7 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group, | |||
1032 | static int rk_iommu_add_device(struct device *dev) | 1034 | static int rk_iommu_add_device(struct device *dev) |
1033 | { | 1035 | { |
1034 | struct iommu_group *group; | 1036 | struct iommu_group *group; |
1037 | struct rk_iommu *iommu; | ||
1035 | int ret; | 1038 | int ret; |
1036 | 1039 | ||
1037 | if (!rk_iommu_is_dev_iommu_master(dev)) | 1040 | if (!rk_iommu_is_dev_iommu_master(dev)) |
@@ -1054,6 +1057,10 @@ static int rk_iommu_add_device(struct device *dev) | |||
1054 | if (ret) | 1057 | if (ret) |
1055 | goto err_remove_device; | 1058 | goto err_remove_device; |
1056 | 1059 | ||
1060 | iommu = rk_iommu_from_dev(dev); | ||
1061 | if (iommu) | ||
1062 | iommu_device_link(&iommu->iommu, dev); | ||
1063 | |||
1057 | iommu_group_put(group); | 1064 | iommu_group_put(group); |
1058 | 1065 | ||
1059 | return 0; | 1066 | return 0; |
@@ -1067,9 +1074,15 @@ err_put_group: | |||
1067 | 1074 | ||
1068 | static void rk_iommu_remove_device(struct device *dev) | 1075 | static void rk_iommu_remove_device(struct device *dev) |
1069 | { | 1076 | { |
1077 | struct rk_iommu *iommu; | ||
1078 | |||
1070 | if (!rk_iommu_is_dev_iommu_master(dev)) | 1079 | if (!rk_iommu_is_dev_iommu_master(dev)) |
1071 | return; | 1080 | return; |
1072 | 1081 | ||
1082 | iommu = rk_iommu_from_dev(dev); | ||
1083 | if (iommu) | ||
1084 | iommu_device_unlink(&iommu->iommu, dev); | ||
1085 | |||
1073 | iommu_group_remove_device(dev); | 1086 | iommu_group_remove_device(dev); |
1074 | } | 1087 | } |
1075 | 1088 | ||
@@ -1117,7 +1130,7 @@ static int rk_iommu_probe(struct platform_device *pdev) | |||
1117 | struct rk_iommu *iommu; | 1130 | struct rk_iommu *iommu; |
1118 | struct resource *res; | 1131 | struct resource *res; |
1119 | int num_res = pdev->num_resources; | 1132 | int num_res = pdev->num_resources; |
1120 | int i; | 1133 | int err, i; |
1121 | 1134 | ||
1122 | iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); | 1135 | iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); |
1123 | if (!iommu) | 1136 | if (!iommu) |
@@ -1150,11 +1163,25 @@ static int rk_iommu_probe(struct platform_device *pdev) | |||
1150 | return -ENXIO; | 1163 | return -ENXIO; |
1151 | } | 1164 | } |
1152 | 1165 | ||
1153 | return 0; | 1166 | err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); |
1167 | if (err) | ||
1168 | return err; | ||
1169 | |||
1170 | iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); | ||
1171 | err = iommu_device_register(&iommu->iommu); | ||
1172 | |||
1173 | return err; | ||
1154 | } | 1174 | } |
1155 | 1175 | ||
1156 | static int rk_iommu_remove(struct platform_device *pdev) | 1176 | static int rk_iommu_remove(struct platform_device *pdev) |
1157 | { | 1177 | { |
1178 | struct rk_iommu *iommu = platform_get_drvdata(pdev); | ||
1179 | |||
1180 | if (iommu) { | ||
1181 | iommu_device_sysfs_remove(&iommu->iommu); | ||
1182 | iommu_device_unregister(&iommu->iommu); | ||
1183 | } | ||
1184 | |||
1158 | return 0; | 1185 | return 0; |
1159 | } | 1186 | } |
1160 | 1187 | ||
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 9305964250ac..eeb19f560a05 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/of_device.h> | 15 | #include <linux/of_device.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/dma-mapping.h> | ||
18 | 19 | ||
19 | #include <soc/tegra/ahb.h> | 20 | #include <soc/tegra/ahb.h> |
20 | #include <soc/tegra/mc.h> | 21 | #include <soc/tegra/mc.h> |
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index aa44e11decca..853d598937f6 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/of_reserved_mem.h> | 23 | #include <linux/of_reserved_mem.h> |
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <linux/sizes.h> | 25 | #include <linux/sizes.h> |
26 | #include <linux/dma-mapping.h> | ||
26 | 27 | ||
27 | #include "mtk_vpu.h" | 28 | #include "mtk_vpu.h" |
28 | 29 | ||
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 084ecf4aa9a4..0d984a28a003 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c | |||
@@ -1943,30 +1943,13 @@ static void isp_detach_iommu(struct isp_device *isp) | |||
1943 | { | 1943 | { |
1944 | arm_iommu_release_mapping(isp->mapping); | 1944 | arm_iommu_release_mapping(isp->mapping); |
1945 | isp->mapping = NULL; | 1945 | isp->mapping = NULL; |
1946 | iommu_group_remove_device(isp->dev); | ||
1947 | } | 1946 | } |
1948 | 1947 | ||
1949 | static int isp_attach_iommu(struct isp_device *isp) | 1948 | static int isp_attach_iommu(struct isp_device *isp) |
1950 | { | 1949 | { |
1951 | struct dma_iommu_mapping *mapping; | 1950 | struct dma_iommu_mapping *mapping; |
1952 | struct iommu_group *group; | ||
1953 | int ret; | 1951 | int ret; |
1954 | 1952 | ||
1955 | /* Create a device group and add the device to it. */ | ||
1956 | group = iommu_group_alloc(); | ||
1957 | if (IS_ERR(group)) { | ||
1958 | dev_err(isp->dev, "failed to allocate IOMMU group\n"); | ||
1959 | return PTR_ERR(group); | ||
1960 | } | ||
1961 | |||
1962 | ret = iommu_group_add_device(group, isp->dev); | ||
1963 | iommu_group_put(group); | ||
1964 | |||
1965 | if (ret < 0) { | ||
1966 | dev_err(isp->dev, "failed to add device to IPMMU group\n"); | ||
1967 | return ret; | ||
1968 | } | ||
1969 | |||
1970 | /* | 1953 | /* |
1971 | * Create the ARM mapping, used by the ARM DMA mapping core to allocate | 1954 | * Create the ARM mapping, used by the ARM DMA mapping core to allocate |
1972 | * VAs. This will allocate a corresponding IOMMU domain. | 1955 | * VAs. This will allocate a corresponding IOMMU domain. |
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h index 7e6f6638433b..2f2ae609c548 100644 --- a/drivers/media/platform/omap3isp/isp.h +++ b/drivers/media/platform/omap3isp/isp.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/clk-provider.h> | 23 | #include <linux/clk-provider.h> |
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/iommu.h> | ||
27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
28 | #include <linux/wait.h> | 27 | #include <linux/wait.h> |
29 | 28 | ||
diff --git a/drivers/of/device.c b/drivers/of/device.c index 6e2f9113b1b7..9416d052cb89 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
@@ -82,7 +82,7 @@ int of_device_add(struct platform_device *ofdev) | |||
82 | * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events | 82 | * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events |
83 | * to fix up DMA configuration. | 83 | * to fix up DMA configuration. |
84 | */ | 84 | */ |
85 | void of_dma_configure(struct device *dev, struct device_node *np) | 85 | int of_dma_configure(struct device *dev, struct device_node *np) |
86 | { | 86 | { |
87 | u64 dma_addr, paddr, size; | 87 | u64 dma_addr, paddr, size; |
88 | int ret; | 88 | int ret; |
@@ -107,7 +107,7 @@ void of_dma_configure(struct device *dev, struct device_node *np) | |||
107 | ret = of_dma_get_range(np, &dma_addr, &paddr, &size); | 107 | ret = of_dma_get_range(np, &dma_addr, &paddr, &size); |
108 | if (ret < 0) { | 108 | if (ret < 0) { |
109 | dma_addr = offset = 0; | 109 | dma_addr = offset = 0; |
110 | size = dev->coherent_dma_mask + 1; | 110 | size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); |
111 | } else { | 111 | } else { |
112 | offset = PFN_DOWN(paddr - dma_addr); | 112 | offset = PFN_DOWN(paddr - dma_addr); |
113 | 113 | ||
@@ -123,7 +123,7 @@ void of_dma_configure(struct device *dev, struct device_node *np) | |||
123 | 123 | ||
124 | if (!size) { | 124 | if (!size) { |
125 | dev_err(dev, "Adjusted size 0x%llx invalid\n", size); | 125 | dev_err(dev, "Adjusted size 0x%llx invalid\n", size); |
126 | return; | 126 | return -EINVAL; |
127 | } | 127 | } |
128 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); | 128 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); |
129 | } | 129 | } |
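The max() above covers the all-ones mask: coherent_dma_mask is an inclusive mask, so the range it describes is mask + 1 bytes, but for a full 64-bit mask that addition wraps to zero and the later "Adjusted size 0x0 invalid" check would reject a perfectly valid device. Worked out:

u64 mask  = DMA_BIT_MASK(64);           /* 0xffffffffffffffff */
u64 naive = mask + 1;                   /* wraps to 0 */
u64 size  = max(mask, mask + 1);        /* stays 0xffffffffffffffff */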
@@ -144,13 +144,30 @@ void of_dma_configure(struct device *dev, struct device_node *np) | |||
144 | coherent ? " " : " not "); | 144 | coherent ? " " : " not "); |
145 | 145 | ||
146 | iommu = of_iommu_configure(dev, np); | 146 | iommu = of_iommu_configure(dev, np); |
147 | if (IS_ERR(iommu)) | ||
148 | return PTR_ERR(iommu); | ||
149 | |||
147 | dev_dbg(dev, "device is%sbehind an iommu\n", | 150 | dev_dbg(dev, "device is%sbehind an iommu\n", |
148 | iommu ? " " : " not "); | 151 | iommu ? " " : " not "); |
149 | 152 | ||
150 | arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); | 153 | arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); |
154 | |||
155 | return 0; | ||
151 | } | 156 | } |
152 | EXPORT_SYMBOL_GPL(of_dma_configure); | 157 | EXPORT_SYMBOL_GPL(of_dma_configure); |
153 | 158 | ||
159 | /** | ||
160 | * of_dma_deconfigure - Clean up DMA configuration | ||
161 | * @dev: Device for which to clean up DMA configuration | ||
162 | * | ||
163 | * Clean up all configuration performed by of_dma_configure_ops() and free all | ||
164 | * resources that have been allocated. | ||
165 | */ | ||
166 | void of_dma_deconfigure(struct device *dev) | ||
167 | { | ||
168 | arch_teardown_dma_ops(dev); | ||
169 | } | ||
170 | |||
154 | int of_device_register(struct platform_device *pdev) | 171 | int of_device_register(struct platform_device *pdev) |
155 | { | 172 | { |
156 | device_initialize(&pdev->dev); | 173 | device_initialize(&pdev->dev); |
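Making of_dma_configure() return an int is what lets -EPROBE_DEFER from of_iommu_configure() reach a caller that can act on it: instead of being wired up unconditionally at device-creation time (the of/platform.c and PCI call sites removed below), DMA and IOMMU configuration can now happen when a driver is about to probe, and a deferral simply retries that probe once the IOMMU driver has registered. A hedged sketch of such a probe-time caller (function and hook names are hypothetical; the real call site comes from the wider driver-core rework, not this file):

/* Hypothetical bus probe hook wiring up DMA/IOMMU configuration at probe time. */
static int example_bus_probe(struct device *dev)
{
        int ret;

        ret = of_dma_configure(dev, dev->of_node);
        if (ret)
                return ret;     /* -EPROBE_DEFER: retried once the IOMMU is up */

        ret = dev->driver->probe ? dev->driver->probe(dev) : 0;
        if (ret)
                of_dma_deconfigure(dev);

        return ret;
}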
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 45b413e5a444..71fecc2debfc 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/of_address.h> | 23 | #include <linux/of_address.h> |
24 | #include <linux/of_device.h> | 24 | #include <linux/of_device.h> |
25 | #include <linux/of_iommu.h> | ||
25 | #include <linux/of_irq.h> | 26 | #include <linux/of_irq.h> |
26 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
@@ -158,11 +159,6 @@ struct platform_device *of_device_alloc(struct device_node *np, | |||
158 | } | 159 | } |
159 | EXPORT_SYMBOL(of_device_alloc); | 160 | EXPORT_SYMBOL(of_device_alloc); |
160 | 161 | ||
161 | static void of_dma_deconfigure(struct device *dev) | ||
162 | { | ||
163 | arch_teardown_dma_ops(dev); | ||
164 | } | ||
165 | |||
166 | /** | 162 | /** |
167 | * of_platform_device_create_pdata - Alloc, initialize and register an of_device | 163 | * of_platform_device_create_pdata - Alloc, initialize and register an of_device |
168 | * @np: pointer to node to create device for | 164 | * @np: pointer to node to create device for |
@@ -191,11 +187,9 @@ static struct platform_device *of_platform_device_create_pdata( | |||
191 | 187 | ||
192 | dev->dev.bus = &platform_bus_type; | 188 | dev->dev.bus = &platform_bus_type; |
193 | dev->dev.platform_data = platform_data; | 189 | dev->dev.platform_data = platform_data; |
194 | of_dma_configure(&dev->dev, dev->dev.of_node); | ||
195 | of_msi_configure(&dev->dev, dev->dev.of_node); | 190 | of_msi_configure(&dev->dev, dev->dev.of_node); |
196 | 191 | ||
197 | if (of_device_add(dev) != 0) { | 192 | if (of_device_add(dev) != 0) { |
198 | of_dma_deconfigure(&dev->dev); | ||
199 | platform_device_put(dev); | 193 | platform_device_put(dev); |
200 | goto err_clear_flag; | 194 | goto err_clear_flag; |
201 | } | 195 | } |
@@ -253,7 +247,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node, | |||
253 | dev_set_name(&dev->dev, "%s", bus_id); | 247 | dev_set_name(&dev->dev, "%s", bus_id); |
254 | else | 248 | else |
255 | of_device_make_bus_id(&dev->dev); | 249 | of_device_make_bus_id(&dev->dev); |
256 | of_dma_configure(&dev->dev, dev->dev.of_node); | ||
257 | 250 | ||
258 | /* Allow the HW Peripheral ID to be overridden */ | 251 | /* Allow the HW Peripheral ID to be overridden */ |
259 | prop = of_get_property(node, "arm,primecell-periphid", NULL); | 252 | prop = of_get_property(node, "arm,primecell-periphid", NULL); |
@@ -547,7 +540,6 @@ static int of_platform_device_destroy(struct device *dev, void *data) | |||
547 | amba_device_unregister(to_amba_device(dev)); | 540 | amba_device_unregister(to_amba_device(dev)); |
548 | #endif | 541 | #endif |
549 | 542 | ||
550 | of_dma_deconfigure(dev); | ||
551 | of_node_clear_flag(dev->of_node, OF_POPULATED); | 543 | of_node_clear_flag(dev->of_node, OF_POPULATED); |
552 | of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); | 544 | of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); |
553 | return 0; | 545 | return 0; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 01eb8038fceb..19c8950c6c38 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1914,33 +1914,6 @@ static void pci_set_msi_domain(struct pci_dev *dev) | |||
1914 | dev_set_msi_domain(&dev->dev, d); | 1914 | dev_set_msi_domain(&dev->dev, d); |
1915 | } | 1915 | } |
1916 | 1916 | ||
1917 | /** | ||
1918 | * pci_dma_configure - Setup DMA configuration | ||
1919 | * @dev: ptr to pci_dev struct of the PCI device | ||
1920 | * | ||
1921 | * Function to update PCI devices's DMA configuration using the same | ||
1922 | * info from the OF node or ACPI node of host bridge's parent (if any). | ||
1923 | */ | ||
1924 | static void pci_dma_configure(struct pci_dev *dev) | ||
1925 | { | ||
1926 | struct device *bridge = pci_get_host_bridge_device(dev); | ||
1927 | |||
1928 | if (IS_ENABLED(CONFIG_OF) && | ||
1929 | bridge->parent && bridge->parent->of_node) { | ||
1930 | of_dma_configure(&dev->dev, bridge->parent->of_node); | ||
1931 | } else if (has_acpi_companion(bridge)) { | ||
1932 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); | ||
1933 | enum dev_dma_attr attr = acpi_get_dma_attr(adev); | ||
1934 | |||
1935 | if (attr == DEV_DMA_NOT_SUPPORTED) | ||
1936 | dev_warn(&dev->dev, "DMA not supported.\n"); | ||
1937 | else | ||
1938 | acpi_dma_configure(&dev->dev, attr); | ||
1939 | } | ||
1940 | |||
1941 | pci_put_host_bridge_device(bridge); | ||
1942 | } | ||
1943 | |||
1944 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | 1917 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
1945 | { | 1918 | { |
1946 | int ret; | 1919 | int ret; |
@@ -1954,7 +1927,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | |||
1954 | dev->dev.dma_mask = &dev->dma_mask; | 1927 | dev->dev.dma_mask = &dev->dma_mask; |
1955 | dev->dev.dma_parms = &dev->dma_parms; | 1928 | dev->dev.dma_parms = &dev->dma_parms; |
1956 | dev->dev.coherent_dma_mask = 0xffffffffull; | 1929 | dev->dev.coherent_dma_mask = 0xffffffffull; |
1957 | pci_dma_configure(dev); | ||
1958 | 1930 | ||
1959 | pci_set_dma_max_seg_size(dev, 65536); | 1931 | pci_set_dma_max_seg_size(dev, 65536); |
1960 | pci_set_dma_seg_boundary(dev, 0xffffffff); | 1932 | pci_set_dma_seg_boundary(dev, 0xffffffff); |
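The OF-versus-ACPI dispatch that pci_dma_configure() used to perform is not lost; it stops being PCI-specific and moves behind the new generic dma_configure() entry point. A hedged reconstruction of the same decision tree, based on the removed body above (the exact new location in the driver core is an assumption, not shown in this hunk):

	/* Sketch only: the deleted pci_dma_configure() logic, hoisted out of
	 * pci_device_add() and returning a result for the caller to act on. */
	static int pci_dma_configure_sketch(struct pci_dev *pdev)
	{
		struct device *bridge = pci_get_host_bridge_device(pdev);
		int ret = 0;

		if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
		    bridge->parent->of_node) {
			ret = of_dma_configure(&pdev->dev, bridge->parent->of_node);
		} else if (has_acpi_companion(bridge)) {
			struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
			enum dev_dma_attr attr = acpi_get_dma_attr(adev);

			if (attr == DEV_DMA_NOT_SUPPORTED)
				dev_warn(&pdev->dev, "DMA not supported.\n");
			else
				ret = acpi_dma_configure(&pdev->dev, attr);
		}

		pci_put_host_bridge_device(bridge);
		return ret;
	}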
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 22725bdc6f15..5fe9faf6232e 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include "dpaa_sys.h" | 33 | #include "dpaa_sys.h" |
34 | 34 | ||
35 | #include <soc/fsl/qman.h> | 35 | #include <soc/fsl/qman.h> |
36 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/iommu.h> | 37 | #include <linux/iommu.h> |
37 | 38 | ||
38 | #if defined(CONFIG_FSL_PAMU) | 39 | #if defined(CONFIG_FSL_PAMU) |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 2fc678e08d8d..73b82ac0b56b 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -577,7 +577,7 @@ struct acpi_pci_root { | |||
577 | 577 | ||
578 | bool acpi_dma_supported(struct acpi_device *adev); | 578 | bool acpi_dma_supported(struct acpi_device *adev); |
579 | enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); | 579 | enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); |
580 | void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); | 580 | int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); |
581 | void acpi_dma_deconfigure(struct device *dev); | 581 | void acpi_dma_deconfigure(struct device *dev); |
582 | 582 | ||
583 | struct acpi_device *acpi_find_child_device(struct acpi_device *parent, | 583 | struct acpi_device *acpi_find_child_device(struct acpi_device *parent, |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 3558f4eb1a86..314a0b9219c6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -566,7 +566,6 @@ | |||
566 | IRQCHIP_OF_MATCH_TABLE() \ | 566 | IRQCHIP_OF_MATCH_TABLE() \ |
567 | ACPI_PROBE_TABLE(irqchip) \ | 567 | ACPI_PROBE_TABLE(irqchip) \ |
568 | ACPI_PROBE_TABLE(clksrc) \ | 568 | ACPI_PROBE_TABLE(clksrc) \ |
569 | ACPI_PROBE_TABLE(iort) \ | ||
570 | EARLYCON_TABLE() | 569 | EARLYCON_TABLE() |
571 | 570 | ||
572 | #define INIT_TEXT \ | 571 | #define INIT_TEXT \ |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f9de30d725f..137e4a3d89c5 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -770,8 +770,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) | |||
770 | return DEV_DMA_NOT_SUPPORTED; | 770 | return DEV_DMA_NOT_SUPPORTED; |
771 | } | 771 | } |
772 | 772 | ||
773 | static inline void acpi_dma_configure(struct device *dev, | 773 | static inline int acpi_dma_configure(struct device *dev, |
774 | enum dev_dma_attr attr) { } | 774 | enum dev_dma_attr attr) |
775 | { | ||
776 | return 0; | ||
777 | } | ||
775 | 778 | ||
776 | static inline void acpi_dma_deconfigure(struct device *dev) { } | 779 | static inline void acpi_dma_deconfigure(struct device *dev) { } |
777 | 780 | ||
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 26e25d85eb3e..3ff9acea8616 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h | |||
@@ -55,7 +55,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) | |||
55 | { return NULL; } | 55 | { return NULL; } |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #define IORT_ACPI_DECLARE(name, table_id, fn) \ | ||
59 | ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn) | ||
60 | |||
61 | #endif /* __ACPI_IORT_H__ */ | 58 | #endif /* __ACPI_IORT_H__ */ |
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 5725c94b1f12..4eac2670bfa1 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/errno.h> | 20 | #include <asm/errno.h> |
21 | 21 | ||
22 | #ifdef CONFIG_IOMMU_DMA | 22 | #ifdef CONFIG_IOMMU_DMA |
23 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/iommu.h> | 24 | #include <linux/iommu.h> |
24 | #include <linux/msi.h> | 25 | #include <linux/msi.h> |
25 | 26 | ||
@@ -71,6 +72,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | |||
71 | 72 | ||
72 | /* The DMA API isn't _quite_ the whole story, though... */ | 73 | /* The DMA API isn't _quite_ the whole story, though... */ |
73 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); | 74 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); |
75 | void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); | ||
74 | 76 | ||
75 | #else | 77 | #else |
76 | 78 | ||
@@ -100,6 +102,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | |||
100 | { | 102 | { |
101 | } | 103 | } |
102 | 104 | ||
105 | static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) | ||
106 | { | ||
107 | } | ||
108 | |||
103 | #endif /* CONFIG_IOMMU_DMA */ | 109 | #endif /* CONFIG_IOMMU_DMA */ |
104 | #endif /* __KERNEL__ */ | 110 | #endif /* __KERNEL__ */ |
105 | #endif /* __DMA_IOMMU_H */ | 111 | #endif /* __DMA_IOMMU_H */ |
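iommu_dma_get_resv_regions() gives IOMMU drivers a shared way to report generic reserved regions, with an empty stub keeping !CONFIG_IOMMU_DMA builds happy. A plausible driver-side use, where mydrv_get_resv_regions() is an illustrative callback name rather than code from this series:

	/* Hypothetical .get_resv_regions callback that delegates the generic part
	 * (e.g. MSI doorbell windows) to the DMA-IOMMU helper. */
	static void mydrv_get_resv_regions(struct device *dev, struct list_head *head)
	{
		iommu_dma_get_resv_regions(dev, head);

		/* driver-specific reserved regions could be appended to @head here */
	}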
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 0977317c6835..4f3eecedca2d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev, | |||
728 | } | 728 | } |
729 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ | 729 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ |
730 | 730 | ||
731 | #ifdef CONFIG_HAS_DMA | ||
732 | int dma_configure(struct device *dev); | ||
733 | void dma_deconfigure(struct device *dev); | ||
734 | #else | ||
735 | static inline int dma_configure(struct device *dev) | ||
736 | { | ||
737 | return 0; | ||
738 | } | ||
739 | |||
740 | static inline void dma_deconfigure(struct device *dev) {} | ||
741 | #endif | ||
742 | |||
731 | /* | 743 | /* |
732 | * Managed DMA API | 744 | * Managed DMA API |
733 | */ | 745 | */ |
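dma_configure()/dma_deconfigure() are the bus-agnostic entry points that the per-bus calls removed above give way to, with no-op stubs so !CONFIG_HAS_DMA configurations still build. The intended caller is the driver core at bind time; a minimal sketch, assuming a really_probe()-style flow (this exact function is not part of the hunks shown here):

	/* Sketch of probe-time DMA configuration; assumes <linux/dma-mapping.h>. */
	static int bind_driver_sketch(struct device *dev, struct device_driver *drv)
	{
		int ret;

		ret = dma_configure(dev);	/* OF/ACPI DMA setup moves to bind time */
		if (ret)
			return ret;

		ret = drv->probe(dev);
		if (ret)
			dma_deconfigure(dev);	/* undo the setup on probe failure */

		return ret;
	}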
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 187c10299722..90884072fa73 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h | |||
@@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu); | |||
39 | extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); | 39 | extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); |
40 | extern int dmar_disabled; | 40 | extern int dmar_disabled; |
41 | extern int intel_iommu_enabled; | 41 | extern int intel_iommu_enabled; |
42 | extern int intel_iommu_tboot_noforce; | ||
42 | #else | 43 | #else |
43 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) | 44 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) |
44 | { | 45 | { |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index c573a52ae440..485a5b48f038 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/mmu_notifier.h> | 30 | #include <linux/mmu_notifier.h> |
31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
32 | #include <linux/iommu.h> | 32 | #include <linux/iommu.h> |
33 | #include <linux/io-64-nonatomic-lo-hi.h> | ||
34 | |||
33 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
34 | #include <asm/iommu.h> | 36 | #include <asm/iommu.h> |
35 | 37 | ||
@@ -72,24 +74,8 @@ | |||
72 | 74 | ||
73 | #define OFFSET_STRIDE (9) | 75 | #define OFFSET_STRIDE (9) |
74 | 76 | ||
75 | #ifdef CONFIG_64BIT | ||
76 | #define dmar_readq(a) readq(a) | 77 | #define dmar_readq(a) readq(a) |
77 | #define dmar_writeq(a,v) writeq(v,a) | 78 | #define dmar_writeq(a,v) writeq(v,a) |
78 | #else | ||
79 | static inline u64 dmar_readq(void __iomem *addr) | ||
80 | { | ||
81 | u32 lo, hi; | ||
82 | lo = readl(addr); | ||
83 | hi = readl(addr + 4); | ||
84 | return (((u64) hi) << 32) + lo; | ||
85 | } | ||
86 | |||
87 | static inline void dmar_writeq(void __iomem *addr, u64 val) | ||
88 | { | ||
89 | writel((u32)val, addr); | ||
90 | writel((u32)(val >> 32), addr + 4); | ||
91 | } | ||
92 | #endif | ||
93 | 79 | ||
94 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) | 80 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) |
95 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) | 81 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) |
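Dropping the open-coded !CONFIG_64BIT helpers works because <linux/io-64-nonatomic-lo-hi.h> already provides readq()/writeq() built from two 32-bit accesses, low word first. Roughly, that fallback behaves like the removed functions (a sketch of the semantics, not the header's exact code):

	/* Low-then-high 64-bit MMIO access on 32-bit builds, mirroring the
	 * deleted dmar_readq()/dmar_writeq() bodies above. */
	static inline u64 lo_hi_readq_sketch(const void __iomem *addr)
	{
		u32 lo = readl(addr);
		u32 hi = readl(addr + 4);

		return ((u64)hi << 32) | lo;
	}

	static inline void lo_hi_writeq_sketch(void __iomem *addr, u64 val)
	{
		writel((u32)val, addr);
		writel((u32)(val >> 32), addr + 4);
	}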
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2e4de0deee53..2cb54adc4a33 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -19,12 +19,12 @@ | |||
19 | #ifndef __LINUX_IOMMU_H | 19 | #ifndef __LINUX_IOMMU_H |
20 | #define __LINUX_IOMMU_H | 20 | #define __LINUX_IOMMU_H |
21 | 21 | ||
22 | #include <linux/scatterlist.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/types.h> | ||
22 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
23 | #include <linux/err.h> | 26 | #include <linux/err.h> |
24 | #include <linux/of.h> | 27 | #include <linux/of.h> |
25 | #include <linux/types.h> | ||
26 | #include <linux/scatterlist.h> | ||
27 | #include <trace/events/iommu.h> | ||
28 | 28 | ||
29 | #define IOMMU_READ (1 << 0) | 29 | #define IOMMU_READ (1 << 0) |
30 | #define IOMMU_WRITE (1 << 1) | 30 | #define IOMMU_WRITE (1 << 1) |
@@ -32,10 +32,13 @@ | |||
32 | #define IOMMU_NOEXEC (1 << 3) | 32 | #define IOMMU_NOEXEC (1 << 3) |
33 | #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ | 33 | #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ |
34 | /* | 34 | /* |
35 | * This is to make the IOMMU API setup privileged | 35 | * Where the bus hardware includes a privilege level as part of its access type |
36 | * mapppings accessible by the master only at higher | 36 | * markings, and certain devices are capable of issuing transactions marked as |
37 | * privileged execution level and inaccessible at | 37 | * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other |
38 | * less privileged levels. | 38 | * given permission flags only apply to accesses at the higher privilege level, |
39 | * and that unprivileged transactions should have as little access as possible. | ||
40 | * This would usually imply the same permissions as kernel mappings on the CPU, | ||
41 | * if the IOMMU page table format is equivalent. | ||
39 | */ | 42 | */ |
40 | #define IOMMU_PRIV (1 << 5) | 43 | #define IOMMU_PRIV (1 << 5) |
41 | 44 | ||
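In practice the reworded comment means: OR IOMMU_PRIV into the prot mask when a mapping should only be reachable by the master's privileged ('supervisor') transactions. An illustrative call (the values are made up):

	/* Readable only via privileged transactions; unprivileged accesses
	 * from the device get no permissions on this page. */
	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_PRIV);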
@@ -336,46 +339,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, | |||
336 | phys_addr_t offset, u64 size, | 339 | phys_addr_t offset, u64 size, |
337 | int prot); | 340 | int prot); |
338 | extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); | 341 | extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); |
339 | /** | ||
340 | * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework | ||
341 | * @domain: the iommu domain where the fault has happened | ||
342 | * @dev: the device where the fault has happened | ||
343 | * @iova: the faulting address | ||
344 | * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) | ||
345 | * | ||
346 | * This function should be called by the low-level IOMMU implementations | ||
347 | * whenever IOMMU faults happen, to allow high-level users, that are | ||
348 | * interested in such events, to know about them. | ||
349 | * | ||
350 | * This event may be useful for several possible use cases: | ||
351 | * - mere logging of the event | ||
352 | * - dynamic TLB/PTE loading | ||
353 | * - if restarting of the faulting device is required | ||
354 | * | ||
355 | * Returns 0 on success and an appropriate error code otherwise (if dynamic | ||
356 | * PTE/TLB loading will one day be supported, implementations will be able | ||
357 | * to tell whether it succeeded or not according to this return value). | ||
358 | * | ||
359 | * Specifically, -ENOSYS is returned if a fault handler isn't installed | ||
360 | * (though fault handlers can also return -ENOSYS, in case they want to | ||
361 | * elicit the default behavior of the IOMMU drivers). | ||
362 | */ | ||
363 | static inline int report_iommu_fault(struct iommu_domain *domain, | ||
364 | struct device *dev, unsigned long iova, int flags) | ||
365 | { | ||
366 | int ret = -ENOSYS; | ||
367 | |||
368 | /* | ||
369 | * if upper layers showed interest and installed a fault handler, | ||
370 | * invoke it. | ||
371 | */ | ||
372 | if (domain->handler) | ||
373 | ret = domain->handler(domain, dev, iova, flags, | ||
374 | domain->handler_token); | ||
375 | 342 | ||
376 | trace_io_page_fault(dev, iova, flags); | 343 | extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, |
377 | return ret; | 344 | unsigned long iova, int flags); |
378 | } | ||
379 | 345 | ||
380 | static inline size_t iommu_map_sg(struct iommu_domain *domain, | 346 | static inline size_t iommu_map_sg(struct iommu_domain *domain, |
381 | unsigned long iova, struct scatterlist *sg, | 347 | unsigned long iova, struct scatterlist *sg, |
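With report_iommu_fault() moved out of line, iommu.h no longer needs the tracepoint include dropped earlier in this file's diff. Assuming the new extern keeps the semantics of the inline body deleted above, the implementation in drivers/iommu/iommu.c is essentially:

	int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			       unsigned long iova, int flags)
	{
		int ret = -ENOSYS;

		/* invoke the fault handler if the upper layers installed one */
		if (domain->handler)
			ret = domain->handler(domain, dev, iova, flags,
					      domain->handler_token);

		trace_io_page_fault(dev, iova, flags);
		return ret;
	}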
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 169ea0bd8eb4..b4ad8b4f8506 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -54,7 +54,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) | |||
54 | return of_node_get(cpu_dev->of_node); | 54 | return of_node_get(cpu_dev->of_node); |
55 | } | 55 | } |
56 | 56 | ||
57 | void of_dma_configure(struct device *dev, struct device_node *np); | 57 | int of_dma_configure(struct device *dev, struct device_node *np); |
58 | void of_dma_deconfigure(struct device *dev); | ||
58 | #else /* CONFIG_OF */ | 59 | #else /* CONFIG_OF */ |
59 | 60 | ||
60 | static inline int of_driver_match_device(struct device *dev, | 61 | static inline int of_driver_match_device(struct device *dev, |
@@ -102,7 +103,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) | |||
102 | { | 103 | { |
103 | return NULL; | 104 | return NULL; |
104 | } | 105 | } |
105 | static inline void of_dma_configure(struct device *dev, struct device_node *np) | 106 | |
107 | static inline int of_dma_configure(struct device *dev, struct device_node *np) | ||
108 | { | ||
109 | return 0; | ||
110 | } | ||
111 | static inline void of_dma_deconfigure(struct device *dev) | ||
106 | {} | 112 | {} |
107 | #endif /* CONFIG_OF */ | 113 | #endif /* CONFIG_OF */ |
108 | 114 | ||
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 0496d171700a..e8b12dbf6170 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h | |||
@@ -12,28 +12,8 @@ | |||
12 | 12 | ||
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | 14 | ||
15 | #define MMU_REG_SIZE 256 | ||
16 | |||
17 | /** | ||
18 | * struct iommu_arch_data - omap iommu private data | ||
19 | * @name: name of the iommu device | ||
20 | * @iommu_dev: handle of the iommu device | ||
21 | * | ||
22 | * This is an omap iommu private data object, which binds an iommu user | ||
23 | * to its iommu device. This object should be placed at the iommu user's | ||
24 | * dev_archdata so generic IOMMU API can be used without having to | ||
25 | * utilize omap-specific plumbing anymore. | ||
26 | */ | ||
27 | struct omap_iommu_arch_data { | ||
28 | const char *name; | ||
29 | struct omap_iommu *iommu_dev; | ||
30 | }; | ||
31 | |||
32 | struct iommu_platform_data { | 15 | struct iommu_platform_data { |
33 | const char *name; | ||
34 | const char *reset_name; | 16 | const char *reset_name; |
35 | int nr_tlb_entries; | ||
36 | |||
37 | int (*assert_reset)(struct platform_device *pdev, const char *name); | 17 | int (*assert_reset)(struct platform_device *pdev, const char *name); |
38 | int (*deassert_reset)(struct platform_device *pdev, const char *name); | 18 | int (*deassert_reset)(struct platform_device *pdev, const char *name); |
39 | }; | 19 | }; |
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index 2c7befb10f13..99254ed89212 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #define _TRACE_IOMMU_H | 11 | #define _TRACE_IOMMU_H |
12 | 12 | ||
13 | #include <linux/tracepoint.h> | 13 | #include <linux/tracepoint.h> |
14 | #include <linux/pci.h> | ||
15 | 14 | ||
16 | struct device; | 15 | struct device; |
17 | 16 | ||