Diffstat:

 -rw-r--r--  Documentation/devicetree/bindings/iommu/rockchip,iommu.txt | 26
 -rw-r--r--  drivers/acpi/pci_root.c | 16
 -rw-r--r--  drivers/iommu/Kconfig | 25
 -rw-r--r--  drivers/iommu/Makefile | 2
 -rw-r--r--  drivers/iommu/amd_iommu.c | 2
 -rw-r--r--  drivers/iommu/amd_iommu_v2.c | 8
 -rw-r--r--  drivers/iommu/arm-smmu.c | 135
 -rw-r--r--  drivers/iommu/dmar.c | 532
 -rw-r--r--  drivers/iommu/intel-iommu.c | 307
 -rw-r--r--  drivers/iommu/intel_irq_remapping.c | 249
 -rw-r--r--  drivers/iommu/iommu.c | 20
 -rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 2
 -rw-r--r--  drivers/iommu/msm_iommu.c | 3
 -rw-r--r--  drivers/iommu/msm_iommu_dev.c | 10
 -rw-r--r--  drivers/iommu/omap-iommu-debug.c | 242
 -rw-r--r--  drivers/iommu/omap-iommu.c | 312
 -rw-r--r--  drivers/iommu/omap-iommu.h | 98
 -rw-r--r--  drivers/iommu/omap-iommu2.c | 337
 -rw-r--r--  drivers/iommu/rockchip-iommu.c | 1038
 -rw-r--r--  include/linux/dmar.h | 50
 -rw-r--r--  include/linux/iommu.h | 3

21 files changed, 2432 insertions, 985 deletions
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644
index 000000000000..9a55ac3735e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -0,0 +1,26 @@ | |||
1 | Rockchip IOMMU | ||
2 | ============== | ||
3 | |||
4 | A Rockchip DRM iommu translates io virtual addresses to physical addresses for | ||
5 | its master device. Each slave device is bound to a single master device, and | ||
6 | shares its clocks, power domain and irq. | ||
7 | |||
8 | Required properties: | ||
9 | - compatible : Should be "rockchip,iommu" | ||
10 | - reg : Address space for the configuration registers | ||
11 | - interrupts : Interrupt specifier for the IOMMU instance | ||
12 | - interrupt-names : Interrupt name for the IOMMU instance | ||
13 | - #iommu-cells : Should be <0>. This indicates the iommu is a | ||
14 | "single-master" device, and needs no additional information | ||
15 | to associate with its master device. See: | ||
16 | Documentation/devicetree/bindings/iommu/iommu.txt | ||
17 | |||
18 | Example: | ||
19 | |||
20 | vopl_mmu: iommu@ff940300 { | ||
21 | compatible = "rockchip,iommu"; | ||
22 | reg = <0xff940300 0x100>; | ||
23 | interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; | ||
24 | interrupt-names = "vopl_mmu"; | ||
25 | #iommu-cells = <0>; | ||
26 | }; | ||
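Since #iommu-cells is <0>, the generic binding referenced above (iommu.txt) attaches a master to this IOMMU with nothing more than a bare phandle in an "iommus" property. A hypothetical master node is sketched below; the node name, compatible string and register range are illustrative and not part of this binding:

        vopl: vop@ff940000 {
                compatible = "rockchip,rk3288-vop";     /* illustrative master device */
                reg = <0xff940000 0x19c>;
                iommus = <&vopl_mmu>;
        };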
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e038ea..c6bcb8c719d8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/pci-acpi.h> | 34 | #include <linux/pci-acpi.h> |
35 | #include <linux/pci-aspm.h> | 35 | #include <linux/pci-aspm.h> |
36 | #include <linux/dmar.h> | ||
36 | #include <linux/acpi.h> | 37 | #include <linux/acpi.h> |
37 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
38 | #include <linux/dmi.h> | 39 | #include <linux/dmi.h> |
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
525 | struct acpi_pci_root *root; | 526 | struct acpi_pci_root *root; |
526 | acpi_handle handle = device->handle; | 527 | acpi_handle handle = device->handle; |
527 | int no_aspm = 0, clear_aspm = 0; | 528 | int no_aspm = 0, clear_aspm = 0; |
529 | bool hotadd = system_state != SYSTEM_BOOTING; | ||
528 | 530 | ||
529 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 531 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
530 | if (!root) | 532 | if (!root) |
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
571 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); | 573 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); |
572 | device->driver_data = root; | 574 | device->driver_data = root; |
573 | 575 | ||
576 | if (hotadd && dmar_device_add(handle)) { | ||
577 | result = -ENXIO; | ||
578 | goto end; | ||
579 | } | ||
580 | |||
574 | pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", | 581 | pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", |
575 | acpi_device_name(device), acpi_device_bid(device), | 582 | acpi_device_name(device), acpi_device_bid(device), |
576 | root->segment, &root->secondary); | 583 | root->segment, &root->secondary); |
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
597 | root->segment, (unsigned int)root->secondary.start); | 604 | root->segment, (unsigned int)root->secondary.start); |
598 | device->driver_data = NULL; | 605 | device->driver_data = NULL; |
599 | result = -ENODEV; | 606 | result = -ENODEV; |
600 | goto end; | 607 | goto remove_dmar; |
601 | } | 608 | } |
602 | 609 | ||
603 | if (clear_aspm) { | 610 | if (clear_aspm) { |
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
611 | if (device->wakeup.flags.run_wake) | 618 | if (device->wakeup.flags.run_wake) |
612 | device_set_run_wake(root->bus->bridge, true); | 619 | device_set_run_wake(root->bus->bridge, true); |
613 | 620 | ||
614 | if (system_state != SYSTEM_BOOTING) { | 621 | if (hotadd) { |
615 | pcibios_resource_survey_bus(root->bus); | 622 | pcibios_resource_survey_bus(root->bus); |
616 | pci_assign_unassigned_root_bus_resources(root->bus); | 623 | pci_assign_unassigned_root_bus_resources(root->bus); |
617 | } | 624 | } |
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
621 | pci_unlock_rescan_remove(); | 628 | pci_unlock_rescan_remove(); |
622 | return 1; | 629 | return 1; |
623 | 630 | ||
631 | remove_dmar: | ||
632 | if (hotadd) | ||
633 | dmar_device_remove(handle); | ||
624 | end: | 634 | end: |
625 | kfree(root); | 635 | kfree(root); |
626 | return result; | 636 | return result; |
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
639 | 649 | ||
640 | pci_remove_root_bus(root->bus); | 650 | pci_remove_root_bus(root->bus); |
641 | 651 | ||
652 | dmar_device_remove(device->handle); | ||
653 | |||
642 | pci_unlock_rescan_remove(); | 654 | pci_unlock_rescan_remove(); |
643 | 655 | ||
644 | kfree(root); | 656 | kfree(root); |
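The pci_root.c hunks above hook DMAR remapping-unit hotplug into the ACPI host-bridge lifecycle: dmar_device_add() runs only for hot-added bridges (hotadd = system_state != SYSTEM_BOOTING), the new remove_dmar label unwinds it if the bridge scan fails, and acpi_pci_root_remove() always calls dmar_device_remove(). A condensed paraphrase of that flow, with a hypothetical scan_bridge() standing in for the real scanning code:

static int root_add_sketch(struct acpi_device *device)
{
        acpi_handle handle = device->handle;
        bool hotadd = system_state != SYSTEM_BOOTING;
        int result;

        /* Register any DMAR unit behind a hot-added bridge before scanning it. */
        if (hotadd && dmar_device_add(handle))
                return -ENXIO;

        result = scan_bridge(device);           /* hypothetical stand-in */
        if (result && hotadd)
                dmar_device_remove(handle);     /* unwind on failure */

        return result;
}

Both entry points are implemented at the end of the dmar.c hunks further down.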
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6dbfbc209491..30f0e61341c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,13 +144,26 @@ config OMAP_IOMMU | |||
144 | select IOMMU_API | 144 | select IOMMU_API |
145 | 145 | ||
146 | config OMAP_IOMMU_DEBUG | 146 | config OMAP_IOMMU_DEBUG |
147 | tristate "Export OMAP IOMMU internals in DebugFS" | 147 | bool "Export OMAP IOMMU internals in DebugFS" |
148 | depends on OMAP_IOMMU && DEBUG_FS | 148 | depends on OMAP_IOMMU && DEBUG_FS |
149 | help | 149 | ---help--- |
150 | Select this to see extensive information about | 150 | Select this to see extensive information about |
151 | the internal state of OMAP IOMMU in debugfs. | 151 | the internal state of OMAP IOMMU in debugfs. |
152 | |||
153 | Say N unless you know you need this. | ||
152 | 154 | ||
153 | Say N unless you know you need this. | 155 | config ROCKCHIP_IOMMU |
156 | bool "Rockchip IOMMU Support" | ||
157 | depends on ARM | ||
158 | depends on ARCH_ROCKCHIP || COMPILE_TEST | ||
159 | select IOMMU_API | ||
160 | select ARM_DMA_USE_IOMMU | ||
161 | help | ||
162 | Support for IOMMUs found on Rockchip rk32xx SOCs. | ||
163 | These IOMMUs allow virtualization of the address space used by most | ||
164 | cores within the multimedia subsystem. | ||
165 | Say Y here if you are using a Rockchip SoC that includes an IOMMU | ||
166 | device. | ||
154 | 167 | ||
155 | config TEGRA_IOMMU_GART | 168 | config TEGRA_IOMMU_GART |
156 | bool "Tegra GART IOMMU Support" | 169 | bool "Tegra GART IOMMU Support" |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o | |||
11 | obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o | 11 | obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o |
12 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | 12 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o |
13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o | 13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o |
14 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o | ||
15 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o | 14 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o |
15 | obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o | ||
16 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o | 16 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o |
17 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o | 17 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o |
18 | obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o | 18 | obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2d84c9edf3b8..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) | |||
3411 | return true; | 3411 | return true; |
3412 | case IOMMU_CAP_INTR_REMAP: | 3412 | case IOMMU_CAP_INTR_REMAP: |
3413 | return (irq_remapping_enabled == 1); | 3413 | return (irq_remapping_enabled == 1); |
3414 | case IOMMU_CAP_NOEXEC: | ||
3415 | return false; | ||
3414 | } | 3416 | } |
3415 | 3417 | ||
3416 | return false; | 3418 | return false; |
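This hunk and the matching one in arm-smmu.c below report whether a driver enforces the new IOMMU_NOEXEC protection flag: the AMD driver answers no, the ARM SMMU answers yes. A minimal consumer sketch, assuming the iommu_capable() and iommu_map() helpers from the core IOMMU API:

#include <linux/iommu.h>

/* Ask for a no-exec mapping only where the IOMMU actually enforces it. */
static int map_buffer(struct iommu_domain *domain, struct bus_type *bus,
                      unsigned long iova, phys_addr_t paddr, size_t size)
{
        int prot = IOMMU_READ | IOMMU_WRITE;

        if (iommu_capable(bus, IOMMU_CAP_NOEXEC))
                prot |= IOMMU_NOEXEC;

        return iommu_map(domain, iova, paddr, size, prot);
}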
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90d734bbf467..a2d87a60c27f 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state) | |||
279 | 279 | ||
280 | static void put_pasid_state(struct pasid_state *pasid_state) | 280 | static void put_pasid_state(struct pasid_state *pasid_state) |
281 | { | 281 | { |
282 | if (atomic_dec_and_test(&pasid_state->count)) { | 282 | if (atomic_dec_and_test(&pasid_state->count)) |
283 | put_device_state(pasid_state->device_state); | ||
284 | wake_up(&pasid_state->wq); | 283 | wake_up(&pasid_state->wq); |
285 | } | ||
286 | } | 284 | } |
287 | 285 | ||
288 | static void put_pasid_state_wait(struct pasid_state *pasid_state) | 286 | static void put_pasid_state_wait(struct pasid_state *pasid_state) |
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) | |||
291 | 289 | ||
292 | prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); | 290 | prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); |
293 | 291 | ||
294 | if (atomic_dec_and_test(&pasid_state->count)) | 292 | if (!atomic_dec_and_test(&pasid_state->count)) |
295 | put_device_state(pasid_state->device_state); | ||
296 | else | ||
297 | schedule(); | 293 | schedule(); |
298 | 294 | ||
299 | finish_wait(&pasid_state->wq, &wait); | 295 | finish_wait(&pasid_state->wq, &wait); |
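With this change put_pasid_state() and put_pasid_state_wait() manage only the pasid_state refcount; the device_state reference is no longer dropped here. The underlying put/wait idiom, reduced to a minimal sketch (illustrative structures, not the driver's own):

struct refcounted_sketch {
        atomic_t count;
        wait_queue_head_t wq;
};

/* Drop a reference; the final put wakes anyone waiting for teardown. */
static void put_ref(struct refcounted_sketch *r)
{
        if (atomic_dec_and_test(&r->count))
                wake_up(&r->wq);
}

/* Drop our reference, then sleep until every other holder has dropped theirs. */
static void put_ref_and_wait(struct refcounted_sketch *r)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&r->wq, &wait, TASK_UNINTERRUPTIBLE);
        if (!atomic_dec_and_test(&r->count))
                schedule();
        finish_wait(&r->wq, &wait);
}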
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e393ae01b5d2..b8aac1389a96 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg { | |||
404 | #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) | 404 | #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) |
405 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) | 405 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) |
406 | 406 | ||
407 | enum arm_smmu_domain_stage { | ||
408 | ARM_SMMU_DOMAIN_S1 = 0, | ||
409 | ARM_SMMU_DOMAIN_S2, | ||
410 | ARM_SMMU_DOMAIN_NESTED, | ||
411 | }; | ||
412 | |||
407 | struct arm_smmu_domain { | 413 | struct arm_smmu_domain { |
408 | struct arm_smmu_device *smmu; | 414 | struct arm_smmu_device *smmu; |
409 | struct arm_smmu_cfg cfg; | 415 | struct arm_smmu_cfg cfg; |
416 | enum arm_smmu_domain_stage stage; | ||
410 | spinlock_t lock; | 417 | spinlock_t lock; |
411 | }; | 418 | }; |
412 | 419 | ||
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
906 | if (smmu_domain->smmu) | 913 | if (smmu_domain->smmu) |
907 | goto out_unlock; | 914 | goto out_unlock; |
908 | 915 | ||
909 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | 916 | /* |
917 | * Mapping the requested stage onto what we support is surprisingly | ||
918 | * complicated, mainly because the spec allows S1+S2 SMMUs without | ||
919 | * support for nested translation. That means we end up with the | ||
920 | * following table: | ||
921 | * | ||
922 | * Requested Supported Actual | ||
923 | * S1 N S1 | ||
924 | * S1 S1+S2 S1 | ||
925 | * S1 S2 S2 | ||
926 | * S1 S1 S1 | ||
927 | * N N N | ||
928 | * N S1+S2 S2 | ||
929 | * N S2 S2 | ||
930 | * N S1 S1 | ||
931 | * | ||
932 | * Note that you can't actually request stage-2 mappings. | ||
933 | */ | ||
934 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) | ||
935 | smmu_domain->stage = ARM_SMMU_DOMAIN_S2; | ||
936 | if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) | ||
937 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; | ||
938 | |||
939 | switch (smmu_domain->stage) { | ||
940 | case ARM_SMMU_DOMAIN_S1: | ||
941 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
942 | start = smmu->num_s2_context_banks; | ||
943 | break; | ||
944 | case ARM_SMMU_DOMAIN_NESTED: | ||
910 | /* | 945 | /* |
911 | * We will likely want to change this if/when KVM gets | 946 | * We will likely want to change this if/when KVM gets |
912 | * involved. | 947 | * involved. |
913 | */ | 948 | */ |
914 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | 949 | case ARM_SMMU_DOMAIN_S2: |
915 | start = smmu->num_s2_context_banks; | ||
916 | } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { | ||
917 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
918 | start = smmu->num_s2_context_banks; | ||
919 | } else { | ||
920 | cfg->cbar = CBAR_TYPE_S2_TRANS; | 950 | cfg->cbar = CBAR_TYPE_S2_TRANS; |
921 | start = 0; | 951 | start = 0; |
952 | break; | ||
953 | default: | ||
954 | ret = -EINVAL; | ||
955 | goto out_unlock; | ||
922 | } | 956 | } |
923 | 957 | ||
924 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | 958 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, |
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1281 | unsigned long pfn, int prot, int stage) | 1315 | unsigned long pfn, int prot, int stage) |
1282 | { | 1316 | { |
1283 | pte_t *pte, *start; | 1317 | pte_t *pte, *start; |
1284 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; | 1318 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; |
1285 | 1319 | ||
1286 | if (pmd_none(*pmd)) { | 1320 | if (pmd_none(*pmd)) { |
1287 | /* Allocate a new set of tables */ | 1321 | /* Allocate a new set of tables */ |
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1315 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; | 1349 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; |
1316 | } | 1350 | } |
1317 | 1351 | ||
1352 | if (prot & IOMMU_NOEXEC) | ||
1353 | pteval |= ARM_SMMU_PTE_XN; | ||
1354 | |||
1318 | /* If no access, create a faulting entry to avoid TLB fills */ | 1355 | /* If no access, create a faulting entry to avoid TLB fills */ |
1319 | if (prot & IOMMU_EXEC) | 1356 | if (!(prot & (IOMMU_READ | IOMMU_WRITE))) |
1320 | pteval &= ~ARM_SMMU_PTE_XN; | ||
1321 | else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) | ||
1322 | pteval &= ~ARM_SMMU_PTE_PAGE; | 1357 | pteval &= ~ARM_SMMU_PTE_PAGE; |
1323 | 1358 | ||
1324 | pteval |= ARM_SMMU_PTE_SH_IS; | 1359 | pteval |= ARM_SMMU_PTE_SH_IS; |
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) | |||
1568 | return true; | 1603 | return true; |
1569 | case IOMMU_CAP_INTR_REMAP: | 1604 | case IOMMU_CAP_INTR_REMAP: |
1570 | return true; /* MSIs are just memory writes */ | 1605 | return true; /* MSIs are just memory writes */ |
1606 | case IOMMU_CAP_NOEXEC: | ||
1607 | return true; | ||
1571 | default: | 1608 | default: |
1572 | return false; | 1609 | return false; |
1573 | } | 1610 | } |
@@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev) | |||
1644 | iommu_group_remove_device(dev); | 1681 | iommu_group_remove_device(dev); |
1645 | } | 1682 | } |
1646 | 1683 | ||
1684 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | ||
1685 | enum iommu_attr attr, void *data) | ||
1686 | { | ||
1687 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1688 | |||
1689 | switch (attr) { | ||
1690 | case DOMAIN_ATTR_NESTING: | ||
1691 | *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); | ||
1692 | return 0; | ||
1693 | default: | ||
1694 | return -ENODEV; | ||
1695 | } | ||
1696 | } | ||
1697 | |||
1698 | static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | ||
1699 | enum iommu_attr attr, void *data) | ||
1700 | { | ||
1701 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1702 | |||
1703 | switch (attr) { | ||
1704 | case DOMAIN_ATTR_NESTING: | ||
1705 | if (smmu_domain->smmu) | ||
1706 | return -EPERM; | ||
1707 | if (*(int *)data) | ||
1708 | smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; | ||
1709 | else | ||
1710 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; | ||
1711 | |||
1712 | return 0; | ||
1713 | default: | ||
1714 | return -ENODEV; | ||
1715 | } | ||
1716 | } | ||
1717 | |||
1647 | static const struct iommu_ops arm_smmu_ops = { | 1718 | static const struct iommu_ops arm_smmu_ops = { |
1648 | .capable = arm_smmu_capable, | 1719 | .capable = arm_smmu_capable, |
1649 | .domain_init = arm_smmu_domain_init, | 1720 | .domain_init = arm_smmu_domain_init, |
1650 | .domain_destroy = arm_smmu_domain_destroy, | 1721 | .domain_destroy = arm_smmu_domain_destroy, |
1651 | .attach_dev = arm_smmu_attach_dev, | 1722 | .attach_dev = arm_smmu_attach_dev, |
1652 | .detach_dev = arm_smmu_detach_dev, | 1723 | .detach_dev = arm_smmu_detach_dev, |
1653 | .map = arm_smmu_map, | 1724 | .map = arm_smmu_map, |
1654 | .unmap = arm_smmu_unmap, | 1725 | .unmap = arm_smmu_unmap, |
1655 | .map_sg = default_iommu_map_sg, | 1726 | .map_sg = default_iommu_map_sg, |
1656 | .iova_to_phys = arm_smmu_iova_to_phys, | 1727 | .iova_to_phys = arm_smmu_iova_to_phys, |
1657 | .add_device = arm_smmu_add_device, | 1728 | .add_device = arm_smmu_add_device, |
1658 | .remove_device = arm_smmu_remove_device, | 1729 | .remove_device = arm_smmu_remove_device, |
1659 | .pgsize_bitmap = (SECTION_SIZE | | 1730 | .domain_get_attr = arm_smmu_domain_get_attr, |
1660 | ARM_SMMU_PTE_CONT_SIZE | | 1731 | .domain_set_attr = arm_smmu_domain_set_attr, |
1661 | PAGE_SIZE), | 1732 | .pgsize_bitmap = (SECTION_SIZE | |
1733 | ARM_SMMU_PTE_CONT_SIZE | | ||
1734 | PAGE_SIZE), | ||
1662 | }; | 1735 | }; |
1663 | 1736 | ||
1664 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | 1737 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) |
@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = { | |||
2073 | 2146 | ||
2074 | static int __init arm_smmu_init(void) | 2147 | static int __init arm_smmu_init(void) |
2075 | { | 2148 | { |
2149 | struct device_node *np; | ||
2076 | int ret; | 2150 | int ret; |
2077 | 2151 | ||
2152 | /* | ||
2153 | * Play nice with systems that don't have an ARM SMMU by checking that | ||
2154 | * an ARM SMMU exists in the system before proceeding with the driver | ||
2155 | * and IOMMU bus operation registration. | ||
2156 | */ | ||
2157 | np = of_find_matching_node(NULL, arm_smmu_of_match); | ||
2158 | if (!np) | ||
2159 | return 0; | ||
2160 | |||
2161 | of_node_put(np); | ||
2162 | |||
2078 | ret = platform_driver_register(&arm_smmu_driver); | 2163 | ret = platform_driver_register(&arm_smmu_driver); |
2079 | if (ret) | 2164 | if (ret) |
2080 | return ret; | 2165 | return ret; |
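The new DOMAIN_ATTR_NESTING handlers let a caller (VFIO is the usual one) ask for nested translation, and arm_smmu_domain_set_attr() refuses with -EPERM once the domain is attached to hardware, so the attribute has to be set between allocation and the first attach. A hedged caller sketch using the generic domain-attribute API:

#include <linux/iommu.h>

/* Allocate a domain, request nesting, then attach; order matters. */
static struct iommu_domain *alloc_nested_domain(struct bus_type *bus,
                                                struct device *dev)
{
        struct iommu_domain *domain;
        int nesting = 1;

        domain = iommu_domain_alloc(bus);
        if (!domain)
                return NULL;

        if (iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting) ||
            iommu_attach_device(domain, dev)) {
                iommu_domain_free(domain);
                return NULL;
        }

        return domain;
}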
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@ | |||
44 | 44 | ||
45 | #include "irq_remapping.h" | 45 | #include "irq_remapping.h" |
46 | 46 | ||
47 | typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); | ||
48 | struct dmar_res_callback { | ||
49 | dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED]; | ||
50 | void *arg[ACPI_DMAR_TYPE_RESERVED]; | ||
51 | bool ignore_unhandled; | ||
52 | bool print_entry; | ||
53 | }; | ||
54 | |||
47 | /* | 55 | /* |
48 | * Assumptions: | 56 | * Assumptions: |
49 | * 1) The hotplug framework guarentees that DMAR unit will be hot-added | 57 | * 1) The hotplug framework guarentees that DMAR unit will be hot-added |
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units); | |||
62 | struct acpi_table_header * __initdata dmar_tbl; | 70 | struct acpi_table_header * __initdata dmar_tbl; |
63 | static acpi_size dmar_tbl_size; | 71 | static acpi_size dmar_tbl_size; |
64 | static int dmar_dev_scope_status = 1; | 72 | static int dmar_dev_scope_status = 1; |
73 | static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; | ||
65 | 74 | ||
66 | static int alloc_iommu(struct dmar_drhd_unit *drhd); | 75 | static int alloc_iommu(struct dmar_drhd_unit *drhd); |
67 | static void free_iommu(struct intel_iommu *iommu); | 76 | static void free_iommu(struct intel_iommu *iommu); |
68 | 77 | ||
69 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | 78 | static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) |
70 | { | 79 | { |
71 | /* | 80 | /* |
72 | * add INCLUDE_ALL at the tail, so scan the list will find it at | 81 | * add INCLUDE_ALL at the tail, so scan the list will find it at |
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = { | |||
344 | .priority = INT_MIN, | 353 | .priority = INT_MIN, |
345 | }; | 354 | }; |
346 | 355 | ||
356 | static struct dmar_drhd_unit * | ||
357 | dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd) | ||
358 | { | ||
359 | struct dmar_drhd_unit *dmaru; | ||
360 | |||
361 | list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) | ||
362 | if (dmaru->segment == drhd->segment && | ||
363 | dmaru->reg_base_addr == drhd->address) | ||
364 | return dmaru; | ||
365 | |||
366 | return NULL; | ||
367 | } | ||
368 | |||
347 | /** | 369 | /** |
348 | * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition | 370 | * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition |
349 | * structure which uniquely represent one DMA remapping hardware unit | 371 | * structure which uniquely represent one DMA remapping hardware unit |
350 | * present in the platform | 372 | * present in the platform |
351 | */ | 373 | */ |
352 | static int __init | 374 | static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) |
353 | dmar_parse_one_drhd(struct acpi_dmar_header *header) | ||
354 | { | 375 | { |
355 | struct acpi_dmar_hardware_unit *drhd; | 376 | struct acpi_dmar_hardware_unit *drhd; |
356 | struct dmar_drhd_unit *dmaru; | 377 | struct dmar_drhd_unit *dmaru; |
357 | int ret = 0; | 378 | int ret = 0; |
358 | 379 | ||
359 | drhd = (struct acpi_dmar_hardware_unit *)header; | 380 | drhd = (struct acpi_dmar_hardware_unit *)header; |
360 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); | 381 | dmaru = dmar_find_dmaru(drhd); |
382 | if (dmaru) | ||
383 | goto out; | ||
384 | |||
385 | dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL); | ||
361 | if (!dmaru) | 386 | if (!dmaru) |
362 | return -ENOMEM; | 387 | return -ENOMEM; |
363 | 388 | ||
364 | dmaru->hdr = header; | 389 | /* |
390 | * If header is allocated from slab by ACPI _DSM method, we need to | ||
391 | * copy the content because the memory buffer will be freed on return. | ||
392 | */ | ||
393 | dmaru->hdr = (void *)(dmaru + 1); | ||
394 | memcpy(dmaru->hdr, header, header->length); | ||
365 | dmaru->reg_base_addr = drhd->address; | 395 | dmaru->reg_base_addr = drhd->address; |
366 | dmaru->segment = drhd->segment; | 396 | dmaru->segment = drhd->segment; |
367 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ | 397 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ |
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
381 | return ret; | 411 | return ret; |
382 | } | 412 | } |
383 | dmar_register_drhd_unit(dmaru); | 413 | dmar_register_drhd_unit(dmaru); |
414 | |||
415 | out: | ||
416 | if (arg) | ||
417 | (*(int *)arg)++; | ||
418 | |||
384 | return 0; | 419 | return 0; |
385 | } | 420 | } |
386 | 421 | ||
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) | |||
393 | kfree(dmaru); | 428 | kfree(dmaru); |
394 | } | 429 | } |
395 | 430 | ||
396 | static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) | 431 | static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, |
432 | void *arg) | ||
397 | { | 433 | { |
398 | struct acpi_dmar_andd *andd = (void *)header; | 434 | struct acpi_dmar_andd *andd = (void *)header; |
399 | 435 | ||
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) | |||
414 | } | 450 | } |
415 | 451 | ||
416 | #ifdef CONFIG_ACPI_NUMA | 452 | #ifdef CONFIG_ACPI_NUMA |
417 | static int __init | 453 | static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) |
418 | dmar_parse_one_rhsa(struct acpi_dmar_header *header) | ||
419 | { | 454 | { |
420 | struct acpi_dmar_rhsa *rhsa; | 455 | struct acpi_dmar_rhsa *rhsa; |
421 | struct dmar_drhd_unit *drhd; | 456 | struct dmar_drhd_unit *drhd; |
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header) | |||
442 | 477 | ||
443 | return 0; | 478 | return 0; |
444 | } | 479 | } |
480 | #else | ||
481 | #define dmar_parse_one_rhsa dmar_res_noop | ||
445 | #endif | 482 | #endif |
446 | 483 | ||
447 | static void __init | 484 | static void __init |
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void) | |||
503 | return (ACPI_SUCCESS(status) ? 1 : 0); | 540 | return (ACPI_SUCCESS(status) ? 1 : 0); |
504 | } | 541 | } |
505 | 542 | ||
543 | static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, | ||
544 | size_t len, struct dmar_res_callback *cb) | ||
545 | { | ||
546 | int ret = 0; | ||
547 | struct acpi_dmar_header *iter, *next; | ||
548 | struct acpi_dmar_header *end = ((void *)start) + len; | ||
549 | |||
550 | for (iter = start; iter < end && ret == 0; iter = next) { | ||
551 | next = (void *)iter + iter->length; | ||
552 | if (iter->length == 0) { | ||
553 | /* Avoid looping forever on bad ACPI tables */ | ||
554 | pr_debug(FW_BUG "Invalid 0-length structure\n"); | ||
555 | break; | ||
556 | } else if (next > end) { | ||
557 | /* Avoid passing table end */ | ||
558 | pr_warn(FW_BUG "record passes table end\n"); | ||
559 | ret = -EINVAL; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | if (cb->print_entry) | ||
564 | dmar_table_print_dmar_entry(iter); | ||
565 | |||
566 | if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { | ||
567 | /* continue for forward compatibility */ | ||
568 | pr_debug("Unknown DMAR structure type %d\n", | ||
569 | iter->type); | ||
570 | } else if (cb->cb[iter->type]) { | ||
571 | ret = cb->cb[iter->type](iter, cb->arg[iter->type]); | ||
572 | } else if (!cb->ignore_unhandled) { | ||
573 | pr_warn("No handler for DMAR structure type %d\n", | ||
574 | iter->type); | ||
575 | ret = -EINVAL; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | return ret; | ||
580 | } | ||
581 | |||
582 | static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, | ||
583 | struct dmar_res_callback *cb) | ||
584 | { | ||
585 | return dmar_walk_remapping_entries((void *)(dmar + 1), | ||
586 | dmar->header.length - sizeof(*dmar), cb); | ||
587 | } | ||
588 | |||
506 | /** | 589 | /** |
507 | * parse_dmar_table - parses the DMA reporting table | 590 | * parse_dmar_table - parses the DMA reporting table |
508 | */ | 591 | */ |
@@ -510,9 +593,18 @@ static int __init | |||
510 | parse_dmar_table(void) | 593 | parse_dmar_table(void) |
511 | { | 594 | { |
512 | struct acpi_table_dmar *dmar; | 595 | struct acpi_table_dmar *dmar; |
513 | struct acpi_dmar_header *entry_header; | ||
514 | int ret = 0; | 596 | int ret = 0; |
515 | int drhd_count = 0; | 597 | int drhd_count = 0; |
598 | struct dmar_res_callback cb = { | ||
599 | .print_entry = true, | ||
600 | .ignore_unhandled = true, | ||
601 | .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count, | ||
602 | .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd, | ||
603 | .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr, | ||
604 | .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr, | ||
605 | .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa, | ||
606 | .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd, | ||
607 | }; | ||
516 | 608 | ||
517 | /* | 609 | /* |
518 | * Do it again, earlier dmar_tbl mapping could be mapped with | 610 | * Do it again, earlier dmar_tbl mapping could be mapped with |
@@ -536,51 +628,10 @@ parse_dmar_table(void) | |||
536 | } | 628 | } |
537 | 629 | ||
538 | pr_info("Host address width %d\n", dmar->width + 1); | 630 | pr_info("Host address width %d\n", dmar->width + 1); |
539 | 631 | ret = dmar_walk_dmar_table(dmar, &cb); | |
540 | entry_header = (struct acpi_dmar_header *)(dmar + 1); | 632 | if (ret == 0 && drhd_count == 0) |
541 | while (((unsigned long)entry_header) < | ||
542 | (((unsigned long)dmar) + dmar_tbl->length)) { | ||
543 | /* Avoid looping forever on bad ACPI tables */ | ||
544 | if (entry_header->length == 0) { | ||
545 | pr_warn("Invalid 0-length structure\n"); | ||
546 | ret = -EINVAL; | ||
547 | break; | ||
548 | } | ||
549 | |||
550 | dmar_table_print_dmar_entry(entry_header); | ||
551 | |||
552 | switch (entry_header->type) { | ||
553 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: | ||
554 | drhd_count++; | ||
555 | ret = dmar_parse_one_drhd(entry_header); | ||
556 | break; | ||
557 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | ||
558 | ret = dmar_parse_one_rmrr(entry_header); | ||
559 | break; | ||
560 | case ACPI_DMAR_TYPE_ROOT_ATS: | ||
561 | ret = dmar_parse_one_atsr(entry_header); | ||
562 | break; | ||
563 | case ACPI_DMAR_TYPE_HARDWARE_AFFINITY: | ||
564 | #ifdef CONFIG_ACPI_NUMA | ||
565 | ret = dmar_parse_one_rhsa(entry_header); | ||
566 | #endif | ||
567 | break; | ||
568 | case ACPI_DMAR_TYPE_NAMESPACE: | ||
569 | ret = dmar_parse_one_andd(entry_header); | ||
570 | break; | ||
571 | default: | ||
572 | pr_warn("Unknown DMAR structure type %d\n", | ||
573 | entry_header->type); | ||
574 | ret = 0; /* for forward compatibility */ | ||
575 | break; | ||
576 | } | ||
577 | if (ret) | ||
578 | break; | ||
579 | |||
580 | entry_header = ((void *)entry_header + entry_header->length); | ||
581 | } | ||
582 | if (drhd_count == 0) | ||
583 | pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); | 633 | pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); |
634 | |||
584 | return ret; | 635 | return ret; |
585 | } | 636 | } |
586 | 637 | ||
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message) | |||
778 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | 829 | dmi_get_system_info(DMI_PRODUCT_VERSION)); |
779 | } | 830 | } |
780 | 831 | ||
781 | static int __init check_zero_address(void) | 832 | static int __ref |
833 | dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg) | ||
782 | { | 834 | { |
783 | struct acpi_table_dmar *dmar; | ||
784 | struct acpi_dmar_header *entry_header; | ||
785 | struct acpi_dmar_hardware_unit *drhd; | 835 | struct acpi_dmar_hardware_unit *drhd; |
836 | void __iomem *addr; | ||
837 | u64 cap, ecap; | ||
786 | 838 | ||
787 | dmar = (struct acpi_table_dmar *)dmar_tbl; | 839 | drhd = (void *)entry; |
788 | entry_header = (struct acpi_dmar_header *)(dmar + 1); | 840 | if (!drhd->address) { |
789 | 841 | warn_invalid_dmar(0, ""); | |
790 | while (((unsigned long)entry_header) < | 842 | return -EINVAL; |
791 | (((unsigned long)dmar) + dmar_tbl->length)) { | 843 | } |
792 | /* Avoid looping forever on bad ACPI tables */ | ||
793 | if (entry_header->length == 0) { | ||
794 | pr_warn("Invalid 0-length structure\n"); | ||
795 | return 0; | ||
796 | } | ||
797 | 844 | ||
798 | if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { | 845 | if (arg) |
799 | void __iomem *addr; | 846 | addr = ioremap(drhd->address, VTD_PAGE_SIZE); |
800 | u64 cap, ecap; | 847 | else |
848 | addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); | ||
849 | if (!addr) { | ||
850 | pr_warn("IOMMU: can't validate: %llx\n", drhd->address); | ||
851 | return -EINVAL; | ||
852 | } | ||
801 | 853 | ||
802 | drhd = (void *)entry_header; | 854 | cap = dmar_readq(addr + DMAR_CAP_REG); |
803 | if (!drhd->address) { | 855 | ecap = dmar_readq(addr + DMAR_ECAP_REG); |
804 | warn_invalid_dmar(0, ""); | ||
805 | goto failed; | ||
806 | } | ||
807 | 856 | ||
808 | addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); | 857 | if (arg) |
809 | if (!addr ) { | 858 | iounmap(addr); |
810 | printk("IOMMU: can't validate: %llx\n", drhd->address); | 859 | else |
811 | goto failed; | 860 | early_iounmap(addr, VTD_PAGE_SIZE); |
812 | } | ||
813 | cap = dmar_readq(addr + DMAR_CAP_REG); | ||
814 | ecap = dmar_readq(addr + DMAR_ECAP_REG); | ||
815 | early_iounmap(addr, VTD_PAGE_SIZE); | ||
816 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { | ||
817 | warn_invalid_dmar(drhd->address, | ||
818 | " returns all ones"); | ||
819 | goto failed; | ||
820 | } | ||
821 | } | ||
822 | 861 | ||
823 | entry_header = ((void *)entry_header + entry_header->length); | 862 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { |
863 | warn_invalid_dmar(drhd->address, " returns all ones"); | ||
864 | return -EINVAL; | ||
824 | } | 865 | } |
825 | return 1; | ||
826 | 866 | ||
827 | failed: | ||
828 | return 0; | 867 | return 0; |
829 | } | 868 | } |
830 | 869 | ||
831 | int __init detect_intel_iommu(void) | 870 | int __init detect_intel_iommu(void) |
832 | { | 871 | { |
833 | int ret; | 872 | int ret; |
873 | struct dmar_res_callback validate_drhd_cb = { | ||
874 | .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd, | ||
875 | .ignore_unhandled = true, | ||
876 | }; | ||
834 | 877 | ||
835 | down_write(&dmar_global_lock); | 878 | down_write(&dmar_global_lock); |
836 | ret = dmar_table_detect(); | 879 | ret = dmar_table_detect(); |
837 | if (ret) | 880 | if (ret) |
838 | ret = check_zero_address(); | 881 | ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, |
839 | { | 882 | &validate_drhd_cb); |
840 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { | 883 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { |
841 | iommu_detected = 1; | 884 | iommu_detected = 1; |
842 | /* Make sure ACS will be enabled */ | 885 | /* Make sure ACS will be enabled */ |
843 | pci_request_acs(); | 886 | pci_request_acs(); |
844 | } | 887 | } |
845 | 888 | ||
846 | #ifdef CONFIG_X86 | 889 | #ifdef CONFIG_X86 |
847 | if (ret) | 890 | if (ret) |
848 | x86_init.iommu.iommu_init = intel_iommu_init; | 891 | x86_init.iommu.iommu_init = intel_iommu_init; |
849 | #endif | 892 | #endif |
850 | } | 893 | |
851 | early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); | 894 | early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); |
852 | dmar_tbl = NULL; | 895 | dmar_tbl = NULL; |
853 | up_write(&dmar_global_lock); | 896 | up_write(&dmar_global_lock); |
@@ -931,11 +974,32 @@ out: | |||
931 | return err; | 974 | return err; |
932 | } | 975 | } |
933 | 976 | ||
977 | static int dmar_alloc_seq_id(struct intel_iommu *iommu) | ||
978 | { | ||
979 | iommu->seq_id = find_first_zero_bit(dmar_seq_ids, | ||
980 | DMAR_UNITS_SUPPORTED); | ||
981 | if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) { | ||
982 | iommu->seq_id = -1; | ||
983 | } else { | ||
984 | set_bit(iommu->seq_id, dmar_seq_ids); | ||
985 | sprintf(iommu->name, "dmar%d", iommu->seq_id); | ||
986 | } | ||
987 | |||
988 | return iommu->seq_id; | ||
989 | } | ||
990 | |||
991 | static void dmar_free_seq_id(struct intel_iommu *iommu) | ||
992 | { | ||
993 | if (iommu->seq_id >= 0) { | ||
994 | clear_bit(iommu->seq_id, dmar_seq_ids); | ||
995 | iommu->seq_id = -1; | ||
996 | } | ||
997 | } | ||
998 | |||
934 | static int alloc_iommu(struct dmar_drhd_unit *drhd) | 999 | static int alloc_iommu(struct dmar_drhd_unit *drhd) |
935 | { | 1000 | { |
936 | struct intel_iommu *iommu; | 1001 | struct intel_iommu *iommu; |
937 | u32 ver, sts; | 1002 | u32 ver, sts; |
938 | static int iommu_allocated = 0; | ||
939 | int agaw = 0; | 1003 | int agaw = 0; |
940 | int msagaw = 0; | 1004 | int msagaw = 0; |
941 | int err; | 1005 | int err; |
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
949 | if (!iommu) | 1013 | if (!iommu) |
950 | return -ENOMEM; | 1014 | return -ENOMEM; |
951 | 1015 | ||
952 | iommu->seq_id = iommu_allocated++; | 1016 | if (dmar_alloc_seq_id(iommu) < 0) { |
953 | sprintf (iommu->name, "dmar%d", iommu->seq_id); | 1017 | pr_err("IOMMU: failed to allocate seq_id\n"); |
1018 | err = -ENOSPC; | ||
1019 | goto error; | ||
1020 | } | ||
954 | 1021 | ||
955 | err = map_iommu(iommu, drhd->reg_base_addr); | 1022 | err = map_iommu(iommu, drhd->reg_base_addr); |
956 | if (err) { | 1023 | if (err) { |
957 | pr_err("IOMMU: failed to map %s\n", iommu->name); | 1024 | pr_err("IOMMU: failed to map %s\n", iommu->name); |
958 | goto error; | 1025 | goto error_free_seq_id; |
959 | } | 1026 | } |
960 | 1027 | ||
961 | err = -EINVAL; | 1028 | err = -EINVAL; |
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
1005 | 1072 | ||
1006 | return 0; | 1073 | return 0; |
1007 | 1074 | ||
1008 | err_unmap: | 1075 | err_unmap: |
1009 | unmap_iommu(iommu); | 1076 | unmap_iommu(iommu); |
1010 | error: | 1077 | error_free_seq_id: |
1078 | dmar_free_seq_id(iommu); | ||
1079 | error: | ||
1011 | kfree(iommu); | 1080 | kfree(iommu); |
1012 | return err; | 1081 | return err; |
1013 | } | 1082 | } |
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu) | |||
1031 | if (iommu->reg) | 1100 | if (iommu->reg) |
1032 | unmap_iommu(iommu); | 1101 | unmap_iommu(iommu); |
1033 | 1102 | ||
1103 | dmar_free_seq_id(iommu); | ||
1034 | kfree(iommu); | 1104 | kfree(iommu); |
1035 | } | 1105 | } |
1036 | 1106 | ||
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void) | |||
1661 | return dmar->flags & 0x1; | 1731 | return dmar->flags & 0x1; |
1662 | } | 1732 | } |
1663 | 1733 | ||
1734 | /* Check whether DMAR units are in use */ | ||
1735 | static inline bool dmar_in_use(void) | ||
1736 | { | ||
1737 | return irq_remapping_enabled || intel_iommu_enabled; | ||
1738 | } | ||
1739 | |||
1664 | static int __init dmar_free_unused_resources(void) | 1740 | static int __init dmar_free_unused_resources(void) |
1665 | { | 1741 | { |
1666 | struct dmar_drhd_unit *dmaru, *dmaru_n; | 1742 | struct dmar_drhd_unit *dmaru, *dmaru_n; |
1667 | 1743 | ||
1668 | /* DMAR units are in use */ | 1744 | if (dmar_in_use()) |
1669 | if (irq_remapping_enabled || intel_iommu_enabled) | ||
1670 | return 0; | 1745 | return 0; |
1671 | 1746 | ||
1672 | if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) | 1747 | if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) |
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void) | |||
1684 | 1759 | ||
1685 | late_initcall(dmar_free_unused_resources); | 1760 | late_initcall(dmar_free_unused_resources); |
1686 | IOMMU_INIT_POST(detect_intel_iommu); | 1761 | IOMMU_INIT_POST(detect_intel_iommu); |
1762 | |||
1763 | /* | ||
1764 | * DMAR Hotplug Support | ||
1765 | * For more details, please refer to Intel(R) Virtualization Technology | ||
1766 | * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8 | ||
1767 | * "Remapping Hardware Unit Hot Plug". | ||
1768 | */ | ||
1769 | static u8 dmar_hp_uuid[] = { | ||
1770 | /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C, | ||
1771 | /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF | ||
1772 | }; | ||
1773 | |||
1774 | /* | ||
1775 | * Currently there's only one revision and BIOS will not check the revision id, | ||
1776 | * so use 0 for safety. | ||
1777 | */ | ||
1778 | #define DMAR_DSM_REV_ID 0 | ||
1779 | #define DMAR_DSM_FUNC_DRHD 1 | ||
1780 | #define DMAR_DSM_FUNC_ATSR 2 | ||
1781 | #define DMAR_DSM_FUNC_RHSA 3 | ||
1782 | |||
1783 | static inline bool dmar_detect_dsm(acpi_handle handle, int func) | ||
1784 | { | ||
1785 | return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func); | ||
1786 | } | ||
1787 | |||
1788 | static int dmar_walk_dsm_resource(acpi_handle handle, int func, | ||
1789 | dmar_res_handler_t handler, void *arg) | ||
1790 | { | ||
1791 | int ret = -ENODEV; | ||
1792 | union acpi_object *obj; | ||
1793 | struct acpi_dmar_header *start; | ||
1794 | struct dmar_res_callback callback; | ||
1795 | static int res_type[] = { | ||
1796 | [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT, | ||
1797 | [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS, | ||
1798 | [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY, | ||
1799 | }; | ||
1800 | |||
1801 | if (!dmar_detect_dsm(handle, func)) | ||
1802 | return 0; | ||
1803 | |||
1804 | obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, | ||
1805 | func, NULL, ACPI_TYPE_BUFFER); | ||
1806 | if (!obj) | ||
1807 | return -ENODEV; | ||
1808 | |||
1809 | memset(&callback, 0, sizeof(callback)); | ||
1810 | callback.cb[res_type[func]] = handler; | ||
1811 | callback.arg[res_type[func]] = arg; | ||
1812 | start = (struct acpi_dmar_header *)obj->buffer.pointer; | ||
1813 | ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); | ||
1814 | |||
1815 | ACPI_FREE(obj); | ||
1816 | |||
1817 | return ret; | ||
1818 | } | ||
1819 | |||
1820 | static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg) | ||
1821 | { | ||
1822 | int ret; | ||
1823 | struct dmar_drhd_unit *dmaru; | ||
1824 | |||
1825 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | ||
1826 | if (!dmaru) | ||
1827 | return -ENODEV; | ||
1828 | |||
1829 | ret = dmar_ir_hotplug(dmaru, true); | ||
1830 | if (ret == 0) | ||
1831 | ret = dmar_iommu_hotplug(dmaru, true); | ||
1832 | |||
1833 | return ret; | ||
1834 | } | ||
1835 | |||
1836 | static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) | ||
1837 | { | ||
1838 | int i, ret; | ||
1839 | struct device *dev; | ||
1840 | struct dmar_drhd_unit *dmaru; | ||
1841 | |||
1842 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | ||
1843 | if (!dmaru) | ||
1844 | return 0; | ||
1845 | |||
1846 | /* | ||
1847 | * All PCI devices managed by this unit should have been destroyed. | ||
1848 | */ | ||
1849 | if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) | ||
1850 | for_each_active_dev_scope(dmaru->devices, | ||
1851 | dmaru->devices_cnt, i, dev) | ||
1852 | return -EBUSY; | ||
1853 | |||
1854 | ret = dmar_ir_hotplug(dmaru, false); | ||
1855 | if (ret == 0) | ||
1856 | ret = dmar_iommu_hotplug(dmaru, false); | ||
1857 | |||
1858 | return ret; | ||
1859 | } | ||
1860 | |||
1861 | static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg) | ||
1862 | { | ||
1863 | struct dmar_drhd_unit *dmaru; | ||
1864 | |||
1865 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | ||
1866 | if (dmaru) { | ||
1867 | list_del_rcu(&dmaru->list); | ||
1868 | synchronize_rcu(); | ||
1869 | dmar_free_drhd(dmaru); | ||
1870 | } | ||
1871 | |||
1872 | return 0; | ||
1873 | } | ||
1874 | |||
1875 | static int dmar_hotplug_insert(acpi_handle handle) | ||
1876 | { | ||
1877 | int ret; | ||
1878 | int drhd_count = 0; | ||
1879 | |||
1880 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1881 | &dmar_validate_one_drhd, (void *)1); | ||
1882 | if (ret) | ||
1883 | goto out; | ||
1884 | |||
1885 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1886 | &dmar_parse_one_drhd, (void *)&drhd_count); | ||
1887 | if (ret == 0 && drhd_count == 0) { | ||
1888 | pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n"); | ||
1889 | goto out; | ||
1890 | } else if (ret) { | ||
1891 | goto release_drhd; | ||
1892 | } | ||
1893 | |||
1894 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA, | ||
1895 | &dmar_parse_one_rhsa, NULL); | ||
1896 | if (ret) | ||
1897 | goto release_drhd; | ||
1898 | |||
1899 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | ||
1900 | &dmar_parse_one_atsr, NULL); | ||
1901 | if (ret) | ||
1902 | goto release_atsr; | ||
1903 | |||
1904 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1905 | &dmar_hp_add_drhd, NULL); | ||
1906 | if (!ret) | ||
1907 | return 0; | ||
1908 | |||
1909 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1910 | &dmar_hp_remove_drhd, NULL); | ||
1911 | release_atsr: | ||
1912 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | ||
1913 | &dmar_release_one_atsr, NULL); | ||
1914 | release_drhd: | ||
1915 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1916 | &dmar_hp_release_drhd, NULL); | ||
1917 | out: | ||
1918 | return ret; | ||
1919 | } | ||
1920 | |||
1921 | static int dmar_hotplug_remove(acpi_handle handle) | ||
1922 | { | ||
1923 | int ret; | ||
1924 | |||
1925 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | ||
1926 | &dmar_check_one_atsr, NULL); | ||
1927 | if (ret) | ||
1928 | return ret; | ||
1929 | |||
1930 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1931 | &dmar_hp_remove_drhd, NULL); | ||
1932 | if (ret == 0) { | ||
1933 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | ||
1934 | &dmar_release_one_atsr, NULL)); | ||
1935 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1936 | &dmar_hp_release_drhd, NULL)); | ||
1937 | } else { | ||
1938 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | ||
1939 | &dmar_hp_add_drhd, NULL); | ||
1940 | } | ||
1941 | |||
1942 | return ret; | ||
1943 | } | ||
1944 | |||
1945 | static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl, | ||
1946 | void *context, void **retval) | ||
1947 | { | ||
1948 | acpi_handle *phdl = retval; | ||
1949 | |||
1950 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { | ||
1951 | *phdl = handle; | ||
1952 | return AE_CTRL_TERMINATE; | ||
1953 | } | ||
1954 | |||
1955 | return AE_OK; | ||
1956 | } | ||
1957 | |||
1958 | static int dmar_device_hotplug(acpi_handle handle, bool insert) | ||
1959 | { | ||
1960 | int ret; | ||
1961 | acpi_handle tmp = NULL; | ||
1962 | acpi_status status; | ||
1963 | |||
1964 | if (!dmar_in_use()) | ||
1965 | return 0; | ||
1966 | |||
1967 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { | ||
1968 | tmp = handle; | ||
1969 | } else { | ||
1970 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | ||
1971 | ACPI_UINT32_MAX, | ||
1972 | dmar_get_dsm_handle, | ||
1973 | NULL, NULL, &tmp); | ||
1974 | if (ACPI_FAILURE(status)) { | ||
1975 | pr_warn("Failed to locate _DSM method.\n"); | ||
1976 | return -ENXIO; | ||
1977 | } | ||
1978 | } | ||
1979 | if (tmp == NULL) | ||
1980 | return 0; | ||
1981 | |||
1982 | down_write(&dmar_global_lock); | ||
1983 | if (insert) | ||
1984 | ret = dmar_hotplug_insert(tmp); | ||
1985 | else | ||
1986 | ret = dmar_hotplug_remove(tmp); | ||
1987 | up_write(&dmar_global_lock); | ||
1988 | |||
1989 | return ret; | ||
1990 | } | ||
1991 | |||
1992 | int dmar_device_add(acpi_handle handle) | ||
1993 | { | ||
1994 | return dmar_device_hotplug(handle, true); | ||
1995 | } | ||
1996 | |||
1997 | int dmar_device_remove(acpi_handle handle) | ||
1998 | { | ||
1999 | return dmar_device_hotplug(handle, false); | ||
2000 | } | ||
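The dmar.c rework above replaces the open-coded switch over DMAR sub-tables with a table of per-type callbacks (struct dmar_res_callback) that one generic walker, dmar_walk_remapping_entries(), applies to both the static ACPI DMAR table and the buffers returned by the hotplug _DSM method. A standalone sketch of that dispatch pattern; the names here are hypothetical, not the kernel's:

#include <stddef.h>

enum rec_type { REC_DRHD, REC_RMRR, REC_ATSR, REC_TYPE_COUNT };

struct rec_header {
        unsigned int type;
        size_t length;          /* total record size, header included */
};

typedef int (*rec_handler_t)(struct rec_header *rec, void *arg);

struct rec_callback {
        rec_handler_t cb[REC_TYPE_COUNT];
        void *arg[REC_TYPE_COUNT];
};

/* Walk variable-length records, dispatching each to its per-type handler. */
static int walk_records(struct rec_header *start, size_t len,
                        struct rec_callback *cb)
{
        char *pos = (char *)start;
        char *end = pos + len;
        int ret = 0;

        while (pos < end && ret == 0) {
                struct rec_header *rec = (struct rec_header *)pos;

                if (rec->length == 0 || pos + rec->length > end)
                        return -1;      /* malformed table, stop walking */
                if (rec->type < REC_TYPE_COUNT && cb->cb[rec->type])
                        ret = cb->cb[rec->type](rec, cb->arg[rec->type]);
                pos += rec->length;     /* advance to the next record */
        }
        return ret;
}

/* Per-type handler that just counts records, as parse_dmar_table() does for DRHDs. */
static int count_drhd(struct rec_header *rec, void *arg)
{
        (void)rec;
        (*(int *)arg)++;
        return 0;
}

A counting walk is then set up much like parse_dmar_table() above: fill .cb[REC_DRHD] = count_drhd and .arg[REC_DRHD] = &drhd_count in a struct rec_callback and hand it to walk_records().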
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 02cd26a17fe0..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root) | |||
195 | } | 195 | } |
196 | static inline void set_root_value(struct root_entry *root, unsigned long value) | 196 | static inline void set_root_value(struct root_entry *root, unsigned long value) |
197 | { | 197 | { |
198 | root->val &= ~VTD_PAGE_MASK; | ||
198 | root->val |= value & VTD_PAGE_MASK; | 199 | root->val |= value & VTD_PAGE_MASK; |
199 | } | 200 | } |
200 | 201 | ||
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context, | |||
247 | static inline void context_set_address_root(struct context_entry *context, | 248 | static inline void context_set_address_root(struct context_entry *context, |
248 | unsigned long value) | 249 | unsigned long value) |
249 | { | 250 | { |
251 | context->lo &= ~VTD_PAGE_MASK; | ||
250 | context->lo |= value & VTD_PAGE_MASK; | 252 | context->lo |= value & VTD_PAGE_MASK; |
251 | } | 253 | } |
252 | 254 | ||
@@ -328,17 +330,10 @@ static int hw_pass_through = 1; | |||
328 | /* si_domain contains mulitple devices */ | 330 | /* si_domain contains mulitple devices */ |
329 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) | 331 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) |
330 | 332 | ||
331 | /* define the limit of IOMMUs supported in each domain */ | ||
332 | #ifdef CONFIG_X86 | ||
333 | # define IOMMU_UNITS_SUPPORTED MAX_IO_APICS | ||
334 | #else | ||
335 | # define IOMMU_UNITS_SUPPORTED 64 | ||
336 | #endif | ||
337 | |||
338 | struct dmar_domain { | 333 | struct dmar_domain { |
339 | int id; /* domain id */ | 334 | int id; /* domain id */ |
340 | int nid; /* node id */ | 335 | int nid; /* node id */ |
341 | DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED); | 336 | DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED); |
342 | /* bitmap of iommus this domain uses*/ | 337 | /* bitmap of iommus this domain uses*/ |
343 | 338 | ||
344 | struct list_head devices; /* all devices' list */ | 339 | struct list_head devices; /* all devices' list */ |
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
1132 | unsigned long flags; | 1127 | unsigned long flags; |
1133 | 1128 | ||
1134 | root = (struct root_entry *)alloc_pgtable_page(iommu->node); | 1129 | root = (struct root_entry *)alloc_pgtable_page(iommu->node); |
1135 | if (!root) | 1130 | if (!root) { |
1131 | pr_err("IOMMU: allocating root entry for %s failed\n", | ||
1132 | iommu->name); | ||
1136 | return -ENOMEM; | 1133 | return -ENOMEM; |
1134 | } | ||
1137 | 1135 | ||
1138 | __iommu_flush_cache(iommu, root, ROOT_SIZE); | 1136 | __iommu_flush_cache(iommu, root, ROOT_SIZE); |
1139 | 1137 | ||
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
1473 | return 0; | 1471 | return 0; |
1474 | } | 1472 | } |
1475 | 1473 | ||
1476 | static void free_dmar_iommu(struct intel_iommu *iommu) | 1474 | static void disable_dmar_iommu(struct intel_iommu *iommu) |
1477 | { | 1475 | { |
1478 | struct dmar_domain *domain; | 1476 | struct dmar_domain *domain; |
1479 | int i; | 1477 | int i; |
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu) | |||
1497 | 1495 | ||
1498 | if (iommu->gcmd & DMA_GCMD_TE) | 1496 | if (iommu->gcmd & DMA_GCMD_TE) |
1499 | iommu_disable_translation(iommu); | 1497 | iommu_disable_translation(iommu); |
1498 | } | ||
1500 | 1499 | ||
1501 | kfree(iommu->domains); | 1500 | static void free_dmar_iommu(struct intel_iommu *iommu) |
1502 | kfree(iommu->domain_ids); | 1501 | { |
1503 | iommu->domains = NULL; | 1502 | if ((iommu->domains) && (iommu->domain_ids)) { |
1504 | iommu->domain_ids = NULL; | 1503 | kfree(iommu->domains); |
1504 | kfree(iommu->domain_ids); | ||
1505 | iommu->domains = NULL; | ||
1506 | iommu->domain_ids = NULL; | ||
1507 | } | ||
1505 | 1508 | ||
1506 | g_iommus[iommu->seq_id] = NULL; | 1509 | g_iommus[iommu->seq_id] = NULL; |
1507 | 1510 | ||
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
1983 | { | 1986 | { |
1984 | struct dma_pte *first_pte = NULL, *pte = NULL; | 1987 | struct dma_pte *first_pte = NULL, *pte = NULL; |
1985 | phys_addr_t uninitialized_var(pteval); | 1988 | phys_addr_t uninitialized_var(pteval); |
1986 | unsigned long sg_res; | 1989 | unsigned long sg_res = 0; |
1987 | unsigned int largepage_lvl = 0; | 1990 | unsigned int largepage_lvl = 0; |
1988 | unsigned long lvl_pages = 0; | 1991 | unsigned long lvl_pages = 0; |
1989 | 1992 | ||
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
1994 | 1997 | ||
1995 | prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; | 1998 | prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; |
1996 | 1999 | ||
1997 | if (sg) | 2000 | if (!sg) { |
1998 | sg_res = 0; | 2001 | sg_res = nr_pages; |
1999 | else { | ||
2000 | sg_res = nr_pages + 1; | ||
2001 | pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; | 2002 | pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; |
2002 | } | 2003 | } |
2003 | 2004 | ||
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw) | |||
2708 | return 0; | 2709 | return 0; |
2709 | } | 2710 | } |
2710 | 2711 | ||
2712 | static void intel_iommu_init_qi(struct intel_iommu *iommu) | ||
2713 | { | ||
2714 | /* | ||
2715 | * Start from the sane iommu hardware state. | ||
2716 | * If the queued invalidation is already initialized by us | ||
2717 | * (for example, while enabling interrupt-remapping) then | ||
2718 | * we got the things already rolling from a sane state. | ||
2719 | */ | ||
2720 | if (!iommu->qi) { | ||
2721 | /* | ||
2722 | * Clear any previous faults. | ||
2723 | */ | ||
2724 | dmar_fault(-1, iommu); | ||
2725 | /* | ||
2726 | * Disable queued invalidation if supported and already enabled | ||
2727 | * before OS handover. | ||
2728 | */ | ||
2729 | dmar_disable_qi(iommu); | ||
2730 | } | ||
2731 | |||
2732 | if (dmar_enable_qi(iommu)) { | ||
2733 | /* | ||
2734 | * Queued Invalidate not enabled, use Register Based Invalidate | ||
2735 | */ | ||
2736 | iommu->flush.flush_context = __iommu_flush_context; | ||
2737 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | ||
2738 | pr_info("IOMMU: %s using Register based invalidation\n", | ||
2739 | iommu->name); | ||
2740 | } else { | ||
2741 | iommu->flush.flush_context = qi_flush_context; | ||
2742 | iommu->flush.flush_iotlb = qi_flush_iotlb; | ||
2743 | pr_info("IOMMU: %s using Queued invalidation\n", iommu->name); | ||
2744 | } | ||
2745 | } | ||
2746 | |||
2711 | static int __init init_dmars(void) | 2747 | static int __init init_dmars(void) |
2712 | { | 2748 | { |
2713 | struct dmar_drhd_unit *drhd; | 2749 | struct dmar_drhd_unit *drhd; |
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void) | |||
2728 | * threaded kernel __init code path all other access are read | 2764 | * threaded kernel __init code path all other access are read |
2729 | * only | 2765 | * only |
2730 | */ | 2766 | */ |
2731 | if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) { | 2767 | if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) { |
2732 | g_num_of_iommus++; | 2768 | g_num_of_iommus++; |
2733 | continue; | 2769 | continue; |
2734 | } | 2770 | } |
2735 | printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n", | 2771 | printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n", |
2736 | IOMMU_UNITS_SUPPORTED); | 2772 | DMAR_UNITS_SUPPORTED); |
2737 | } | 2773 | } |
2738 | 2774 | ||
2775 | /* Preallocate enough resources for IOMMU hot-addition */ | ||
2776 | if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) | ||
2777 | g_num_of_iommus = DMAR_UNITS_SUPPORTED; | ||
2778 | |||
2739 | g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), | 2779 | g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), |
2740 | GFP_KERNEL); | 2780 | GFP_KERNEL); |
2741 | if (!g_iommus) { | 2781 | if (!g_iommus) { |
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void) | |||
2764 | * among all IOMMU's. Need to Split it later. | 2804 | * among all IOMMU's. Need to Split it later. |
2765 | */ | 2805 | */ |
2766 | ret = iommu_alloc_root_entry(iommu); | 2806 | ret = iommu_alloc_root_entry(iommu); |
2767 | if (ret) { | 2807 | if (ret) |
2768 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); | ||
2769 | goto free_iommu; | 2808 | goto free_iommu; |
2770 | } | ||
2771 | if (!ecap_pass_through(iommu->ecap)) | 2809 | if (!ecap_pass_through(iommu->ecap)) |
2772 | hw_pass_through = 0; | 2810 | hw_pass_through = 0; |
2773 | } | 2811 | } |
2774 | 2812 | ||
2775 | /* | 2813 | for_each_active_iommu(iommu, drhd) |
2776 | * Start from the sane iommu hardware state. | 2814 | intel_iommu_init_qi(iommu); |
2777 | */ | ||
2778 | for_each_active_iommu(iommu, drhd) { | ||
2779 | /* | ||
2780 | * If the queued invalidation is already initialized by us | ||
2781 | * (for example, while enabling interrupt-remapping) then | ||
2782 | * we got the things already rolling from a sane state. | ||
2783 | */ | ||
2784 | if (iommu->qi) | ||
2785 | continue; | ||
2786 | |||
2787 | /* | ||
2788 | * Clear any previous faults. | ||
2789 | */ | ||
2790 | dmar_fault(-1, iommu); | ||
2791 | /* | ||
2792 | * Disable queued invalidation if supported and already enabled | ||
2793 | * before OS handover. | ||
2794 | */ | ||
2795 | dmar_disable_qi(iommu); | ||
2796 | } | ||
2797 | |||
2798 | for_each_active_iommu(iommu, drhd) { | ||
2799 | if (dmar_enable_qi(iommu)) { | ||
2800 | /* | ||
2801 | * Queued Invalidate not enabled, use Register Based | ||
2802 | * Invalidate | ||
2803 | */ | ||
2804 | iommu->flush.flush_context = __iommu_flush_context; | ||
2805 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | ||
2806 | printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " | ||
2807 | "invalidation\n", | ||
2808 | iommu->seq_id, | ||
2809 | (unsigned long long)drhd->reg_base_addr); | ||
2810 | } else { | ||
2811 | iommu->flush.flush_context = qi_flush_context; | ||
2812 | iommu->flush.flush_iotlb = qi_flush_iotlb; | ||
2813 | printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " | ||
2814 | "invalidation\n", | ||
2815 | iommu->seq_id, | ||
2816 | (unsigned long long)drhd->reg_base_addr); | ||
2817 | } | ||
2818 | } | ||
2819 | 2815 | ||
2820 | if (iommu_pass_through) | 2816 | if (iommu_pass_through) |
2821 | iommu_identity_mapping |= IDENTMAP_ALL; | 2817 | iommu_identity_mapping |= IDENTMAP_ALL; |
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void) | |||
2901 | return 0; | 2897 | return 0; |
2902 | 2898 | ||
2903 | free_iommu: | 2899 | free_iommu: |
2904 | for_each_active_iommu(iommu, drhd) | 2900 | for_each_active_iommu(iommu, drhd) { |
2901 | disable_dmar_iommu(iommu); | ||
2905 | free_dmar_iommu(iommu); | 2902 | free_dmar_iommu(iommu); |
2903 | } | ||
2906 | kfree(deferred_flush); | 2904 | kfree(deferred_flush); |
2907 | free_g_iommus: | 2905 | free_g_iommus: |
2908 | kfree(g_iommus); | 2906 | kfree(g_iommus); |
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {} | |||
3682 | #endif /* CONFIG_PM */ | 3680 | #endif /* CONFIG_PM */ |
3683 | 3681 | ||
3684 | 3682 | ||
3685 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 3683 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) |
3686 | { | 3684 | { |
3687 | struct acpi_dmar_reserved_memory *rmrr; | 3685 | struct acpi_dmar_reserved_memory *rmrr; |
3688 | struct dmar_rmrr_unit *rmrru; | 3686 | struct dmar_rmrr_unit *rmrru; |
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | |||
3708 | return 0; | 3706 | return 0; |
3709 | } | 3707 | } |
3710 | 3708 | ||
3711 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | 3709 | static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) |
3710 | { | ||
3711 | struct dmar_atsr_unit *atsru; | ||
3712 | struct acpi_dmar_atsr *tmp; | ||
3713 | |||
3714 | list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { | ||
3715 | tmp = (struct acpi_dmar_atsr *)atsru->hdr; | ||
3716 | if (atsr->segment != tmp->segment) | ||
3717 | continue; | ||
3718 | if (atsr->header.length != tmp->header.length) | ||
3719 | continue; | ||
3720 | if (memcmp(atsr, tmp, atsr->header.length) == 0) | ||
3721 | return atsru; | ||
3722 | } | ||
3723 | |||
3724 | return NULL; | ||
3725 | } | ||
3726 | |||
3727 | int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) | ||
3712 | { | 3728 | { |
3713 | struct acpi_dmar_atsr *atsr; | 3729 | struct acpi_dmar_atsr *atsr; |
3714 | struct dmar_atsr_unit *atsru; | 3730 | struct dmar_atsr_unit *atsru; |
3715 | 3731 | ||
3732 | if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled) | ||
3733 | return 0; | ||
3734 | |||
3716 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); | 3735 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); |
3717 | atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); | 3736 | atsru = dmar_find_atsr(atsr); |
3737 | if (atsru) | ||
3738 | return 0; | ||
3739 | |||
3740 | atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); | ||
3718 | if (!atsru) | 3741 | if (!atsru) |
3719 | return -ENOMEM; | 3742 | return -ENOMEM; |
3720 | 3743 | ||
3721 | atsru->hdr = hdr; | 3744 | /* |
3745 | * If memory is allocated from slab by ACPI _DSM method, we need to | ||
3746 | * copy the memory content because the memory buffer will be freed | ||
3747 | * on return. | ||
3748 | */ | ||
3749 | atsru->hdr = (void *)(atsru + 1); | ||
3750 | memcpy(atsru->hdr, hdr, hdr->length); | ||
3722 | atsru->include_all = atsr->flags & 0x1; | 3751 | atsru->include_all = atsr->flags & 0x1; |
3723 | if (!atsru->include_all) { | 3752 | if (!atsru->include_all) { |
3724 | atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), | 3753 | atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), |
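
Because an ATSR table handed in through an ACPI _DSM call lives in a buffer that is freed as soon as the parser returns, the unit descriptor and a private copy of the header are now carved out of a single allocation, with ->hdr pointing just past the struct. A standalone sketch of that allocation pattern, using simplified stand-in types:

/*
 * Allocate the bookkeeping struct plus the variable-length header in one
 * block, then copy the caller's buffer because it will not outlive the call.
 */
#include <stdlib.h>
#include <string.h>

struct fake_dmar_header {
        unsigned short type;
        unsigned short length;  /* total length, including trailing data */
};

struct fake_atsr_unit {
        struct fake_dmar_header *hdr;   /* points into the same allocation */
        int include_all;
};

static struct fake_atsr_unit *atsr_dup(const struct fake_dmar_header *hdr)
{
        struct fake_atsr_unit *atsru;

        atsru = calloc(1, sizeof(*atsru) + hdr->length);
        if (!atsru)
                return NULL;

        atsru->hdr = (void *)(atsru + 1);       /* private copy lives right after the struct */
        memcpy(atsru->hdr, hdr, hdr->length);
        return atsru;
}

int main(void)
{
        struct fake_dmar_header hdr = { .type = 2, .length = sizeof(hdr) };
        struct fake_atsr_unit *copy = atsr_dup(&hdr);

        free(copy);
        return 0;
}
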
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) | |||
3741 | kfree(atsru); | 3770 | kfree(atsru); |
3742 | } | 3771 | } |
3743 | 3772 | ||
3773 | int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg) | ||
3774 | { | ||
3775 | struct acpi_dmar_atsr *atsr; | ||
3776 | struct dmar_atsr_unit *atsru; | ||
3777 | |||
3778 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); | ||
3779 | atsru = dmar_find_atsr(atsr); | ||
3780 | if (atsru) { | ||
3781 | list_del_rcu(&atsru->list); | ||
3782 | synchronize_rcu(); | ||
3783 | intel_iommu_free_atsr(atsru); | ||
3784 | } | ||
3785 | |||
3786 | return 0; | ||
3787 | } | ||
3788 | |||
3789 | int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) | ||
3790 | { | ||
3791 | int i; | ||
3792 | struct device *dev; | ||
3793 | struct acpi_dmar_atsr *atsr; | ||
3794 | struct dmar_atsr_unit *atsru; | ||
3795 | |||
3796 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); | ||
3797 | atsru = dmar_find_atsr(atsr); | ||
3798 | if (!atsru) | ||
3799 | return 0; | ||
3800 | |||
3801 | if (!atsru->include_all && atsru->devices && atsru->devices_cnt) | ||
3802 | for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, | ||
3803 | i, dev) | ||
3804 | return -EBUSY; | ||
3805 | |||
3806 | return 0; | ||
3807 | } | ||
3808 | |||
3809 | static int intel_iommu_add(struct dmar_drhd_unit *dmaru) | ||
3810 | { | ||
3811 | int sp, ret = 0; | ||
3812 | struct intel_iommu *iommu = dmaru->iommu; | ||
3813 | |||
3814 | if (g_iommus[iommu->seq_id]) | ||
3815 | return 0; | ||
3816 | |||
3817 | if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { | ||
3818 | pr_warn("IOMMU: %s doesn't support hardware pass through.\n", | ||
3819 | iommu->name); | ||
3820 | return -ENXIO; | ||
3821 | } | ||
3822 | if (!ecap_sc_support(iommu->ecap) && | ||
3823 | domain_update_iommu_snooping(iommu)) { | ||
3824 | pr_warn("IOMMU: %s doesn't support snooping.\n", | ||
3825 | iommu->name); | ||
3826 | return -ENXIO; | ||
3827 | } | ||
3828 | sp = domain_update_iommu_superpage(iommu) - 1; | ||
3829 | if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { | ||
3830 | pr_warn("IOMMU: %s doesn't support large page.\n", | ||
3831 | iommu->name); | ||
3832 | return -ENXIO; | ||
3833 | } | ||
3834 | |||
3835 | /* | ||
3836 | * Disable translation if already enabled prior to OS handover. | ||
3837 | */ | ||
3838 | if (iommu->gcmd & DMA_GCMD_TE) | ||
3839 | iommu_disable_translation(iommu); | ||
3840 | |||
3841 | g_iommus[iommu->seq_id] = iommu; | ||
3842 | ret = iommu_init_domains(iommu); | ||
3843 | if (ret == 0) | ||
3844 | ret = iommu_alloc_root_entry(iommu); | ||
3845 | if (ret) | ||
3846 | goto out; | ||
3847 | |||
3848 | if (dmaru->ignored) { | ||
3849 | /* | ||
3850 | * we always have to disable PMRs or DMA may fail on this device | ||
3851 | */ | ||
3852 | if (force_on) | ||
3853 | iommu_disable_protect_mem_regions(iommu); | ||
3854 | return 0; | ||
3855 | } | ||
3856 | |||
3857 | intel_iommu_init_qi(iommu); | ||
3858 | iommu_flush_write_buffer(iommu); | ||
3859 | ret = dmar_set_interrupt(iommu); | ||
3860 | if (ret) | ||
3861 | goto disable_iommu; | ||
3862 | |||
3863 | iommu_set_root_entry(iommu); | ||
3864 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | ||
3865 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | ||
3866 | iommu_enable_translation(iommu); | ||
3867 | |||
3868 | if (si_domain) { | ||
3869 | ret = iommu_attach_domain(si_domain, iommu); | ||
3870 | if (ret < 0 || si_domain->id != ret) | ||
3871 | goto disable_iommu; | ||
3872 | domain_attach_iommu(si_domain, iommu); | ||
3873 | } | ||
3874 | |||
3875 | iommu_disable_protect_mem_regions(iommu); | ||
3876 | return 0; | ||
3877 | |||
3878 | disable_iommu: | ||
3879 | disable_dmar_iommu(iommu); | ||
3880 | out: | ||
3881 | free_dmar_iommu(iommu); | ||
3882 | return ret; | ||
3883 | } | ||
3884 | |||
3885 | int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
3886 | { | ||
3887 | int ret = 0; | ||
3888 | struct intel_iommu *iommu = dmaru->iommu; | ||
3889 | |||
3890 | if (!intel_iommu_enabled) | ||
3891 | return 0; | ||
3892 | if (iommu == NULL) | ||
3893 | return -EINVAL; | ||
3894 | |||
3895 | if (insert) { | ||
3896 | ret = intel_iommu_add(dmaru); | ||
3897 | } else { | ||
3898 | disable_dmar_iommu(iommu); | ||
3899 | free_dmar_iommu(iommu); | ||
3900 | } | ||
3901 | |||
3902 | return ret; | ||
3903 | } | ||
3904 | |||
3744 | static void intel_iommu_free_dmars(void) | 3905 | static void intel_iommu_free_dmars(void) |
3745 | { | 3906 | { |
3746 | struct dmar_rmrr_unit *rmrru, *rmrr_n; | 3907 | struct dmar_rmrr_unit *rmrru, *rmrr_n; |
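
intel_iommu_add() above only accepts a hot-added unit whose capabilities do not fall below what the running system already depends on: hardware pass-through, snoop control and superpage support are all re-checked before the unit is wired up. The sketch below models that gate with simplified boolean feature flags instead of the real cap/ecap register tests; it is an illustration of the policy, not the kernel code.

/*
 * Reject a hot-added unit that would weaken a capability the system already
 * relies on.  Feature flags are stand-ins for the cap/ecap bit tests.
 */
#include <stdbool.h>
#include <stdio.h>

struct unit_caps {
        bool pass_through;
        bool snoop_control;
        bool superpage;
};

static int check_hotadd_unit(const struct unit_caps *sys, const struct unit_caps *unit)
{
        if (sys->pass_through && !unit->pass_through)
                return -6;      /* -ENXIO: would break hardware pass-through */
        if (sys->snoop_control && !unit->snoop_control)
                return -6;      /* -ENXIO: would break IOMMU snooping */
        if (sys->superpage && !unit->superpage)
                return -6;      /* -ENXIO: would break large-page mappings */
        return 0;
}

int main(void)
{
        struct unit_caps sys = { true, true, true };
        struct unit_caps weak = { true, false, true };

        printf("%d\n", check_hotadd_unit(&sys, &weak));
        return 0;
}
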
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 7c80661b35c1..27541d440849 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -36,7 +36,6 @@ struct hpet_scope { | |||
36 | 36 | ||
37 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 37 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; | 38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
39 | static int ir_ioapic_num, ir_hpet_num; | ||
40 | 39 | ||
41 | /* | 40 | /* |
42 | * Lock ordering: | 41 | * Lock ordering: |
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) | |||
206 | int i; | 205 | int i; |
207 | 206 | ||
208 | for (i = 0; i < MAX_HPET_TBS; i++) | 207 | for (i = 0; i < MAX_HPET_TBS; i++) |
209 | if (ir_hpet[i].id == hpet_id) | 208 | if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) |
210 | return ir_hpet[i].iommu; | 209 | return ir_hpet[i].iommu; |
211 | return NULL; | 210 | return NULL; |
212 | } | 211 | } |
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic) | |||
216 | int i; | 215 | int i; |
217 | 216 | ||
218 | for (i = 0; i < MAX_IO_APICS; i++) | 217 | for (i = 0; i < MAX_IO_APICS; i++) |
219 | if (ir_ioapic[i].id == apic) | 218 | if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) |
220 | return ir_ioapic[i].iommu; | 219 | return ir_ioapic[i].iommu; |
221 | return NULL; | 220 | return NULL; |
222 | } | 221 | } |
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic) | |||
325 | 324 | ||
326 | down_read(&dmar_global_lock); | 325 | down_read(&dmar_global_lock); |
327 | for (i = 0; i < MAX_IO_APICS; i++) { | 326 | for (i = 0; i < MAX_IO_APICS; i++) { |
328 | if (ir_ioapic[i].id == apic) { | 327 | if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { |
329 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | 328 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; |
330 | break; | 329 | break; |
331 | } | 330 | } |
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id) | |||
352 | 351 | ||
353 | down_read(&dmar_global_lock); | 352 | down_read(&dmar_global_lock); |
354 | for (i = 0; i < MAX_HPET_TBS; i++) { | 353 | for (i = 0; i < MAX_HPET_TBS; i++) { |
355 | if (ir_hpet[i].id == id) { | 354 | if (ir_hpet[i].iommu && ir_hpet[i].id == id) { |
356 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | 355 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; |
357 | break; | 356 | break; |
358 | } | 357 | } |
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) | |||
473 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); | 472 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
474 | } | 473 | } |
475 | 474 | ||
476 | 475 | static int intel_setup_irq_remapping(struct intel_iommu *iommu) | |
477 | static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) | ||
478 | { | 476 | { |
479 | struct ir_table *ir_table; | 477 | struct ir_table *ir_table; |
480 | struct page *pages; | 478 | struct page *pages; |
481 | unsigned long *bitmap; | 479 | unsigned long *bitmap; |
482 | 480 | ||
483 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), | 481 | if (iommu->ir_table) |
484 | GFP_ATOMIC); | 482 | return 0; |
485 | 483 | ||
486 | if (!iommu->ir_table) | 484 | ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC); |
485 | if (!ir_table) | ||
487 | return -ENOMEM; | 486 | return -ENOMEM; |
488 | 487 | ||
489 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, | 488 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, |
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) | |||
492 | if (!pages) { | 491 | if (!pages) { |
493 | pr_err("IR%d: failed to allocate pages of order %d\n", | 492 | pr_err("IR%d: failed to allocate pages of order %d\n", |
494 | iommu->seq_id, INTR_REMAP_PAGE_ORDER); | 493 | iommu->seq_id, INTR_REMAP_PAGE_ORDER); |
495 | kfree(iommu->ir_table); | 494 | goto out_free_table; |
496 | return -ENOMEM; | ||
497 | } | 495 | } |
498 | 496 | ||
499 | bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), | 497 | bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), |
500 | sizeof(long), GFP_ATOMIC); | 498 | sizeof(long), GFP_ATOMIC); |
501 | if (bitmap == NULL) { | 499 | if (bitmap == NULL) { |
502 | pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); | 500 | pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); |
503 | __free_pages(pages, INTR_REMAP_PAGE_ORDER); | 501 | goto out_free_pages; |
504 | kfree(ir_table); | ||
505 | return -ENOMEM; | ||
506 | } | 502 | } |
507 | 503 | ||
508 | ir_table->base = page_address(pages); | 504 | ir_table->base = page_address(pages); |
509 | ir_table->bitmap = bitmap; | 505 | ir_table->bitmap = bitmap; |
510 | 506 | iommu->ir_table = ir_table; | |
511 | iommu_set_irq_remapping(iommu, mode); | ||
512 | return 0; | 507 | return 0; |
508 | |||
509 | out_free_pages: | ||
510 | __free_pages(pages, INTR_REMAP_PAGE_ORDER); | ||
511 | out_free_table: | ||
512 | kfree(ir_table); | ||
513 | return -ENOMEM; | ||
514 | } | ||
515 | |||
516 | static void intel_teardown_irq_remapping(struct intel_iommu *iommu) | ||
517 | { | ||
518 | if (iommu && iommu->ir_table) { | ||
519 | free_pages((unsigned long)iommu->ir_table->base, | ||
520 | INTR_REMAP_PAGE_ORDER); | ||
521 | kfree(iommu->ir_table->bitmap); | ||
522 | kfree(iommu->ir_table); | ||
523 | iommu->ir_table = NULL; | ||
524 | } | ||
513 | } | 525 | } |
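
intel_setup_irq_remapping() now unwinds its two allocations through dedicated labels instead of freeing inline at every failure site, and only publishes iommu->ir_table once everything has succeeded. A self-contained illustration of the same unwinding idiom, with made-up sizes and names:

/*
 * Goto-based unwinding: each allocation gets one failure label, and the labels
 * release resources in reverse order of acquisition.
 */
#include <stdlib.h>

struct ir_table_model {
        void *base;
        unsigned long *bitmap;
};

static struct ir_table_model *ir_table_alloc(void)
{
        struct ir_table_model *t;

        t = calloc(1, sizeof(*t));
        if (!t)
                return NULL;

        t->base = calloc(1, 4096);              /* stand-in for the IRTE pages */
        if (!t->base)
                goto out_free_table;

        t->bitmap = calloc(8, sizeof(long));    /* stand-in for the entry bitmap */
        if (!t->bitmap)
                goto out_free_pages;

        return t;                               /* fully constructed, publish it */

out_free_pages:
        free(t->base);
out_free_table:
        free(t);
        return NULL;
}

int main(void)
{
        struct ir_table_model *t = ir_table_alloc();

        if (t) {
                free(t->bitmap);
                free(t->base);
                free(t);
        }
        return 0;
}
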
514 | 526 | ||
515 | /* | 527 | /* |
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void) | |||
666 | if (!ecap_ir_support(iommu->ecap)) | 678 | if (!ecap_ir_support(iommu->ecap)) |
667 | continue; | 679 | continue; |
668 | 680 | ||
669 | if (intel_setup_irq_remapping(iommu, eim)) | 681 | if (intel_setup_irq_remapping(iommu)) |
670 | goto error; | 682 | goto error; |
671 | 683 | ||
684 | iommu_set_irq_remapping(iommu, eim); | ||
672 | setup = 1; | 685 | setup = 1; |
673 | } | 686 | } |
674 | 687 | ||
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void) | |||
689 | return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; | 702 | return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; |
690 | 703 | ||
691 | error: | 704 | error: |
692 | /* | 705 | for_each_iommu(iommu, drhd) |
693 | * handle error condition gracefully here! | 706 | if (ecap_ir_support(iommu->ecap)) { |
694 | */ | 707 | iommu_disable_irq_remapping(iommu); |
708 | intel_teardown_irq_remapping(iommu); | ||
709 | } | ||
695 | 710 | ||
696 | if (x2apic_present) | 711 | if (x2apic_present) |
697 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); | 712 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); |
@@ -699,12 +714,13 @@ error: | |||
699 | return -1; | 714 | return -1; |
700 | } | 715 | } |
701 | 716 | ||
702 | static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, | 717 | static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, |
703 | struct intel_iommu *iommu) | 718 | struct intel_iommu *iommu, |
719 | struct acpi_dmar_hardware_unit *drhd) | ||
704 | { | 720 | { |
705 | struct acpi_dmar_pci_path *path; | 721 | struct acpi_dmar_pci_path *path; |
706 | u8 bus; | 722 | u8 bus; |
707 | int count; | 723 | int count, free = -1; |
708 | 724 | ||
709 | bus = scope->bus; | 725 | bus = scope->bus; |
710 | path = (struct acpi_dmar_pci_path *)(scope + 1); | 726 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, | |||
720 | PCI_SECONDARY_BUS); | 736 | PCI_SECONDARY_BUS); |
721 | path++; | 737 | path++; |
722 | } | 738 | } |
723 | ir_hpet[ir_hpet_num].bus = bus; | 739 | |
724 | ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); | 740 | for (count = 0; count < MAX_HPET_TBS; count++) { |
725 | ir_hpet[ir_hpet_num].iommu = iommu; | 741 | if (ir_hpet[count].iommu == iommu && |
726 | ir_hpet[ir_hpet_num].id = scope->enumeration_id; | 742 | ir_hpet[count].id == scope->enumeration_id) |
727 | ir_hpet_num++; | 743 | return 0; |
744 | else if (ir_hpet[count].iommu == NULL && free == -1) | ||
745 | free = count; | ||
746 | } | ||
747 | if (free == -1) { | ||
748 | pr_warn("Exceeded Max HPET blocks\n"); | ||
749 | return -ENOSPC; | ||
750 | } | ||
751 | |||
752 | ir_hpet[free].iommu = iommu; | ||
753 | ir_hpet[free].id = scope->enumeration_id; | ||
754 | ir_hpet[free].bus = bus; | ||
755 | ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function); | ||
756 | pr_info("HPET id %d under DRHD base 0x%Lx\n", | ||
757 | scope->enumeration_id, drhd->address); | ||
758 | |||
759 | return 0; | ||
728 | } | 760 | } |
729 | 761 | ||
730 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | 762 | static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, |
731 | struct intel_iommu *iommu) | 763 | struct intel_iommu *iommu, |
764 | struct acpi_dmar_hardware_unit *drhd) | ||
732 | { | 765 | { |
733 | struct acpi_dmar_pci_path *path; | 766 | struct acpi_dmar_pci_path *path; |
734 | u8 bus; | 767 | u8 bus; |
735 | int count; | 768 | int count, free = -1; |
736 | 769 | ||
737 | bus = scope->bus; | 770 | bus = scope->bus; |
738 | path = (struct acpi_dmar_pci_path *)(scope + 1); | 771 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | |||
749 | path++; | 782 | path++; |
750 | } | 783 | } |
751 | 784 | ||
752 | ir_ioapic[ir_ioapic_num].bus = bus; | 785 | for (count = 0; count < MAX_IO_APICS; count++) { |
753 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); | 786 | if (ir_ioapic[count].iommu == iommu && |
754 | ir_ioapic[ir_ioapic_num].iommu = iommu; | 787 | ir_ioapic[count].id == scope->enumeration_id) |
755 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | 788 | return 0; |
756 | ir_ioapic_num++; | 789 | else if (ir_ioapic[count].iommu == NULL && free == -1) |
790 | free = count; | ||
791 | } | ||
792 | if (free == -1) { | ||
793 | pr_warn("Exceeded Max IO APICS\n"); | ||
794 | return -ENOSPC; | ||
795 | } | ||
796 | |||
797 | ir_ioapic[free].bus = bus; | ||
798 | ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function); | ||
799 | ir_ioapic[free].iommu = iommu; | ||
800 | ir_ioapic[free].id = scope->enumeration_id; | ||
801 | pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", | ||
802 | scope->enumeration_id, drhd->address, iommu->seq_id); | ||
803 | |||
804 | return 0; | ||
757 | } | 805 | } |
758 | 806 | ||
759 | static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, | 807 | static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, |
760 | struct intel_iommu *iommu) | 808 | struct intel_iommu *iommu) |
761 | { | 809 | { |
810 | int ret = 0; | ||
762 | struct acpi_dmar_hardware_unit *drhd; | 811 | struct acpi_dmar_hardware_unit *drhd; |
763 | struct acpi_dmar_device_scope *scope; | 812 | struct acpi_dmar_device_scope *scope; |
764 | void *start, *end; | 813 | void *start, *end; |
765 | 814 | ||
766 | drhd = (struct acpi_dmar_hardware_unit *)header; | 815 | drhd = (struct acpi_dmar_hardware_unit *)header; |
767 | |||
768 | start = (void *)(drhd + 1); | 816 | start = (void *)(drhd + 1); |
769 | end = ((void *)drhd) + header->length; | 817 | end = ((void *)drhd) + header->length; |
770 | 818 | ||
771 | while (start < end) { | 819 | while (start < end && ret == 0) { |
772 | scope = start; | 820 | scope = start; |
773 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { | 821 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) |
774 | if (ir_ioapic_num == MAX_IO_APICS) { | 822 | ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); |
775 | printk(KERN_WARNING "Exceeded Max IO APICS\n"); | 823 | else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) |
776 | return -1; | 824 | ret = ir_parse_one_hpet_scope(scope, iommu, drhd); |
777 | } | 825 | start += scope->length; |
778 | 826 | } | |
779 | printk(KERN_INFO "IOAPIC id %d under DRHD base " | ||
780 | " 0x%Lx IOMMU %d\n", scope->enumeration_id, | ||
781 | drhd->address, iommu->seq_id); | ||
782 | 827 | ||
783 | ir_parse_one_ioapic_scope(scope, iommu); | 828 | return ret; |
784 | } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { | 829 | } |
785 | if (ir_hpet_num == MAX_HPET_TBS) { | ||
786 | printk(KERN_WARNING "Exceeded Max HPET blocks\n"); | ||
787 | return -1; | ||
788 | } | ||
789 | 830 | ||
790 | printk(KERN_INFO "HPET id %d under DRHD base" | 831 | static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) |
791 | " 0x%Lx\n", scope->enumeration_id, | 832 | { |
792 | drhd->address); | 833 | int i; |
793 | 834 | ||
794 | ir_parse_one_hpet_scope(scope, iommu); | 835 | for (i = 0; i < MAX_HPET_TBS; i++) |
795 | } | 836 | if (ir_hpet[i].iommu == iommu) |
796 | start += scope->length; | 837 | ir_hpet[i].iommu = NULL; |
797 | } | ||
798 | 838 | ||
799 | return 0; | 839 | for (i = 0; i < MAX_IO_APICS; i++) |
840 | if (ir_ioapic[i].iommu == iommu) | ||
841 | ir_ioapic[i].iommu = NULL; | ||
800 | } | 842 | } |
801 | 843 | ||
802 | /* | 844 | /* |
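
The rewritten scope parsers no longer append at a global counter; they look for an entry already registered for the same (iommu, id) pair and otherwise take the first free slot, which is what lets ir_remove_ioapic_hpet_scope() release entries by simply clearing ->iommu. A small standalone model of that slot management, with a simplified slot struct:

/*
 * Find-or-first-free slot scan over a fixed table; a NULL ->iommu marks a
 * free slot, so removal is just resetting that pointer.
 */
#include <stdio.h>

#define MAX_SLOTS 4

struct scope_slot {
        void *iommu;    /* NULL means the slot is free */
        int id;
};

static int register_scope(struct scope_slot *table, void *iommu, int id)
{
        int free_slot = -1;

        for (int i = 0; i < MAX_SLOTS; i++) {
                if (table[i].iommu == iommu && table[i].id == id)
                        return 0;               /* already registered */
                if (table[i].iommu == NULL && free_slot == -1)
                        free_slot = i;          /* remember first free slot */
        }
        if (free_slot == -1)
                return -1;                      /* table full (-ENOSPC in the kernel) */

        table[free_slot].iommu = iommu;
        table[free_slot].id = id;
        return 0;
}

int main(void)
{
        struct scope_slot table[MAX_SLOTS] = { { NULL, 0 } };
        int dummy_iommu;

        printf("%d\n", register_scope(table, &dummy_iommu, 9));        /* takes slot 0 */
        printf("%d\n", register_scope(table, &dummy_iommu, 9));        /* duplicate, no-op */
        return 0;
}
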
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = { | |||
1171 | .msi_setup_irq = intel_msi_setup_irq, | 1213 | .msi_setup_irq = intel_msi_setup_irq, |
1172 | .alloc_hpet_msi = intel_alloc_hpet_msi, | 1214 | .alloc_hpet_msi = intel_alloc_hpet_msi, |
1173 | }; | 1215 | }; |
1216 | |||
1217 | /* | ||
1218 | * Support of Interrupt Remapping Unit Hotplug | ||
1219 | */ | ||
1220 | static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) | ||
1221 | { | ||
1222 | int ret; | ||
1223 | int eim = x2apic_enabled(); | ||
1224 | |||
1225 | if (eim && !ecap_eim_support(iommu->ecap)) { | ||
1226 | pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n", | ||
1227 | iommu->reg_phys, iommu->ecap); | ||
1228 | return -ENODEV; | ||
1229 | } | ||
1230 | |||
1231 | if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { | ||
1232 | pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n", | ||
1233 | iommu->reg_phys); | ||
1234 | return -ENODEV; | ||
1235 | } | ||
1236 | |||
1237 | /* TODO: check all IOAPICs are covered by IOMMU */ | ||
1238 | |||
1239 | /* Setup Interrupt-remapping now. */ | ||
1240 | ret = intel_setup_irq_remapping(iommu); | ||
1241 | if (ret) { | ||
1242 | pr_err("DRHD %Lx: failed to allocate resource\n", | ||
1243 | iommu->reg_phys); | ||
1244 | ir_remove_ioapic_hpet_scope(iommu); | ||
1245 | return ret; | ||
1246 | } | ||
1247 | |||
1248 | if (!iommu->qi) { | ||
1249 | /* Clear previous faults. */ | ||
1250 | dmar_fault(-1, iommu); | ||
1251 | iommu_disable_irq_remapping(iommu); | ||
1252 | dmar_disable_qi(iommu); | ||
1253 | } | ||
1254 | |||
1255 | /* Enable queued invalidation */ | ||
1256 | ret = dmar_enable_qi(iommu); | ||
1257 | if (!ret) { | ||
1258 | iommu_set_irq_remapping(iommu, eim); | ||
1259 | } else { | ||
1260 | pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n", | ||
1261 | iommu->reg_phys, iommu->ecap, ret); | ||
1262 | intel_teardown_irq_remapping(iommu); | ||
1263 | ir_remove_ioapic_hpet_scope(iommu); | ||
1264 | } | ||
1265 | |||
1266 | return ret; | ||
1267 | } | ||
1268 | |||
1269 | int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
1270 | { | ||
1271 | int ret = 0; | ||
1272 | struct intel_iommu *iommu = dmaru->iommu; | ||
1273 | |||
1274 | if (!irq_remapping_enabled) | ||
1275 | return 0; | ||
1276 | if (iommu == NULL) | ||
1277 | return -EINVAL; | ||
1278 | if (!ecap_ir_support(iommu->ecap)) | ||
1279 | return 0; | ||
1280 | |||
1281 | if (insert) { | ||
1282 | if (!iommu->ir_table) | ||
1283 | ret = dmar_ir_add(dmaru, iommu); | ||
1284 | } else { | ||
1285 | if (iommu->ir_table) { | ||
1286 | if (!bitmap_empty(iommu->ir_table->bitmap, | ||
1287 | INTR_REMAP_TABLE_ENTRIES)) { | ||
1288 | ret = -EBUSY; | ||
1289 | } else { | ||
1290 | iommu_disable_irq_remapping(iommu); | ||
1291 | intel_teardown_irq_remapping(iommu); | ||
1292 | ir_remove_ioapic_hpet_scope(iommu); | ||
1293 | } | ||
1294 | } | ||
1295 | } | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
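
dmar_ir_hotplug() refuses to tear an interrupt-remapping unit down while any entries in its remap table are still allocated, returning -EBUSY until the allocation bitmap is empty. The model below captures only that check, with a plain array of longs standing in for the kernel bitmap helpers:

/*
 * Removal gate: an IR unit may be torn down only once its IRTE allocation
 * bitmap is completely clear.
 */
#include <stdbool.h>
#include <stdio.h>

#define BITMAP_LONGS 4

static bool bitmap_is_empty(const unsigned long *map, int nlongs)
{
        for (int i = 0; i < nlongs; i++)
                if (map[i])
                        return false;
        return true;
}

static int try_remove_ir_unit(const unsigned long *irte_bitmap)
{
        if (!bitmap_is_empty(irte_bitmap, BITMAP_LONGS))
                return -16;     /* -EBUSY: some remap entries still in use */

        /* ... disable remapping, free the table, drop the scope entries ... */
        return 0;
}

int main(void)
{
        unsigned long in_use[BITMAP_LONGS] = { 0x1 };
        unsigned long idle[BITMAP_LONGS] = { 0 };

        printf("%d %d\n", try_remove_ir_unit(in_use), try_remove_ir_unit(idle));
        return 0;
}
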
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 02e4313e937c..1bd63352ab17 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, | |||
1143 | { | 1143 | { |
1144 | struct scatterlist *s; | 1144 | struct scatterlist *s; |
1145 | size_t mapped = 0; | 1145 | size_t mapped = 0; |
1146 | unsigned int i; | 1146 | unsigned int i, min_pagesz; |
1147 | int ret; | 1147 | int ret; |
1148 | 1148 | ||
1149 | for_each_sg(sg, s, nents, i) { | 1149 | if (unlikely(domain->ops->pgsize_bitmap == 0UL)) |
1150 | phys_addr_t phys = page_to_phys(sg_page(s)); | 1150 | return 0; |
1151 | 1151 | ||
1152 | /* We are mapping on page boundarys, so offset must be 0 */ | 1152 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); |
1153 | if (s->offset) | 1153 | |
1154 | for_each_sg(sg, s, nents, i) { | ||
1155 | phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; | ||
1156 | |||
1157 | /* | ||
1158 | * We are mapping on IOMMU page boundaries, so offset within | ||
1159 | * the page must be 0. However, the IOMMU may support pages | ||
1160 | * smaller than PAGE_SIZE, so s->offset may still represent | ||
1161 | * an offset of that boundary within the CPU page. | ||
1162 | */ | ||
1163 | if (!IS_ALIGNED(s->offset, min_pagesz)) | ||
1154 | goto out_err; | 1164 | goto out_err; |
1155 | 1165 | ||
1156 | ret = iommu_map(domain, iova + mapped, phys, s->length, prot); | 1166 | ret = iommu_map(domain, iova + mapped, phys, s->length, prot); |
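
The reworked default_iommu_map_sg() derives the smallest IOMMU page size from the driver's pgsize_bitmap and only requires each scatterlist offset to be aligned to that, so hardware with pages smaller than PAGE_SIZE can map sub-page offsets. A compilable sketch of the alignment test, with an example bitmap and a GCC/Clang builtin in place of the kernel's __ffs():

/*
 * Minimum-page-size alignment check: the lowest set bit of pgsize_bitmap is
 * the smallest supported IOMMU page, and sg offsets must align to it.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sg_offset_ok(unsigned long pgsize_bitmap, unsigned long offset)
{
        unsigned long min_pagesz;

        if (pgsize_bitmap == 0)
                return false;                   /* driver advertises no page sizes */

        min_pagesz = 1UL << __builtin_ctzl(pgsize_bitmap);     /* smallest supported page */
        return (offset & (min_pagesz - 1)) == 0;               /* IS_ALIGNED(offset, min_pagesz) */
}

int main(void)
{
        /* e.g. hardware supporting 4K, 64K, 1M and 16M mappings */
        unsigned long bitmap = (1UL << 12) | (1UL << 16) | (1UL << 20) | (1UL << 24);

        printf("%d %d\n", sg_offset_ok(bitmap, 0x2000), sg_offset_ok(bitmap, 0x20));
        return 0;
}
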
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index e509c58eee92..99effbb17191 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
1185 | dev_name(&pdev->dev), mmu); | 1185 | dev_name(&pdev->dev), mmu); |
1186 | if (ret < 0) { | 1186 | if (ret < 0) { |
1187 | dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); | 1187 | dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); |
1188 | return irq; | 1188 | return ret; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | ipmmu_device_reset(mmu); | 1191 | ipmmu_device_reset(mmu); |
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 1c7b78ecf3e3..e1b05379ca0e 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c | |||
@@ -73,8 +73,7 @@ fail: | |||
73 | 73 | ||
74 | static void __disable_clocks(struct msm_iommu_drvdata *drvdata) | 74 | static void __disable_clocks(struct msm_iommu_drvdata *drvdata) |
75 | { | 75 | { |
76 | if (drvdata->clk) | 76 | clk_disable(drvdata->clk); |
77 | clk_disable(drvdata->clk); | ||
78 | clk_disable(drvdata->pclk); | 77 | clk_disable(drvdata->pclk); |
79 | } | 78 | } |
80 | 79 | ||
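
The dropped "if (drvdata->clk)" tests rely on the clock API treating a NULL clk as a no-op, so optional clocks that were never obtained can be passed straight to clk_disable(). The stand-in below only models that convention; it is not the real clk implementation.

/*
 * NULL-tolerant disable, mirroring the convention the msm_iommu cleanups
 * depend on: a missing optional clock is silently ignored.
 */
#include <stdio.h>

struct fake_clk { const char *name; };

static void fake_clk_disable(struct fake_clk *clk)
{
        if (!clk)
                return;         /* NULL clock: nothing to do, like the clk API */
        printf("disabling %s\n", clk->name);
}

int main(void)
{
        struct fake_clk core = { "iommu_clk" };

        fake_clk_disable(&core);
        fake_clk_disable(NULL); /* optional clock that was never requested */
        return 0;
}
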
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 61def7cb5263..b6d01f97e537 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c | |||
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev) | |||
131 | struct clk *iommu_clk; | 131 | struct clk *iommu_clk; |
132 | struct clk *iommu_pclk; | 132 | struct clk *iommu_pclk; |
133 | struct msm_iommu_drvdata *drvdata; | 133 | struct msm_iommu_drvdata *drvdata; |
134 | struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; | 134 | struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev); |
135 | void __iomem *regs_base; | 135 | void __iomem *regs_base; |
136 | int ret, irq, par; | 136 | int ret, irq, par; |
137 | 137 | ||
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev) | |||
224 | 224 | ||
225 | platform_set_drvdata(pdev, drvdata); | 225 | platform_set_drvdata(pdev, drvdata); |
226 | 226 | ||
227 | if (iommu_clk) | 227 | clk_disable(iommu_clk); |
228 | clk_disable(iommu_clk); | ||
229 | 228 | ||
230 | clk_disable(iommu_pclk); | 229 | clk_disable(iommu_pclk); |
231 | 230 | ||
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev) | |||
264 | 263 | ||
265 | static int msm_iommu_ctx_probe(struct platform_device *pdev) | 264 | static int msm_iommu_ctx_probe(struct platform_device *pdev) |
266 | { | 265 | { |
267 | struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; | 266 | struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev); |
268 | struct msm_iommu_drvdata *drvdata; | 267 | struct msm_iommu_drvdata *drvdata; |
269 | struct msm_iommu_ctx_drvdata *ctx_drvdata; | 268 | struct msm_iommu_ctx_drvdata *ctx_drvdata; |
270 | int i, ret; | 269 | int i, ret; |
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) | |||
323 | SET_NSCFG(drvdata->base, mid, 3); | 322 | SET_NSCFG(drvdata->base, mid, 3); |
324 | } | 323 | } |
325 | 324 | ||
326 | if (drvdata->clk) | 325 | clk_disable(drvdata->clk); |
327 | clk_disable(drvdata->clk); | ||
328 | clk_disable(drvdata->pclk); | 326 | clk_disable(drvdata->pclk); |
329 | 327 | ||
330 | dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); | 328 | dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); |
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 531658d17333..f3d20a2039d2 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c | |||
@@ -10,45 +10,35 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/err.h> | 13 | #include <linux/err.h> |
15 | #include <linux/clk.h> | ||
16 | #include <linux/io.h> | 14 | #include <linux/io.h> |
17 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
18 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/debugfs.h> | 17 | #include <linux/debugfs.h> |
21 | #include <linux/omap-iommu.h> | ||
22 | #include <linux/platform_data/iommu-omap.h> | 18 | #include <linux/platform_data/iommu-omap.h> |
23 | 19 | ||
24 | #include "omap-iopgtable.h" | 20 | #include "omap-iopgtable.h" |
25 | #include "omap-iommu.h" | 21 | #include "omap-iommu.h" |
26 | 22 | ||
27 | #define MAXCOLUMN 100 /* for short messages */ | ||
28 | |||
29 | static DEFINE_MUTEX(iommu_debug_lock); | 23 | static DEFINE_MUTEX(iommu_debug_lock); |
30 | 24 | ||
31 | static struct dentry *iommu_debug_root; | 25 | static struct dentry *iommu_debug_root; |
32 | 26 | ||
33 | static ssize_t debug_read_ver(struct file *file, char __user *userbuf, | 27 | static inline bool is_omap_iommu_detached(struct omap_iommu *obj) |
34 | size_t count, loff_t *ppos) | ||
35 | { | 28 | { |
36 | u32 ver = omap_iommu_arch_version(); | 29 | return !obj->domain; |
37 | char buf[MAXCOLUMN], *p = buf; | ||
38 | |||
39 | p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); | ||
40 | |||
41 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
42 | } | 30 | } |
43 | 31 | ||
44 | static ssize_t debug_read_regs(struct file *file, char __user *userbuf, | 32 | static ssize_t debug_read_regs(struct file *file, char __user *userbuf, |
45 | size_t count, loff_t *ppos) | 33 | size_t count, loff_t *ppos) |
46 | { | 34 | { |
47 | struct device *dev = file->private_data; | 35 | struct omap_iommu *obj = file->private_data; |
48 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
49 | char *p, *buf; | 36 | char *p, *buf; |
50 | ssize_t bytes; | 37 | ssize_t bytes; |
51 | 38 | ||
39 | if (is_omap_iommu_detached(obj)) | ||
40 | return -EPERM; | ||
41 | |||
52 | buf = kmalloc(count, GFP_KERNEL); | 42 | buf = kmalloc(count, GFP_KERNEL); |
53 | if (!buf) | 43 | if (!buf) |
54 | return -ENOMEM; | 44 | return -ENOMEM; |
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, | |||
68 | static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, | 58 | static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, |
69 | size_t count, loff_t *ppos) | 59 | size_t count, loff_t *ppos) |
70 | { | 60 | { |
71 | struct device *dev = file->private_data; | 61 | struct omap_iommu *obj = file->private_data; |
72 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
73 | char *p, *buf; | 62 | char *p, *buf; |
74 | ssize_t bytes, rest; | 63 | ssize_t bytes, rest; |
75 | 64 | ||
65 | if (is_omap_iommu_detached(obj)) | ||
66 | return -EPERM; | ||
67 | |||
76 | buf = kmalloc(count, GFP_KERNEL); | 68 | buf = kmalloc(count, GFP_KERNEL); |
77 | if (!buf) | 69 | if (!buf) |
78 | return -ENOMEM; | 70 | return -ENOMEM; |
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, | |||
93 | return bytes; | 85 | return bytes; |
94 | } | 86 | } |
95 | 87 | ||
96 | static ssize_t debug_write_pagetable(struct file *file, | 88 | static void dump_ioptable(struct seq_file *s) |
97 | const char __user *userbuf, size_t count, loff_t *ppos) | ||
98 | { | 89 | { |
99 | struct iotlb_entry e; | 90 | int i, j; |
100 | struct cr_regs cr; | 91 | u32 da; |
101 | int err; | 92 | u32 *iopgd, *iopte; |
102 | struct device *dev = file->private_data; | 93 | struct omap_iommu *obj = s->private; |
103 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
104 | char buf[MAXCOLUMN], *p = buf; | ||
105 | |||
106 | count = min(count, sizeof(buf)); | ||
107 | |||
108 | mutex_lock(&iommu_debug_lock); | ||
109 | if (copy_from_user(p, userbuf, count)) { | ||
110 | mutex_unlock(&iommu_debug_lock); | ||
111 | return -EFAULT; | ||
112 | } | ||
113 | |||
114 | sscanf(p, "%x %x", &cr.cam, &cr.ram); | ||
115 | if (!cr.cam || !cr.ram) { | ||
116 | mutex_unlock(&iommu_debug_lock); | ||
117 | return -EINVAL; | ||
118 | } | ||
119 | |||
120 | omap_iotlb_cr_to_e(&cr, &e); | ||
121 | err = omap_iopgtable_store_entry(obj, &e); | ||
122 | if (err) | ||
123 | dev_err(obj->dev, "%s: fail to store cr\n", __func__); | ||
124 | |||
125 | mutex_unlock(&iommu_debug_lock); | ||
126 | return count; | ||
127 | } | ||
128 | |||
129 | #define dump_ioptable_entry_one(lv, da, val) \ | ||
130 | ({ \ | ||
131 | int __err = 0; \ | ||
132 | ssize_t bytes; \ | ||
133 | const int maxcol = 22; \ | ||
134 | const char *str = "%d: %08x %08x\n"; \ | ||
135 | bytes = snprintf(p, maxcol, str, lv, da, val); \ | ||
136 | p += bytes; \ | ||
137 | len -= bytes; \ | ||
138 | if (len < maxcol) \ | ||
139 | __err = -ENOMEM; \ | ||
140 | __err; \ | ||
141 | }) | ||
142 | |||
143 | static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) | ||
144 | { | ||
145 | int i; | ||
146 | u32 *iopgd; | ||
147 | char *p = buf; | ||
148 | 94 | ||
149 | spin_lock(&obj->page_table_lock); | 95 | spin_lock(&obj->page_table_lock); |
150 | 96 | ||
151 | iopgd = iopgd_offset(obj, 0); | 97 | iopgd = iopgd_offset(obj, 0); |
152 | for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { | 98 | for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { |
153 | int j, err; | ||
154 | u32 *iopte; | ||
155 | u32 da; | ||
156 | |||
157 | if (!*iopgd) | 99 | if (!*iopgd) |
158 | continue; | 100 | continue; |
159 | 101 | ||
160 | if (!(*iopgd & IOPGD_TABLE)) { | 102 | if (!(*iopgd & IOPGD_TABLE)) { |
161 | da = i << IOPGD_SHIFT; | 103 | da = i << IOPGD_SHIFT; |
162 | 104 | seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd); | |
163 | err = dump_ioptable_entry_one(1, da, *iopgd); | ||
164 | if (err) | ||
165 | goto out; | ||
166 | continue; | 105 | continue; |
167 | } | 106 | } |
168 | 107 | ||
169 | iopte = iopte_offset(iopgd, 0); | 108 | iopte = iopte_offset(iopgd, 0); |
170 | |||
171 | for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { | 109 | for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { |
172 | if (!*iopte) | 110 | if (!*iopte) |
173 | continue; | 111 | continue; |
174 | 112 | ||
175 | da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); | 113 | da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); |
176 | err = dump_ioptable_entry_one(2, da, *iopgd); | 114 | seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte); |
177 | if (err) | ||
178 | goto out; | ||
179 | } | 115 | } |
180 | } | 116 | } |
181 | out: | ||
182 | spin_unlock(&obj->page_table_lock); | ||
183 | 117 | ||
184 | return p - buf; | 118 | spin_unlock(&obj->page_table_lock); |
185 | } | 119 | } |
186 | 120 | ||
187 | static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, | 121 | static int debug_read_pagetable(struct seq_file *s, void *data) |
188 | size_t count, loff_t *ppos) | ||
189 | { | 122 | { |
190 | struct device *dev = file->private_data; | 123 | struct omap_iommu *obj = s->private; |
191 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
192 | char *p, *buf; | ||
193 | size_t bytes; | ||
194 | 124 | ||
195 | buf = (char *)__get_free_page(GFP_KERNEL); | 125 | if (is_omap_iommu_detached(obj)) |
196 | if (!buf) | 126 | return -EPERM; |
197 | return -ENOMEM; | ||
198 | p = buf; | ||
199 | |||
200 | p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); | ||
201 | p += sprintf(p, "-----------------------------------------\n"); | ||
202 | 127 | ||
203 | mutex_lock(&iommu_debug_lock); | 128 | mutex_lock(&iommu_debug_lock); |
204 | 129 | ||
205 | bytes = PAGE_SIZE - (p - buf); | 130 | seq_printf(s, "L: %8s %8s\n", "da:", "pte:"); |
206 | p += dump_ioptable(obj, p, bytes); | 131 | seq_puts(s, "--------------------------\n"); |
207 | 132 | dump_ioptable(s); | |
208 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
209 | 133 | ||
210 | mutex_unlock(&iommu_debug_lock); | 134 | mutex_unlock(&iommu_debug_lock); |
211 | free_page((unsigned long)buf); | ||
212 | 135 | ||
213 | return bytes; | 136 | return 0; |
214 | } | 137 | } |
215 | 138 | ||
216 | #define DEBUG_FOPS(name) \ | 139 | #define DEBUG_SEQ_FOPS_RO(name) \ |
217 | static const struct file_operations debug_##name##_fops = { \ | 140 | static int debug_open_##name(struct inode *inode, struct file *file) \ |
218 | .open = simple_open, \ | 141 | { \ |
219 | .read = debug_read_##name, \ | 142 | return single_open(file, debug_read_##name, inode->i_private); \ |
220 | .write = debug_write_##name, \ | 143 | } \ |
221 | .llseek = generic_file_llseek, \ | 144 | \ |
222 | }; | 145 | static const struct file_operations debug_##name##_fops = { \ |
146 | .open = debug_open_##name, \ | ||
147 | .read = seq_read, \ | ||
148 | .llseek = seq_lseek, \ | ||
149 | .release = single_release, \ | ||
150 | } | ||
223 | 151 | ||
224 | #define DEBUG_FOPS_RO(name) \ | 152 | #define DEBUG_FOPS_RO(name) \ |
225 | static const struct file_operations debug_##name##_fops = { \ | 153 | static const struct file_operations debug_##name##_fops = { \ |
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, | |||
228 | .llseek = generic_file_llseek, \ | 156 | .llseek = generic_file_llseek, \ |
229 | }; | 157 | }; |
230 | 158 | ||
231 | DEBUG_FOPS_RO(ver); | ||
232 | DEBUG_FOPS_RO(regs); | 159 | DEBUG_FOPS_RO(regs); |
233 | DEBUG_FOPS_RO(tlb); | 160 | DEBUG_FOPS_RO(tlb); |
234 | DEBUG_FOPS(pagetable); | 161 | DEBUG_SEQ_FOPS_RO(pagetable); |
235 | 162 | ||
236 | #define __DEBUG_ADD_FILE(attr, mode) \ | 163 | #define __DEBUG_ADD_FILE(attr, mode) \ |
237 | { \ | 164 | { \ |
238 | struct dentry *dent; \ | 165 | struct dentry *dent; \ |
239 | dent = debugfs_create_file(#attr, mode, parent, \ | 166 | dent = debugfs_create_file(#attr, mode, obj->debug_dir, \ |
240 | dev, &debug_##attr##_fops); \ | 167 | obj, &debug_##attr##_fops); \ |
241 | if (!dent) \ | 168 | if (!dent) \ |
242 | return -ENOMEM; \ | 169 | goto err; \ |
243 | } | 170 | } |
244 | 171 | ||
245 | #define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600) | ||
246 | #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) | 172 | #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) |
247 | 173 | ||
248 | static int iommu_debug_register(struct device *dev, void *data) | 174 | void omap_iommu_debugfs_add(struct omap_iommu *obj) |
249 | { | 175 | { |
250 | struct platform_device *pdev = to_platform_device(dev); | 176 | struct dentry *d; |
251 | struct omap_iommu *obj = platform_get_drvdata(pdev); | ||
252 | struct omap_iommu_arch_data *arch_data; | ||
253 | struct dentry *d, *parent; | ||
254 | |||
255 | if (!obj || !obj->dev) | ||
256 | return -EINVAL; | ||
257 | |||
258 | arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); | ||
259 | if (!arch_data) | ||
260 | return -ENOMEM; | ||
261 | |||
262 | arch_data->iommu_dev = obj; | ||
263 | 177 | ||
264 | dev->archdata.iommu = arch_data; | 178 | if (!iommu_debug_root) |
179 | return; | ||
265 | 180 | ||
266 | d = debugfs_create_dir(obj->name, iommu_debug_root); | 181 | obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root); |
267 | if (!d) | 182 | if (!obj->debug_dir) |
268 | goto nomem; | 183 | return; |
269 | parent = d; | ||
270 | 184 | ||
271 | d = debugfs_create_u8("nr_tlb_entries", 400, parent, | 185 | d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir, |
272 | (u8 *)&obj->nr_tlb_entries); | 186 | (u8 *)&obj->nr_tlb_entries); |
273 | if (!d) | 187 | if (!d) |
274 | goto nomem; | 188 | return; |
275 | 189 | ||
276 | DEBUG_ADD_FILE_RO(ver); | ||
277 | DEBUG_ADD_FILE_RO(regs); | 190 | DEBUG_ADD_FILE_RO(regs); |
278 | DEBUG_ADD_FILE_RO(tlb); | 191 | DEBUG_ADD_FILE_RO(tlb); |
279 | DEBUG_ADD_FILE(pagetable); | 192 | DEBUG_ADD_FILE_RO(pagetable); |
280 | 193 | ||
281 | return 0; | 194 | return; |
282 | 195 | ||
283 | nomem: | 196 | err: |
284 | kfree(arch_data); | 197 | debugfs_remove_recursive(obj->debug_dir); |
285 | return -ENOMEM; | ||
286 | } | 198 | } |
287 | 199 | ||
288 | static int iommu_debug_unregister(struct device *dev, void *data) | 200 | void omap_iommu_debugfs_remove(struct omap_iommu *obj) |
289 | { | 201 | { |
290 | if (!dev->archdata.iommu) | 202 | if (!obj->debug_dir) |
291 | return 0; | 203 | return; |
292 | |||
293 | kfree(dev->archdata.iommu); | ||
294 | 204 | ||
295 | dev->archdata.iommu = NULL; | 205 | debugfs_remove_recursive(obj->debug_dir); |
296 | |||
297 | return 0; | ||
298 | } | 206 | } |
299 | 207 | ||
300 | static int __init iommu_debug_init(void) | 208 | void __init omap_iommu_debugfs_init(void) |
301 | { | 209 | { |
302 | struct dentry *d; | 210 | iommu_debug_root = debugfs_create_dir("omap_iommu", NULL); |
303 | int err; | 211 | if (!iommu_debug_root) |
304 | 212 | pr_err("can't create debugfs dir\n"); | |
305 | d = debugfs_create_dir("iommu", NULL); | ||
306 | if (!d) | ||
307 | return -ENOMEM; | ||
308 | iommu_debug_root = d; | ||
309 | |||
310 | err = omap_foreach_iommu_device(d, iommu_debug_register); | ||
311 | if (err) | ||
312 | goto err_out; | ||
313 | return 0; | ||
314 | |||
315 | err_out: | ||
316 | debugfs_remove_recursive(iommu_debug_root); | ||
317 | return err; | ||
318 | } | 213 | } |
319 | module_init(iommu_debug_init) | ||
320 | 214 | ||
321 | static void __exit iommu_debugfs_exit(void) | 215 | void __exit omap_iommu_debugfs_exit(void) |
322 | { | 216 | { |
323 | debugfs_remove_recursive(iommu_debug_root); | 217 | debugfs_remove(iommu_debug_root); |
324 | omap_foreach_iommu_device(NULL, iommu_debug_unregister); | ||
325 | } | 218 | } |
326 | module_exit(iommu_debugfs_exit) | ||
327 | |||
328 | MODULE_DESCRIPTION("omap iommu: debugfs interface"); | ||
329 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
330 | MODULE_LICENSE("GPL v2"); | ||
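
The pagetable dump now goes through a seq_file show callback, so it can stream entries without the remaining-length bookkeeping the old dump_ioptable_entry_one() macro needed. A rough user-space analogue of that structure is sketched below, streaming to a FILE * instead of a struct seq_file; the object and fields are made up for the example.

/*
 * seq_file-style dumping in miniature: the show callback writes to whatever
 * stream the caller provides and never tracks a byte budget itself.
 */
#include <stdio.h>

struct fake_iommu {
        const char *name;
        unsigned int nr_tlb_entries;
};

/* Analogue of debug_read_pagetable(): receives the object via private data. */
static int show_pagetable(FILE *s, void *private)
{
        struct fake_iommu *obj = private;

        fprintf(s, "L: %8s %8s\n", "da:", "pte:");
        fprintf(s, "--------------------------\n");
        fprintf(s, "(%s: %u TLB entries)\n", obj->name, obj->nr_tlb_entries);
        return 0;
}

int main(void)
{
        struct fake_iommu obj = { .name = "mmu0", .nr_tlb_entries = 32 };

        return show_pagetable(stdout, &obj);
}
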
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 18003c044454..bbb7dcef02d3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -76,53 +76,23 @@ struct iotlb_lock { | |||
76 | short vict; | 76 | short vict; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | /* accommodate the difference between omap1 and omap2/3 */ | ||
80 | static const struct iommu_functions *arch_iommu; | ||
81 | |||
82 | static struct platform_driver omap_iommu_driver; | 79 | static struct platform_driver omap_iommu_driver; |
83 | static struct kmem_cache *iopte_cachep; | 80 | static struct kmem_cache *iopte_cachep; |
84 | 81 | ||
85 | /** | 82 | /** |
86 | * omap_install_iommu_arch - Install archtecure specific iommu functions | ||
87 | * @ops: a pointer to architecture specific iommu functions | ||
88 | * | ||
89 | * There are several kind of iommu algorithm(tlb, pagetable) among | ||
90 | * omap series. This interface installs such an iommu algorighm. | ||
91 | **/ | ||
92 | int omap_install_iommu_arch(const struct iommu_functions *ops) | ||
93 | { | ||
94 | if (arch_iommu) | ||
95 | return -EBUSY; | ||
96 | |||
97 | arch_iommu = ops; | ||
98 | return 0; | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(omap_install_iommu_arch); | ||
101 | |||
102 | /** | ||
103 | * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions | ||
104 | * @ops: a pointer to architecture specific iommu functions | ||
105 | * | ||
106 | * This interface uninstalls the iommu algorighm installed previously. | ||
107 | **/ | ||
108 | void omap_uninstall_iommu_arch(const struct iommu_functions *ops) | ||
109 | { | ||
110 | if (arch_iommu != ops) | ||
111 | pr_err("%s: not your arch\n", __func__); | ||
112 | |||
113 | arch_iommu = NULL; | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); | ||
116 | |||
117 | /** | ||
118 | * omap_iommu_save_ctx - Save registers for pm off-mode support | 83 | * omap_iommu_save_ctx - Save registers for pm off-mode support |
119 | * @dev: client device | 84 | * @dev: client device |
120 | **/ | 85 | **/ |
121 | void omap_iommu_save_ctx(struct device *dev) | 86 | void omap_iommu_save_ctx(struct device *dev) |
122 | { | 87 | { |
123 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | 88 | struct omap_iommu *obj = dev_to_omap_iommu(dev); |
89 | u32 *p = obj->ctx; | ||
90 | int i; | ||
124 | 91 | ||
125 | arch_iommu->save_ctx(obj); | 92 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { |
93 | p[i] = iommu_read_reg(obj, i * sizeof(u32)); | ||
94 | dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); | ||
95 | } | ||
126 | } | 96 | } |
127 | EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); | 97 | EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); |
128 | 98 | ||
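
With the omap2-specific hooks folded in, saving and restoring the MMU context is just a word-by-word copy of the register file into obj->ctx and back, as the loop above and the matching restore loop in the next hunk show. A standalone model of those two loops over a fake register array:

/*
 * Context save/restore as plain loops: copy the register file out before the
 * domain powers off, write it back afterwards.  The array stands in for the
 * memory-mapped MMU_* registers and MMU_REG_WORDS for MMU_REG_SIZE / 4.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MMU_REG_WORDS 8

struct fake_iommu {
        uint32_t regs[MMU_REG_WORDS];   /* pretend MMIO register file */
        uint32_t ctx[MMU_REG_WORDS];    /* saved context */
};

static void save_ctx(struct fake_iommu *obj)
{
        for (int i = 0; i < MMU_REG_WORDS; i++)
                obj->ctx[i] = obj->regs[i];
}

static void restore_ctx(struct fake_iommu *obj)
{
        for (int i = 0; i < MMU_REG_WORDS; i++)
                obj->regs[i] = obj->ctx[i];
}

int main(void)
{
        struct fake_iommu obj = { .regs = { 0x11, 0x22, 0x33 } };

        save_ctx(&obj);
        memset(obj.regs, 0, sizeof(obj.regs));  /* "off mode" wipes the registers */
        restore_ctx(&obj);
        printf("reg0 after restore: 0x%x\n", obj.regs[0]);
        return 0;
}
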
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); | |||
133 | void omap_iommu_restore_ctx(struct device *dev) | 103 | void omap_iommu_restore_ctx(struct device *dev) |
134 | { | 104 | { |
135 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | 105 | struct omap_iommu *obj = dev_to_omap_iommu(dev); |
106 | u32 *p = obj->ctx; | ||
107 | int i; | ||
136 | 108 | ||
137 | arch_iommu->restore_ctx(obj); | 109 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { |
110 | iommu_write_reg(obj, p[i], i * sizeof(u32)); | ||
111 | dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); | ||
112 | } | ||
138 | } | 113 | } |
139 | EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); | 114 | EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); |
140 | 115 | ||
141 | /** | 116 | static void __iommu_set_twl(struct omap_iommu *obj, bool on) |
142 | * omap_iommu_arch_version - Return running iommu arch version | ||
143 | **/ | ||
144 | u32 omap_iommu_arch_version(void) | ||
145 | { | 117 | { |
146 | return arch_iommu->version; | 118 | u32 l = iommu_read_reg(obj, MMU_CNTL); |
119 | |||
120 | if (on) | ||
121 | iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); | ||
122 | else | ||
123 | iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); | ||
124 | |||
125 | l &= ~MMU_CNTL_MASK; | ||
126 | if (on) | ||
127 | l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); | ||
128 | else | ||
129 | l |= (MMU_CNTL_MMU_EN); | ||
130 | |||
131 | iommu_write_reg(obj, l, MMU_CNTL); | ||
132 | } | ||
133 | |||
134 | static int omap2_iommu_enable(struct omap_iommu *obj) | ||
135 | { | ||
136 | u32 l, pa; | ||
137 | |||
138 | if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) | ||
139 | return -EINVAL; | ||
140 | |||
141 | pa = virt_to_phys(obj->iopgd); | ||
142 | if (!IS_ALIGNED(pa, SZ_16K)) | ||
143 | return -EINVAL; | ||
144 | |||
145 | l = iommu_read_reg(obj, MMU_REVISION); | ||
146 | dev_info(obj->dev, "%s: version %d.%d\n", obj->name, | ||
147 | (l >> 4) & 0xf, l & 0xf); | ||
148 | |||
149 | iommu_write_reg(obj, pa, MMU_TTB); | ||
150 | |||
151 | if (obj->has_bus_err_back) | ||
152 | iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); | ||
153 | |||
154 | __iommu_set_twl(obj, true); | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static void omap2_iommu_disable(struct omap_iommu *obj) | ||
160 | { | ||
161 | u32 l = iommu_read_reg(obj, MMU_CNTL); | ||
162 | |||
163 | l &= ~MMU_CNTL_MASK; | ||
164 | iommu_write_reg(obj, l, MMU_CNTL); | ||
165 | |||
166 | dev_dbg(obj->dev, "%s is shutting down\n", obj->name); | ||
147 | } | 167 | } |
148 | EXPORT_SYMBOL_GPL(omap_iommu_arch_version); | ||
149 | 168 | ||
150 | static int iommu_enable(struct omap_iommu *obj) | 169 | static int iommu_enable(struct omap_iommu *obj) |
151 | { | 170 | { |
152 | int err; | 171 | int err; |
153 | struct platform_device *pdev = to_platform_device(obj->dev); | 172 | struct platform_device *pdev = to_platform_device(obj->dev); |
154 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 173 | struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); |
155 | |||
156 | if (!arch_iommu) | ||
157 | return -ENODEV; | ||
158 | 174 | ||
159 | if (pdata && pdata->deassert_reset) { | 175 | if (pdata && pdata->deassert_reset) { |
160 | err = pdata->deassert_reset(pdev, pdata->reset_name); | 176 | err = pdata->deassert_reset(pdev, pdata->reset_name); |
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj) | |||
166 | 182 | ||
167 | pm_runtime_get_sync(obj->dev); | 183 | pm_runtime_get_sync(obj->dev); |
168 | 184 | ||
169 | err = arch_iommu->enable(obj); | 185 | err = omap2_iommu_enable(obj); |
170 | 186 | ||
171 | return err; | 187 | return err; |
172 | } | 188 | } |
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj) | |||
174 | static void iommu_disable(struct omap_iommu *obj) | 190 | static void iommu_disable(struct omap_iommu *obj) |
175 | { | 191 | { |
176 | struct platform_device *pdev = to_platform_device(obj->dev); | 192 | struct platform_device *pdev = to_platform_device(obj->dev); |
177 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 193 | struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); |
178 | 194 | ||
179 | arch_iommu->disable(obj); | 195 | omap2_iommu_disable(obj); |
180 | 196 | ||
181 | pm_runtime_put_sync(obj->dev); | 197 | pm_runtime_put_sync(obj->dev); |
182 | 198 | ||
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj) | |||
187 | /* | 203 | /* |
188 | * TLB operations | 204 | * TLB operations |
189 | */ | 205 | */ |
190 | void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) | ||
191 | { | ||
192 | BUG_ON(!cr || !e); | ||
193 | |||
194 | arch_iommu->cr_to_e(cr, e); | ||
195 | } | ||
196 | EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); | ||
197 | |||
198 | static inline int iotlb_cr_valid(struct cr_regs *cr) | 206 | static inline int iotlb_cr_valid(struct cr_regs *cr) |
199 | { | 207 | { |
200 | if (!cr) | 208 | if (!cr) |
201 | return -EINVAL; | 209 | return -EINVAL; |
202 | 210 | ||
203 | return arch_iommu->cr_valid(cr); | 211 | return cr->cam & MMU_CAM_V; |
204 | } | ||
205 | |||
206 | static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, | ||
207 | struct iotlb_entry *e) | ||
208 | { | ||
209 | if (!e) | ||
210 | return NULL; | ||
211 | |||
212 | return arch_iommu->alloc_cr(obj, e); | ||
213 | } | 212 | } |
214 | 213 | ||
215 | static u32 iotlb_cr_to_virt(struct cr_regs *cr) | 214 | static u32 iotlb_cr_to_virt(struct cr_regs *cr) |
216 | { | 215 | { |
217 | return arch_iommu->cr_to_virt(cr); | 216 | u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; |
217 | u32 mask = get_cam_va_mask(cr->cam & page_size); | ||
218 | |||
219 | return cr->cam & mask; | ||
218 | } | 220 | } |
219 | 221 | ||
220 | static u32 get_iopte_attr(struct iotlb_entry *e) | 222 | static u32 get_iopte_attr(struct iotlb_entry *e) |
221 | { | 223 | { |
222 | return arch_iommu->get_pte_attr(e); | 224 | u32 attr; |
225 | |||
226 | attr = e->mixed << 5; | ||
227 | attr |= e->endian; | ||
228 | attr |= e->elsz >> 3; | ||
229 | attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || | ||
230 | (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); | ||
231 | return attr; | ||
223 | } | 232 | } |
224 | 233 | ||
225 | static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) | 234 | static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) |
226 | { | 235 | { |
227 | return arch_iommu->fault_isr(obj, da); | 236 | u32 status, fault_addr; |
237 | |||
238 | status = iommu_read_reg(obj, MMU_IRQSTATUS); | ||
239 | status &= MMU_IRQ_MASK; | ||
240 | if (!status) { | ||
241 | *da = 0; | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | fault_addr = iommu_read_reg(obj, MMU_FAULT_AD); | ||
246 | *da = fault_addr; | ||
247 | |||
248 | iommu_write_reg(obj, status, MMU_IRQSTATUS); | ||
249 | |||
250 | return status; | ||
228 | } | 251 | } |
229 | 252 | ||
230 | static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) | 253 | static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) |
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) | |||
250 | 273 | ||
251 | static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) | 274 | static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) |
252 | { | 275 | { |
253 | arch_iommu->tlb_read_cr(obj, cr); | 276 | cr->cam = iommu_read_reg(obj, MMU_READ_CAM); |
277 | cr->ram = iommu_read_reg(obj, MMU_READ_RAM); | ||
254 | } | 278 | } |
255 | 279 | ||
256 | static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) | 280 | static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) |
257 | { | 281 | { |
258 | arch_iommu->tlb_load_cr(obj, cr); | 282 | iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); |
283 | iommu_write_reg(obj, cr->ram, MMU_RAM); | ||
259 | 284 | ||
260 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | 285 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); |
261 | iommu_write_reg(obj, 1, MMU_LD_TLB); | 286 | iommu_write_reg(obj, 1, MMU_LD_TLB); |
262 | } | 287 | } |
263 | 288 | ||
264 | /** | ||
265 | * iotlb_dump_cr - Dump an iommu tlb entry into buf | ||
266 | * @obj: target iommu | ||
267 | * @cr: contents of cam and ram register | ||
268 | * @buf: output buffer | ||
269 | **/ | ||
270 | static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, | ||
271 | char *buf) | ||
272 | { | ||
273 | BUG_ON(!cr || !buf); | ||
274 | |||
275 | return arch_iommu->dump_cr(obj, cr, buf); | ||
276 | } | ||
277 | |||
278 | /* only used in iotlb iteration for-loop */ | 289 | /* only used in iotlb iteration for-loop */ |
279 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) | 290 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) |
280 | { | 291 | { |
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) | |||
289 | return cr; | 300 | return cr; |
290 | } | 301 | } |
291 | 302 | ||
303 | #ifdef PREFETCH_IOTLB | ||
304 | static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, | ||
305 | struct iotlb_entry *e) | ||
306 | { | ||
307 | struct cr_regs *cr; | ||
308 | |||
309 | if (!e) | ||
310 | return NULL; | ||
311 | |||
312 | if (e->da & ~(get_cam_va_mask(e->pgsz))) { | ||
313 | dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, | ||
314 | e->da); | ||
315 | return ERR_PTR(-EINVAL); | ||
316 | } | ||
317 | |||
318 | cr = kmalloc(sizeof(*cr), GFP_KERNEL); | ||
319 | if (!cr) | ||
320 | return ERR_PTR(-ENOMEM); | ||
321 | |||
322 | cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; | ||
323 | cr->ram = e->pa | e->endian | e->elsz | e->mixed; | ||
324 | |||
325 | return cr; | ||
326 | } | ||
327 | |||
292 | /** | 328 | /** |
293 | * load_iotlb_entry - Set an iommu tlb entry | 329 | * load_iotlb_entry - Set an iommu tlb entry |
294 | * @obj: target iommu | 330 | * @obj: target iommu |
295 | * @e: an iommu tlb entry info | 331 | * @e: an iommu tlb entry info |
296 | **/ | 332 | **/ |
297 | #ifdef PREFETCH_IOTLB | ||
298 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | 333 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
299 | { | 334 | { |
300 | int err = 0; | 335 | int err = 0; |
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj) | |||
423 | pm_runtime_put_sync(obj->dev); | 458 | pm_runtime_put_sync(obj->dev); |
424 | } | 459 | } |
425 | 460 | ||
426 | #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) | 461 | #ifdef CONFIG_OMAP_IOMMU_DEBUG |
462 | |||
463 | #define pr_reg(name) \ | ||
464 | do { \ | ||
465 | ssize_t bytes; \ | ||
466 | const char *str = "%20s: %08x\n"; \ | ||
467 | const int maxcol = 32; \ | ||
468 | bytes = snprintf(p, maxcol, str, __stringify(name), \ | ||
469 | iommu_read_reg(obj, MMU_##name)); \ | ||
470 | p += bytes; \ | ||
471 | len -= bytes; \ | ||
472 | if (len < maxcol) \ | ||
473 | goto out; \ | ||
474 | } while (0) | ||
475 | |||
476 | static ssize_t | ||
477 | omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) | ||
478 | { | ||
479 | char *p = buf; | ||
480 | |||
481 | pr_reg(REVISION); | ||
482 | pr_reg(IRQSTATUS); | ||
483 | pr_reg(IRQENABLE); | ||
484 | pr_reg(WALKING_ST); | ||
485 | pr_reg(CNTL); | ||
486 | pr_reg(FAULT_AD); | ||
487 | pr_reg(TTB); | ||
488 | pr_reg(LOCK); | ||
489 | pr_reg(LD_TLB); | ||
490 | pr_reg(CAM); | ||
491 | pr_reg(RAM); | ||
492 | pr_reg(GFLUSH); | ||
493 | pr_reg(FLUSH_ENTRY); | ||
494 | pr_reg(READ_CAM); | ||
495 | pr_reg(READ_RAM); | ||
496 | pr_reg(EMU_FAULT_AD); | ||
497 | out: | ||
498 | return p - buf; | ||
499 | } | ||
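For reference, each pr_reg() above emits one "%20s: %08x" line, so a context dump read back (e.g. through the debugfs file provided by omap-iommu-debug.c) looks roughly like the following; the register values are made up for illustration:

            REVISION: 00000011
           IRQSTATUS: 00000000
           IRQENABLE: 0000001e
                 TTB: 8fe00000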
427 | 500 | ||
428 | ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) | 501 | ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) |
429 | { | 502 | { |
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) | |||
432 | 505 | ||
433 | pm_runtime_get_sync(obj->dev); | 506 | pm_runtime_get_sync(obj->dev); |
434 | 507 | ||
435 | bytes = arch_iommu->dump_ctx(obj, buf, bytes); | 508 | bytes = omap2_iommu_dump_ctx(obj, buf, bytes); |
436 | 509 | ||
437 | pm_runtime_put_sync(obj->dev); | 510 | pm_runtime_put_sync(obj->dev); |
438 | 511 | ||
439 | return bytes; | 512 | return bytes; |
440 | } | 513 | } |
441 | EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); | ||
442 | 514 | ||
443 | static int | 515 | static int |
444 | __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) | 516 | __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) |
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) | |||
464 | } | 536 | } |
465 | 537 | ||
466 | /** | 538 | /** |
539 | * iotlb_dump_cr - Dump an iommu tlb entry into buf | ||
540 | * @obj: target iommu | ||
541 | * @cr: contents of cam and ram register | ||
542 | * @buf: output buffer | ||
543 | **/ | ||
544 | static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, | ||
545 | char *buf) | ||
546 | { | ||
547 | char *p = buf; | ||
548 | |||
549 | /* FIXME: Need more detail analysis of cam/ram */ | ||
550 | p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, | ||
551 | (cr->cam & MMU_CAM_P) ? 1 : 0); | ||
552 | |||
553 | return p - buf; | ||
554 | } | ||
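Each valid TLB entry therefore dumps as one "<cam> <ram> <p>" line, for example (made-up values) "30000007 90200306 0", with the trailing digit reflecting the MMU_CAM_P (preserved) bit; omap_dump_tlb_entries() below emits one such line per valid entry.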
555 | |||
556 | /** | ||
467 | * omap_dump_tlb_entries - dump cr arrays to given buffer | 557 | * omap_dump_tlb_entries - dump cr arrays to given buffer |
468 | * @obj: target iommu | 558 | * @obj: target iommu |
469 | * @buf: output buffer | 559 | * @buf: output buffer |
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) | |||
488 | 578 | ||
489 | return p - buf; | 579 | return p - buf; |
490 | } | 580 | } |
491 | EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); | ||
492 | |||
493 | int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) | ||
494 | { | ||
495 | return driver_for_each_device(&omap_iommu_driver.driver, | ||
496 | NULL, data, fn); | ||
497 | } | ||
498 | EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); | ||
499 | 581 | ||
500 | #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ | 582 | #endif /* CONFIG_OMAP_IOMMU_DEBUG */ |
501 | 583 | ||
502 | /* | 584 | /* |
503 | * H/W pagetable operations | 585 | * H/W pagetable operations |
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) | |||
680 | * @obj: target iommu | 762 | * @obj: target iommu |
681 | * @e: an iommu tlb entry info | 763 | * @e: an iommu tlb entry info |
682 | **/ | 764 | **/ |
683 | int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) | 765 | static int |
766 | omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
684 | { | 767 | { |
685 | int err; | 768 | int err; |
686 | 769 | ||
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) | |||
690 | prefetch_iotlb_entry(obj, e); | 773 | prefetch_iotlb_entry(obj, e); |
691 | return err; | 774 | return err; |
692 | } | 775 | } |
693 | EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); | ||
694 | 776 | ||
695 | /** | 777 | /** |
696 | * iopgtable_lookup_entry - Lookup an iommu pte entry | 778 | * iopgtable_lookup_entry - Lookup an iommu pte entry |
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
819 | u32 *iopgd, *iopte; | 901 | u32 *iopgd, *iopte; |
820 | struct omap_iommu *obj = data; | 902 | struct omap_iommu *obj = data; |
821 | struct iommu_domain *domain = obj->domain; | 903 | struct iommu_domain *domain = obj->domain; |
904 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
822 | 905 | ||
823 | if (!obj->refcount) | 906 | if (!omap_domain->iommu_dev) |
824 | return IRQ_NONE; | 907 | return IRQ_NONE; |
825 | 908 | ||
826 | errs = iommu_report_fault(obj, &da); | 909 | errs = iommu_report_fault(obj, &da); |
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
880 | 963 | ||
881 | spin_lock(&obj->iommu_lock); | 964 | spin_lock(&obj->iommu_lock); |
882 | 965 | ||
883 | /* an iommu device can only be attached once */ | ||
884 | if (++obj->refcount > 1) { | ||
885 | dev_err(dev, "%s: already attached!\n", obj->name); | ||
886 | err = -EBUSY; | ||
887 | goto err_enable; | ||
888 | } | ||
889 | |||
890 | obj->iopgd = iopgd; | 966 | obj->iopgd = iopgd; |
891 | err = iommu_enable(obj); | 967 | err = iommu_enable(obj); |
892 | if (err) | 968 | if (err) |
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) | |||
899 | return obj; | 975 | return obj; |
900 | 976 | ||
901 | err_enable: | 977 | err_enable: |
902 | obj->refcount--; | ||
903 | spin_unlock(&obj->iommu_lock); | 978 | spin_unlock(&obj->iommu_lock); |
904 | return ERR_PTR(err); | 979 | return ERR_PTR(err); |
905 | } | 980 | } |
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj) | |||
915 | 990 | ||
916 | spin_lock(&obj->iommu_lock); | 991 | spin_lock(&obj->iommu_lock); |
917 | 992 | ||
918 | if (--obj->refcount == 0) | 993 | iommu_disable(obj); |
919 | iommu_disable(obj); | ||
920 | |||
921 | obj->iopgd = NULL; | 994 | obj->iopgd = NULL; |
922 | 995 | ||
923 | spin_unlock(&obj->iommu_lock); | 996 | spin_unlock(&obj->iommu_lock); |
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
934 | int irq; | 1007 | int irq; |
935 | struct omap_iommu *obj; | 1008 | struct omap_iommu *obj; |
936 | struct resource *res; | 1009 | struct resource *res; |
937 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 1010 | struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); |
938 | struct device_node *of = pdev->dev.of_node; | 1011 | struct device_node *of = pdev->dev.of_node; |
939 | 1012 | ||
940 | obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | 1013 | obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); |
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
981 | pm_runtime_irq_safe(obj->dev); | 1054 | pm_runtime_irq_safe(obj->dev); |
982 | pm_runtime_enable(obj->dev); | 1055 | pm_runtime_enable(obj->dev); |
983 | 1056 | ||
1057 | omap_iommu_debugfs_add(obj); | ||
1058 | |||
984 | dev_info(&pdev->dev, "%s registered\n", obj->name); | 1059 | dev_info(&pdev->dev, "%s registered\n", obj->name); |
985 | return 0; | 1060 | return 0; |
986 | } | 1061 | } |
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev) | |||
990 | struct omap_iommu *obj = platform_get_drvdata(pdev); | 1065 | struct omap_iommu *obj = platform_get_drvdata(pdev); |
991 | 1066 | ||
992 | iopgtable_clear_entry_all(obj); | 1067 | iopgtable_clear_entry_all(obj); |
1068 | omap_iommu_debugfs_remove(obj); | ||
993 | 1069 | ||
994 | pm_runtime_disable(obj->dev); | 1070 | pm_runtime_disable(obj->dev); |
995 | 1071 | ||
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) | |||
1026 | e->da = da; | 1102 | e->da = da; |
1027 | e->pa = pa; | 1103 | e->pa = pa; |
1028 | e->valid = MMU_CAM_V; | 1104 | e->valid = MMU_CAM_V; |
1029 | /* FIXME: add OMAP1 support */ | ||
1030 | e->pgsz = pgsz; | 1105 | e->pgsz = pgsz; |
1031 | e->endian = MMU_RAM_ENDIAN_LITTLE; | 1106 | e->endian = MMU_RAM_ENDIAN_LITTLE; |
1032 | e->elsz = MMU_RAM_ELSZ_8; | 1107 | e->elsz = MMU_RAM_ELSZ_8; |
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, | |||
1131 | 1206 | ||
1132 | omap_domain->iommu_dev = arch_data->iommu_dev = NULL; | 1207 | omap_domain->iommu_dev = arch_data->iommu_dev = NULL; |
1133 | omap_domain->dev = NULL; | 1208 | omap_domain->dev = NULL; |
1209 | oiommu->domain = NULL; | ||
1134 | } | 1210 | } |
1135 | 1211 | ||
1136 | static void omap_iommu_detach_dev(struct iommu_domain *domain, | 1212 | static void omap_iommu_detach_dev(struct iommu_domain *domain, |
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void) | |||
1309 | 1385 | ||
1310 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | 1386 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); |
1311 | 1387 | ||
1388 | omap_iommu_debugfs_init(); | ||
1389 | |||
1312 | return platform_driver_register(&omap_iommu_driver); | 1390 | return platform_driver_register(&omap_iommu_driver); |
1313 | } | 1391 | } |
1314 | /* must be ready before omap3isp is probed */ | 1392 | /* must be ready before omap3isp is probed */ |
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void) | |||
1319 | kmem_cache_destroy(iopte_cachep); | 1397 | kmem_cache_destroy(iopte_cachep); |
1320 | 1398 | ||
1321 | platform_driver_unregister(&omap_iommu_driver); | 1399 | platform_driver_unregister(&omap_iommu_driver); |
1400 | |||
1401 | omap_iommu_debugfs_exit(); | ||
1322 | } | 1402 | } |
1323 | module_exit(omap_iommu_exit); | 1403 | module_exit(omap_iommu_exit); |
1324 | 1404 | ||
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4f1b68c08c15..d736630df3c8 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h | |||
@@ -10,9 +10,8 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #if defined(CONFIG_ARCH_OMAP1) | 13 | #ifndef _OMAP_IOMMU_H |
14 | #error "iommu for this processor not implemented yet" | 14 | #define _OMAP_IOMMU_H |
15 | #endif | ||
16 | 15 | ||
17 | struct iotlb_entry { | 16 | struct iotlb_entry { |
18 | u32 da; | 17 | u32 da; |
@@ -30,10 +29,9 @@ struct omap_iommu { | |||
30 | const char *name; | 29 | const char *name; |
31 | void __iomem *regbase; | 30 | void __iomem *regbase; |
32 | struct device *dev; | 31 | struct device *dev; |
33 | void *isr_priv; | ||
34 | struct iommu_domain *domain; | 32 | struct iommu_domain *domain; |
33 | struct dentry *debug_dir; | ||
35 | 34 | ||
36 | unsigned int refcount; | ||
37 | spinlock_t iommu_lock; /* global for this whole object */ | 35 | spinlock_t iommu_lock; /* global for this whole object */ |
38 | 36 | ||
39 | /* | 37 | /* |
@@ -67,34 +65,6 @@ struct cr_regs { | |||
67 | }; | 65 | }; |
68 | }; | 66 | }; |
69 | 67 | ||
70 | /* architecture specific functions */ | ||
71 | struct iommu_functions { | ||
72 | unsigned long version; | ||
73 | |||
74 | int (*enable)(struct omap_iommu *obj); | ||
75 | void (*disable)(struct omap_iommu *obj); | ||
76 | void (*set_twl)(struct omap_iommu *obj, bool on); | ||
77 | u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); | ||
78 | |||
79 | void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); | ||
80 | void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); | ||
81 | |||
82 | struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, | ||
83 | struct iotlb_entry *e); | ||
84 | int (*cr_valid)(struct cr_regs *cr); | ||
85 | u32 (*cr_to_virt)(struct cr_regs *cr); | ||
86 | void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); | ||
87 | ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, | ||
88 | char *buf); | ||
89 | |||
90 | u32 (*get_pte_attr)(struct iotlb_entry *e); | ||
91 | |||
92 | void (*save_ctx)(struct omap_iommu *obj); | ||
93 | void (*restore_ctx)(struct omap_iommu *obj); | ||
94 | ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); | ||
95 | }; | ||
96 | |||
97 | #ifdef CONFIG_IOMMU_API | ||
98 | /** | 68 | /** |
99 | * dev_to_omap_iommu() - retrieves an omap iommu object from a user device | 69 | * dev_to_omap_iommu() - retrieves an omap iommu object from a user device |
100 | * @dev: iommu client device | 70 | * @dev: iommu client device |
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
105 | 75 | ||
106 | return arch_data->iommu_dev; | 76 | return arch_data->iommu_dev; |
107 | } | 77 | } |
108 | #endif | ||
109 | 78 | ||
110 | /* | 79 | /* |
111 | * MMU Register offsets | 80 | * MMU Register offsets |
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
133 | /* | 102 | /* |
134 | * MMU Register bit definitions | 103 | * MMU Register bit definitions |
135 | */ | 104 | */ |
105 | /* IRQSTATUS & IRQENABLE */ | ||
106 | #define MMU_IRQ_MULTIHITFAULT (1 << 4) | ||
107 | #define MMU_IRQ_TABLEWALKFAULT (1 << 3) | ||
108 | #define MMU_IRQ_EMUMISS (1 << 2) | ||
109 | #define MMU_IRQ_TRANSLATIONFAULT (1 << 1) | ||
110 | #define MMU_IRQ_TLBMISS (1 << 0) | ||
111 | |||
112 | #define __MMU_IRQ_FAULT \ | ||
113 | (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) | ||
114 | #define MMU_IRQ_MASK \ | ||
115 | (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) | ||
116 | #define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) | ||
117 | #define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) | ||
118 | |||
119 | /* MMU_CNTL */ | ||
120 | #define MMU_CNTL_SHIFT 1 | ||
121 | #define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) | ||
122 | #define MMU_CNTL_EML_TLB (1 << 3) | ||
123 | #define MMU_CNTL_TWL_EN (1 << 2) | ||
124 | #define MMU_CNTL_MMU_EN (1 << 1) | ||
125 | |||
126 | /* CAM */ | ||
136 | #define MMU_CAM_VATAG_SHIFT 12 | 127 | #define MMU_CAM_VATAG_SHIFT 12 |
137 | #define MMU_CAM_VATAG_MASK \ | 128 | #define MMU_CAM_VATAG_MASK \ |
138 | ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) | 129 | ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) |
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
144 | #define MMU_CAM_PGSZ_4K (2 << 0) | 135 | #define MMU_CAM_PGSZ_4K (2 << 0) |
145 | #define MMU_CAM_PGSZ_16M (3 << 0) | 136 | #define MMU_CAM_PGSZ_16M (3 << 0) |
146 | 137 | ||
138 | /* RAM */ | ||
147 | #define MMU_RAM_PADDR_SHIFT 12 | 139 | #define MMU_RAM_PADDR_SHIFT 12 |
148 | #define MMU_RAM_PADDR_MASK \ | 140 | #define MMU_RAM_PADDR_MASK \ |
149 | ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) | 141 | ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) |
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
165 | 157 | ||
166 | #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 | 158 | #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 |
167 | 159 | ||
160 | #define get_cam_va_mask(pgsz) \ | ||
161 | (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ | ||
162 | ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ | ||
163 | ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ | ||
164 | ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) | ||
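As a quick illustration of how this mask is used: the alignment check in iotlb_alloc_cr() (omap-iommu.c hunk above) rejects any da that has bits set below the page boundary. A minimal user-space sketch of that check, abridged to the two page-size encodings visible in this hunk; the da value is made up:

#include <stdio.h>
#include <stdint.h>

#define MMU_CAM_PGSZ_4K		(2 << 0)
#define MMU_CAM_PGSZ_16M	(3 << 0)

/* abridged copy of the macro above, 4 KiB and 16 MiB cases only */
#define get_cam_va_mask(pgsz)	\
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
	 ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)

int main(void)
{
	uint32_t da = 0x40123000;	/* hypothetical device virtual address */

	/* zero -> aligned, acceptable for a 4 KiB entry */
	printf("4K  leftover bits: %#x\n", da & ~get_cam_va_mask(MMU_CAM_PGSZ_4K));
	/* non-zero -> iotlb_alloc_cr() would return -EINVAL for a 16 MiB entry */
	printf("16M leftover bits: %#x\n", da & ~get_cam_va_mask(MMU_CAM_PGSZ_16M));
	return 0;
}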
165 | |||
168 | /* | 166 | /* |
169 | * utilities for super page(16MB, 1MB, 64KB and 4KB) | 167 | * utilities for super page(16MB, 1MB, 64KB and 4KB) |
170 | */ | 168 | */ |
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
192 | /* | 190 | /* |
193 | * global functions | 191 | * global functions |
194 | */ | 192 | */ |
195 | extern u32 omap_iommu_arch_version(void); | 193 | #ifdef CONFIG_OMAP_IOMMU_DEBUG |
196 | |||
197 | extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); | ||
198 | |||
199 | extern int | ||
200 | omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); | ||
201 | |||
202 | extern void omap_iommu_save_ctx(struct device *dev); | ||
203 | extern void omap_iommu_restore_ctx(struct device *dev); | ||
204 | |||
205 | extern int omap_foreach_iommu_device(void *data, | ||
206 | int (*fn)(struct device *, void *)); | ||
207 | |||
208 | extern int omap_install_iommu_arch(const struct iommu_functions *ops); | ||
209 | extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); | ||
210 | |||
211 | extern ssize_t | 194 | extern ssize_t |
212 | omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); | 195 | omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); |
213 | extern size_t | 196 | extern size_t |
214 | omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); | 197 | omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); |
215 | 198 | ||
199 | void omap_iommu_debugfs_init(void); | ||
200 | void omap_iommu_debugfs_exit(void); | ||
201 | |||
202 | void omap_iommu_debugfs_add(struct omap_iommu *obj); | ||
203 | void omap_iommu_debugfs_remove(struct omap_iommu *obj); | ||
204 | #else | ||
205 | static inline void omap_iommu_debugfs_init(void) { } | ||
206 | static inline void omap_iommu_debugfs_exit(void) { } | ||
207 | |||
208 | static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { } | ||
209 | static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { } | ||
210 | #endif | ||
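(The empty inline stubs let omap_iommu_probe()/omap_iommu_remove() and the driver's init/exit paths, seen in the omap-iommu.c hunk above, call the debugfs hooks unconditionally; when CONFIG_OMAP_IOMMU_DEBUG is not set the calls simply compile away.)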
211 | |||
216 | /* | 212 | /* |
217 | * register accessors | 213 | * register accessors |
218 | */ | 214 | */ |
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) | |||
225 | { | 221 | { |
226 | __raw_writel(val, obj->regbase + offs); | 222 | __raw_writel(val, obj->regbase + offs); |
227 | } | 223 | } |
224 | |||
225 | #endif /* _OMAP_IOMMU_H */ | ||
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c deleted file mode 100644 index 5e1ea3b0bf16..000000000000 --- a/drivers/iommu/omap-iommu2.c +++ /dev/null | |||
@@ -1,337 +0,0 @@ | |||
1 | /* | ||
2 | * omap iommu: omap2/3 architecture specific functions | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
5 | * | ||
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, | ||
7 | * Paul Mundt and Toshihiro Kobayashi | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/err.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/jiffies.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/omap-iommu.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/stringify.h> | ||
22 | #include <linux/platform_data/iommu-omap.h> | ||
23 | |||
24 | #include "omap-iommu.h" | ||
25 | |||
26 | /* | ||
27 | * omap2 architecture specific register bit definitions | ||
28 | */ | ||
29 | #define IOMMU_ARCH_VERSION 0x00000011 | ||
30 | |||
31 | /* IRQSTATUS & IRQENABLE */ | ||
32 | #define MMU_IRQ_MULTIHITFAULT (1 << 4) | ||
33 | #define MMU_IRQ_TABLEWALKFAULT (1 << 3) | ||
34 | #define MMU_IRQ_EMUMISS (1 << 2) | ||
35 | #define MMU_IRQ_TRANSLATIONFAULT (1 << 1) | ||
36 | #define MMU_IRQ_TLBMISS (1 << 0) | ||
37 | |||
38 | #define __MMU_IRQ_FAULT \ | ||
39 | (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) | ||
40 | #define MMU_IRQ_MASK \ | ||
41 | (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) | ||
42 | #define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) | ||
43 | #define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) | ||
44 | |||
45 | /* MMU_CNTL */ | ||
46 | #define MMU_CNTL_SHIFT 1 | ||
47 | #define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) | ||
48 | #define MMU_CNTL_EML_TLB (1 << 3) | ||
49 | #define MMU_CNTL_TWL_EN (1 << 2) | ||
50 | #define MMU_CNTL_MMU_EN (1 << 1) | ||
51 | |||
52 | #define get_cam_va_mask(pgsz) \ | ||
53 | (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ | ||
54 | ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ | ||
55 | ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ | ||
56 | ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) | ||
57 | |||
58 | /* IOMMU errors */ | ||
59 | #define OMAP_IOMMU_ERR_TLB_MISS (1 << 0) | ||
60 | #define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1) | ||
61 | #define OMAP_IOMMU_ERR_EMU_MISS (1 << 2) | ||
62 | #define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3) | ||
63 | #define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4) | ||
64 | |||
65 | static void __iommu_set_twl(struct omap_iommu *obj, bool on) | ||
66 | { | ||
67 | u32 l = iommu_read_reg(obj, MMU_CNTL); | ||
68 | |||
69 | if (on) | ||
70 | iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); | ||
71 | else | ||
72 | iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); | ||
73 | |||
74 | l &= ~MMU_CNTL_MASK; | ||
75 | if (on) | ||
76 | l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); | ||
77 | else | ||
78 | l |= (MMU_CNTL_MMU_EN); | ||
79 | |||
80 | iommu_write_reg(obj, l, MMU_CNTL); | ||
81 | } | ||
82 | |||
83 | |||
84 | static int omap2_iommu_enable(struct omap_iommu *obj) | ||
85 | { | ||
86 | u32 l, pa; | ||
87 | |||
88 | if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) | ||
89 | return -EINVAL; | ||
90 | |||
91 | pa = virt_to_phys(obj->iopgd); | ||
92 | if (!IS_ALIGNED(pa, SZ_16K)) | ||
93 | return -EINVAL; | ||
94 | |||
95 | l = iommu_read_reg(obj, MMU_REVISION); | ||
96 | dev_info(obj->dev, "%s: version %d.%d\n", obj->name, | ||
97 | (l >> 4) & 0xf, l & 0xf); | ||
98 | |||
99 | iommu_write_reg(obj, pa, MMU_TTB); | ||
100 | |||
101 | if (obj->has_bus_err_back) | ||
102 | iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); | ||
103 | |||
104 | __iommu_set_twl(obj, true); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static void omap2_iommu_disable(struct omap_iommu *obj) | ||
110 | { | ||
111 | u32 l = iommu_read_reg(obj, MMU_CNTL); | ||
112 | |||
113 | l &= ~MMU_CNTL_MASK; | ||
114 | iommu_write_reg(obj, l, MMU_CNTL); | ||
115 | |||
116 | dev_dbg(obj->dev, "%s is shutting down\n", obj->name); | ||
117 | } | ||
118 | |||
119 | static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) | ||
120 | { | ||
121 | __iommu_set_twl(obj, false); | ||
122 | } | ||
123 | |||
124 | static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) | ||
125 | { | ||
126 | u32 stat, da; | ||
127 | u32 errs = 0; | ||
128 | |||
129 | stat = iommu_read_reg(obj, MMU_IRQSTATUS); | ||
130 | stat &= MMU_IRQ_MASK; | ||
131 | if (!stat) { | ||
132 | *ra = 0; | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | da = iommu_read_reg(obj, MMU_FAULT_AD); | ||
137 | *ra = da; | ||
138 | |||
139 | if (stat & MMU_IRQ_TLBMISS) | ||
140 | errs |= OMAP_IOMMU_ERR_TLB_MISS; | ||
141 | if (stat & MMU_IRQ_TRANSLATIONFAULT) | ||
142 | errs |= OMAP_IOMMU_ERR_TRANS_FAULT; | ||
143 | if (stat & MMU_IRQ_EMUMISS) | ||
144 | errs |= OMAP_IOMMU_ERR_EMU_MISS; | ||
145 | if (stat & MMU_IRQ_TABLEWALKFAULT) | ||
146 | errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT; | ||
147 | if (stat & MMU_IRQ_MULTIHITFAULT) | ||
148 | errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT; | ||
149 | iommu_write_reg(obj, stat, MMU_IRQSTATUS); | ||
150 | |||
151 | return errs; | ||
152 | } | ||
153 | |||
154 | static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) | ||
155 | { | ||
156 | cr->cam = iommu_read_reg(obj, MMU_READ_CAM); | ||
157 | cr->ram = iommu_read_reg(obj, MMU_READ_RAM); | ||
158 | } | ||
159 | |||
160 | static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) | ||
161 | { | ||
162 | iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); | ||
163 | iommu_write_reg(obj, cr->ram, MMU_RAM); | ||
164 | } | ||
165 | |||
166 | static u32 omap2_cr_to_virt(struct cr_regs *cr) | ||
167 | { | ||
168 | u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; | ||
169 | u32 mask = get_cam_va_mask(cr->cam & page_size); | ||
170 | |||
171 | return cr->cam & mask; | ||
172 | } | ||
173 | |||
174 | static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj, | ||
175 | struct iotlb_entry *e) | ||
176 | { | ||
177 | struct cr_regs *cr; | ||
178 | |||
179 | if (e->da & ~(get_cam_va_mask(e->pgsz))) { | ||
180 | dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, | ||
181 | e->da); | ||
182 | return ERR_PTR(-EINVAL); | ||
183 | } | ||
184 | |||
185 | cr = kmalloc(sizeof(*cr), GFP_KERNEL); | ||
186 | if (!cr) | ||
187 | return ERR_PTR(-ENOMEM); | ||
188 | |||
189 | cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; | ||
190 | cr->ram = e->pa | e->endian | e->elsz | e->mixed; | ||
191 | |||
192 | return cr; | ||
193 | } | ||
194 | |||
195 | static inline int omap2_cr_valid(struct cr_regs *cr) | ||
196 | { | ||
197 | return cr->cam & MMU_CAM_V; | ||
198 | } | ||
199 | |||
200 | static u32 omap2_get_pte_attr(struct iotlb_entry *e) | ||
201 | { | ||
202 | u32 attr; | ||
203 | |||
204 | attr = e->mixed << 5; | ||
205 | attr |= e->endian; | ||
206 | attr |= e->elsz >> 3; | ||
207 | attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || | ||
208 | (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); | ||
209 | return attr; | ||
210 | } | ||
211 | |||
212 | static ssize_t | ||
213 | omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf) | ||
214 | { | ||
215 | char *p = buf; | ||
216 | |||
217 | /* FIXME: Need more detail analysis of cam/ram */ | ||
218 | p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, | ||
219 | (cr->cam & MMU_CAM_P) ? 1 : 0); | ||
220 | |||
221 | return p - buf; | ||
222 | } | ||
223 | |||
224 | #define pr_reg(name) \ | ||
225 | do { \ | ||
226 | ssize_t bytes; \ | ||
227 | const char *str = "%20s: %08x\n"; \ | ||
228 | const int maxcol = 32; \ | ||
229 | bytes = snprintf(p, maxcol, str, __stringify(name), \ | ||
230 | iommu_read_reg(obj, MMU_##name)); \ | ||
231 | p += bytes; \ | ||
232 | len -= bytes; \ | ||
233 | if (len < maxcol) \ | ||
234 | goto out; \ | ||
235 | } while (0) | ||
236 | |||
237 | static ssize_t | ||
238 | omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) | ||
239 | { | ||
240 | char *p = buf; | ||
241 | |||
242 | pr_reg(REVISION); | ||
243 | pr_reg(IRQSTATUS); | ||
244 | pr_reg(IRQENABLE); | ||
245 | pr_reg(WALKING_ST); | ||
246 | pr_reg(CNTL); | ||
247 | pr_reg(FAULT_AD); | ||
248 | pr_reg(TTB); | ||
249 | pr_reg(LOCK); | ||
250 | pr_reg(LD_TLB); | ||
251 | pr_reg(CAM); | ||
252 | pr_reg(RAM); | ||
253 | pr_reg(GFLUSH); | ||
254 | pr_reg(FLUSH_ENTRY); | ||
255 | pr_reg(READ_CAM); | ||
256 | pr_reg(READ_RAM); | ||
257 | pr_reg(EMU_FAULT_AD); | ||
258 | out: | ||
259 | return p - buf; | ||
260 | } | ||
261 | |||
262 | static void omap2_iommu_save_ctx(struct omap_iommu *obj) | ||
263 | { | ||
264 | int i; | ||
265 | u32 *p = obj->ctx; | ||
266 | |||
267 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { | ||
268 | p[i] = iommu_read_reg(obj, i * sizeof(u32)); | ||
269 | dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); | ||
270 | } | ||
271 | |||
272 | BUG_ON(p[0] != IOMMU_ARCH_VERSION); | ||
273 | } | ||
274 | |||
275 | static void omap2_iommu_restore_ctx(struct omap_iommu *obj) | ||
276 | { | ||
277 | int i; | ||
278 | u32 *p = obj->ctx; | ||
279 | |||
280 | for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { | ||
281 | iommu_write_reg(obj, p[i], i * sizeof(u32)); | ||
282 | dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); | ||
283 | } | ||
284 | |||
285 | BUG_ON(p[0] != IOMMU_ARCH_VERSION); | ||
286 | } | ||
287 | |||
288 | static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) | ||
289 | { | ||
290 | e->da = cr->cam & MMU_CAM_VATAG_MASK; | ||
291 | e->pa = cr->ram & MMU_RAM_PADDR_MASK; | ||
292 | e->valid = cr->cam & MMU_CAM_V; | ||
293 | e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; | ||
294 | e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; | ||
295 | e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; | ||
296 | e->mixed = cr->ram & MMU_RAM_MIXED; | ||
297 | } | ||
298 | |||
299 | static const struct iommu_functions omap2_iommu_ops = { | ||
300 | .version = IOMMU_ARCH_VERSION, | ||
301 | |||
302 | .enable = omap2_iommu_enable, | ||
303 | .disable = omap2_iommu_disable, | ||
304 | .set_twl = omap2_iommu_set_twl, | ||
305 | .fault_isr = omap2_iommu_fault_isr, | ||
306 | |||
307 | .tlb_read_cr = omap2_tlb_read_cr, | ||
308 | .tlb_load_cr = omap2_tlb_load_cr, | ||
309 | |||
310 | .cr_to_e = omap2_cr_to_e, | ||
311 | .cr_to_virt = omap2_cr_to_virt, | ||
312 | .alloc_cr = omap2_alloc_cr, | ||
313 | .cr_valid = omap2_cr_valid, | ||
314 | .dump_cr = omap2_dump_cr, | ||
315 | |||
316 | .get_pte_attr = omap2_get_pte_attr, | ||
317 | |||
318 | .save_ctx = omap2_iommu_save_ctx, | ||
319 | .restore_ctx = omap2_iommu_restore_ctx, | ||
320 | .dump_ctx = omap2_iommu_dump_ctx, | ||
321 | }; | ||
322 | |||
323 | static int __init omap2_iommu_init(void) | ||
324 | { | ||
325 | return omap_install_iommu_arch(&omap2_iommu_ops); | ||
326 | } | ||
327 | module_init(omap2_iommu_init); | ||
328 | |||
329 | static void __exit omap2_iommu_exit(void) | ||
330 | { | ||
331 | omap_uninstall_iommu_arch(&omap2_iommu_ops); | ||
332 | } | ||
333 | module_exit(omap2_iommu_exit); | ||
334 | |||
335 | MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); | ||
336 | MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions"); | ||
337 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c new file mode 100644 index 000000000000..b2023af384b9 --- /dev/null +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cacheflush.h> | ||
8 | #include <asm/pgtable.h> | ||
9 | #include <linux/compiler.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/iommu.h> | ||
16 | #include <linux/jiffies.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/of_platform.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | /** MMU register offsets */ | ||
27 | #define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ | ||
28 | #define RK_MMU_STATUS 0x04 | ||
29 | #define RK_MMU_COMMAND 0x08 | ||
30 | #define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */ | ||
31 | #define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */ | ||
32 | #define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */ | ||
33 | #define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */ | ||
34 | #define RK_MMU_INT_MASK 0x1C /* IRQ enable */ | ||
35 | #define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */ | ||
36 | #define RK_MMU_AUTO_GATING 0x24 | ||
37 | |||
38 | #define DTE_ADDR_DUMMY 0xCAFEBABE | ||
39 | #define FORCE_RESET_TIMEOUT 100 /* ms */ | ||
40 | |||
41 | /* RK_MMU_STATUS fields */ | ||
42 | #define RK_MMU_STATUS_PAGING_ENABLED BIT(0) | ||
43 | #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1) | ||
44 | #define RK_MMU_STATUS_STALL_ACTIVE BIT(2) | ||
45 | #define RK_MMU_STATUS_IDLE BIT(3) | ||
46 | #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4) | ||
47 | #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5) | ||
48 | #define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31) | ||
49 | |||
50 | /* RK_MMU_COMMAND command values */ | ||
51 | #define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */ | ||
52 | #define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */ | ||
53 | #define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */ | ||
54 | #define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */ | ||
55 | #define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */ | ||
56 | #define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */ | ||
57 | #define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */ | ||
58 | |||
59 | /* RK_MMU_INT_* register fields */ | ||
60 | #define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */ | ||
61 | #define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */ | ||
62 | #define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR) | ||
63 | |||
64 | #define NUM_DT_ENTRIES 1024 | ||
65 | #define NUM_PT_ENTRIES 1024 | ||
66 | |||
67 | #define SPAGE_ORDER 12 | ||
68 | #define SPAGE_SIZE (1 << SPAGE_ORDER) | ||
69 | |||
70 | /* | ||
71 | * Support mapping any size that fits in one page table: | ||
72 | * 4 KiB to 4 MiB | ||
73 | */ | ||
74 | #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 | ||
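(0x007ff000 has bits 12 through 22 set, i.e. every power-of-two size from 2^12 = 4 KiB up to 2^22 = 4 MiB, matching the comment; the IOMMU core chops larger iommu_map() requests into chunks of these sizes before they reach rk_iommu_map() below.)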
75 | |||
76 | #define IOMMU_REG_POLL_COUNT_FAST 1000 | ||
77 | |||
78 | struct rk_iommu_domain { | ||
79 | struct list_head iommus; | ||
80 | u32 *dt; /* page directory table */ | ||
81 | spinlock_t iommus_lock; /* lock for iommus list */ | ||
82 | spinlock_t dt_lock; /* lock for modifying page directory table */ | ||
83 | }; | ||
84 | |||
85 | struct rk_iommu { | ||
86 | struct device *dev; | ||
87 | void __iomem *base; | ||
88 | int irq; | ||
89 | struct list_head node; /* entry in rk_iommu_domain.iommus */ | ||
90 | struct iommu_domain *domain; /* domain to which iommu is attached */ | ||
91 | }; | ||
92 | |||
93 | static inline void rk_table_flush(u32 *va, unsigned int count) | ||
94 | { | ||
95 | phys_addr_t pa_start = virt_to_phys(va); | ||
96 | phys_addr_t pa_end = virt_to_phys(va + count); | ||
97 | size_t size = pa_end - pa_start; | ||
98 | |||
99 | __cpuc_flush_dcache_area(va, size); | ||
100 | outer_flush_range(pa_start, pa_end); | ||
101 | } | ||
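(count here is in 32-bit table entries, not bytes: va is a u32 *, so va + count advances by count * 4 bytes and the D-cache/outer-cache maintenance covers exactly the entries that were just written.)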
102 | |||
103 | /** | ||
104 | * Inspired by _wait_for in intel_drv.h | ||
105 | * This is NOT safe for use in interrupt context. | ||
106 | * | ||
107 | * Note that it's important that we check the condition again after having | ||
108 | * timed out, since the timeout could be due to preemption or similar and | ||
109 | * we've never had a chance to check the condition before the timeout. | ||
110 | */ | ||
111 | #define rk_wait_for(COND, MS) ({ \ | ||
112 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ | ||
113 | int ret__ = 0; \ | ||
114 | while (!(COND)) { \ | ||
115 | if (time_after(jiffies, timeout__)) { \ | ||
116 | ret__ = (COND) ? 0 : -ETIMEDOUT; \ | ||
117 | break; \ | ||
118 | } \ | ||
119 | usleep_range(50, 100); \ | ||
120 | } \ | ||
121 | ret__; \ | ||
122 | }) | ||
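The callers below use it directly on the MMU status bits; a sketch of the pattern (the message text is not taken from the driver):

	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		dev_err(iommu->dev, "stall did not assert within 1 ms\n");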
123 | |||
124 | /* | ||
125 | * The Rockchip rk3288 iommu uses a 2-level page table. | ||
126 | * The first level is the "Directory Table" (DT). | ||
127 | * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing | ||
128 | * to a "Page Table". | ||
129 | * The second level is the 1024 Page Tables (PT). | ||
130 | * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to | ||
131 | * a 4 KB page of physical memory. | ||
132 | * | ||
133 | * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries). | ||
134 | * Each iommu device has a MMU_DTE_ADDR register that contains the physical | ||
135 | * address of the start of the DT page. | ||
136 | * | ||
137 | * The structure of the page table is as follows: | ||
138 | * | ||
139 | * DT | ||
140 | * MMU_DTE_ADDR -> +-----+ | ||
141 | * | | | ||
142 | * +-----+ PT | ||
143 | * | DTE | -> +-----+ | ||
144 | * +-----+ | | Memory | ||
145 | * | | +-----+ Page | ||
146 | * | | | PTE | -> +-----+ | ||
147 | * +-----+ +-----+ | | | ||
148 | * | | | | | ||
149 | * | | | | | ||
150 | * +-----+ | | | ||
151 | * | | | ||
152 | * | | | ||
153 | * +-----+ | ||
154 | */ | ||
155 | |||
156 | /* | ||
157 | * Each DTE has a PT address and a valid bit: | ||
158 | * +---------------------+-----------+-+ | ||
159 | * | PT address | Reserved |V| | ||
160 | * +---------------------+-----------+-+ | ||
161 | * 31:12 - PT address (PTs always starts on a 4 KB boundary) | ||
162 | * 11: 1 - Reserved | ||
163 | * 0 - 1 if PT @ PT address is valid | ||
164 | */ | ||
165 | #define RK_DTE_PT_ADDRESS_MASK 0xfffff000 | ||
166 | #define RK_DTE_PT_VALID BIT(0) | ||
167 | |||
168 | static inline phys_addr_t rk_dte_pt_address(u32 dte) | ||
169 | { | ||
170 | return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; | ||
171 | } | ||
172 | |||
173 | static inline bool rk_dte_is_pt_valid(u32 dte) | ||
174 | { | ||
175 | return dte & RK_DTE_PT_VALID; | ||
176 | } | ||
177 | |||
178 | static u32 rk_mk_dte(u32 *pt) | ||
179 | { | ||
180 | phys_addr_t pt_phys = virt_to_phys(pt); | ||
181 | return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Each PTE has a Page address, some flags and a valid bit: | ||
186 | * +---------------------+---+-------+-+ | ||
187 | * | Page address |Rsv| Flags |V| | ||
188 | * +---------------------+---+-------+-+ | ||
189 | * 31:12 - Page address (Pages always start on a 4 KB boundary) | ||
190 | * 11: 9 - Reserved | ||
191 | * 8: 1 - Flags | ||
192 | * 8 - Read allocate - allocate cache space on read misses | ||
193 | * 7 - Read cache - enable cache & prefetch of data | ||
194 | * 6 - Write buffer - enable delaying writes on their way to memory | ||
195 | * 5 - Write allocate - allocate cache space on write misses | ||
196 | * 4 - Write cache - different writes can be merged together | ||
197 | * 3 - Override cache attributes | ||
198 | * if 1, bits 4-8 control cache attributes | ||
199 | * if 0, the system bus defaults are used | ||
200 | * 2 - Writable | ||
201 | * 1 - Readable | ||
202 | * 0 - 1 if Page @ Page address is valid | ||
203 | */ | ||
204 | #define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 | ||
205 | #define RK_PTE_PAGE_FLAGS_MASK 0x000001fe | ||
206 | #define RK_PTE_PAGE_WRITABLE BIT(2) | ||
207 | #define RK_PTE_PAGE_READABLE BIT(1) | ||
208 | #define RK_PTE_PAGE_VALID BIT(0) | ||
209 | |||
210 | static inline phys_addr_t rk_pte_page_address(u32 pte) | ||
211 | { | ||
212 | return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; | ||
213 | } | ||
214 | |||
215 | static inline bool rk_pte_is_page_valid(u32 pte) | ||
216 | { | ||
217 | return pte & RK_PTE_PAGE_VALID; | ||
218 | } | ||
219 | |||
220 | /* TODO: set cache flags per prot IOMMU_CACHE */ | ||
221 | static u32 rk_mk_pte(phys_addr_t page, int prot) | ||
222 | { | ||
223 | u32 flags = 0; | ||
224 | flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; | ||
225 | flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; | ||
226 | page &= RK_PTE_PAGE_ADDRESS_MASK; | ||
227 | return page | flags | RK_PTE_PAGE_VALID; | ||
228 | } | ||
229 | |||
230 | static u32 rk_mk_pte_invalid(u32 pte) | ||
231 | { | ||
232 | return pte & ~RK_PTE_PAGE_VALID; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * rk3288 iova (IOMMU Virtual Address) format | ||
237 | * 31 22.21 12.11 0 | ||
238 | * +-----------+-----------+-------------+ | ||
239 | * | DTE index | PTE index | Page offset | | ||
240 | * +-----------+-----------+-------------+ | ||
241 | * 31:22 - DTE index - index of DTE in DT | ||
242 | * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address | ||
243 | * 11: 0 - Page offset - offset into page @ PTE.page_address | ||
244 | */ | ||
245 | #define RK_IOVA_DTE_MASK 0xffc00000 | ||
246 | #define RK_IOVA_DTE_SHIFT 22 | ||
247 | #define RK_IOVA_PTE_MASK 0x003ff000 | ||
248 | #define RK_IOVA_PTE_SHIFT 12 | ||
249 | #define RK_IOVA_PAGE_MASK 0x00000fff | ||
250 | #define RK_IOVA_PAGE_SHIFT 0 | ||
251 | |||
252 | static u32 rk_iova_dte_index(dma_addr_t iova) | ||
253 | { | ||
254 | return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; | ||
255 | } | ||
256 | |||
257 | static u32 rk_iova_pte_index(dma_addr_t iova) | ||
258 | { | ||
259 | return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; | ||
260 | } | ||
261 | |||
262 | static u32 rk_iova_page_offset(dma_addr_t iova) | ||
263 | { | ||
264 | return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; | ||
265 | } | ||
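Putting the index helpers and the DTE/PTE layouts above together, a minimal user-space sketch of the two-level walk; the tables and addresses are fabricated, and the driver's rk_iommu_iova_to_phys() further down performs the same arithmetic against the live, physically addressed tables:

#include <stdio.h>
#include <stdint.h>

#define RK_DTE_PT_ADDRESS_MASK		0xfffff000
#define RK_DTE_PT_VALID			(1u << 0)
#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000
#define RK_PTE_PAGE_VALID		(1u << 0)

static uint32_t dte_index(uint32_t iova)   { return (iova & 0xffc00000) >> 22; }
static uint32_t pte_index(uint32_t iova)   { return (iova & 0x003ff000) >> 12; }
static uint32_t page_offset(uint32_t iova) { return iova & 0x00000fff; }

int main(void)
{
	static uint32_t dt[1024], pt[1024];	/* stand-ins for the real 4 KiB tables */
	uint32_t iova = 0x00401234;		/* DTE 1, PTE 1, offset 0x234 */

	dt[1] = 0x10000000 | RK_DTE_PT_VALID;	/* pretend the PT page lives at 0x10000000 */
	pt[1] = 0x20005000 | RK_PTE_PAGE_VALID;	/* pretend the data page lives at 0x20005000 */

	uint32_t dte = dt[dte_index(iova)];
	/* the driver would phys_to_virt(dte & RK_DTE_PT_ADDRESS_MASK) to find the PT here */
	uint32_t pte = pt[pte_index(iova)];

	if ((dte & RK_DTE_PT_VALID) && (pte & RK_PTE_PAGE_VALID))
		printf("iova %#010x -> pa %#010x\n", iova,
		       (pte & RK_PTE_PAGE_ADDRESS_MASK) | page_offset(iova));
	return 0;
}

Running it prints "iova 0x00401234 -> pa 0x20005234", i.e. the page address from the PTE combined with the low 12 bits of the iova.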
266 | |||
267 | static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) | ||
268 | { | ||
269 | return readl(iommu->base + offset); | ||
270 | } | ||
271 | |||
272 | static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) | ||
273 | { | ||
274 | writel(value, iommu->base + offset); | ||
275 | } | ||
276 | |||
277 | static void rk_iommu_command(struct rk_iommu *iommu, u32 command) | ||
278 | { | ||
279 | writel(command, iommu->base + RK_MMU_COMMAND); | ||
280 | } | ||
281 | |||
282 | static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, | ||
283 | size_t size) | ||
284 | { | ||
285 | dma_addr_t iova_end = iova + size; | ||
286 | /* | ||
287 | * TODO(djkurtz): Figure out when it is more efficient to shootdown the | ||
288 | * entire iotlb rather than iterate over individual iovas. | ||
289 | */ | ||
290 | for (; iova < iova_end; iova += SPAGE_SIZE) | ||
291 | rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); | ||
292 | } | ||
293 | |||
294 | static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) | ||
295 | { | ||
296 | return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; | ||
297 | } | ||
298 | |||
299 | static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) | ||
300 | { | ||
301 | return rk_iommu_read(iommu, RK_MMU_STATUS) & | ||
302 | RK_MMU_STATUS_PAGING_ENABLED; | ||
303 | } | ||
304 | |||
305 | static int rk_iommu_enable_stall(struct rk_iommu *iommu) | ||
306 | { | ||
307 | int ret; | ||
308 | |||
309 | if (rk_iommu_is_stall_active(iommu)) | ||
310 | return 0; | ||
311 | |||
312 | /* Stall can only be enabled if paging is enabled */ | ||
313 | if (!rk_iommu_is_paging_enabled(iommu)) | ||
314 | return 0; | ||
315 | |||
316 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); | ||
317 | |||
318 | ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); | ||
319 | if (ret) | ||
320 | dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", | ||
321 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
322 | |||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | static int rk_iommu_disable_stall(struct rk_iommu *iommu) | ||
327 | { | ||
328 | int ret; | ||
329 | |||
330 | if (!rk_iommu_is_stall_active(iommu)) | ||
331 | return 0; | ||
332 | |||
333 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); | ||
334 | |||
335 | ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); | ||
336 | if (ret) | ||
337 | dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", | ||
338 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
339 | |||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | static int rk_iommu_enable_paging(struct rk_iommu *iommu) | ||
344 | { | ||
345 | int ret; | ||
346 | |||
347 | if (rk_iommu_is_paging_enabled(iommu)) | ||
348 | return 0; | ||
349 | |||
350 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); | ||
351 | |||
352 | ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); | ||
353 | if (ret) | ||
354 | dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", | ||
355 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
356 | |||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | static int rk_iommu_disable_paging(struct rk_iommu *iommu) | ||
361 | { | ||
362 | int ret; | ||
363 | |||
364 | if (!rk_iommu_is_paging_enabled(iommu)) | ||
365 | return 0; | ||
366 | |||
367 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); | ||
368 | |||
369 | ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); | ||
370 | if (ret) | ||
371 | dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", | ||
372 | rk_iommu_read(iommu, RK_MMU_STATUS)); | ||
373 | |||
374 | return ret; | ||
375 | } | ||
376 | |||
377 | static int rk_iommu_force_reset(struct rk_iommu *iommu) | ||
378 | { | ||
379 | int ret; | ||
380 | u32 dte_addr; | ||
381 | |||
382 | /* | ||
383 | * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY | ||
384 | * and verifying that upper 5 nybbles are read back. | ||
385 | */ | ||
386 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); | ||
387 | |||
388 | dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); | ||
389 | if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { | ||
390 | dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); | ||
391 | return -EFAULT; | ||
392 | } | ||
393 | |||
394 | rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); | ||
395 | |||
396 | ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, | ||
397 | FORCE_RESET_TIMEOUT); | ||
398 | if (ret) | ||
399 | dev_err(iommu->dev, "FORCE_RESET command timed out\n"); | ||
400 | |||
401 | return ret; | ||
402 | } | ||
403 | |||
404 | static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) | ||
405 | { | ||
406 | u32 dte_index, pte_index, page_offset; | ||
407 | u32 mmu_dte_addr; | ||
408 | phys_addr_t mmu_dte_addr_phys, dte_addr_phys; | ||
409 | u32 *dte_addr; | ||
410 | u32 dte; | ||
411 | phys_addr_t pte_addr_phys = 0; | ||
412 | u32 *pte_addr = NULL; | ||
413 | u32 pte = 0; | ||
414 | phys_addr_t page_addr_phys = 0; | ||
415 | u32 page_flags = 0; | ||
416 | |||
417 | dte_index = rk_iova_dte_index(iova); | ||
418 | pte_index = rk_iova_pte_index(iova); | ||
419 | page_offset = rk_iova_page_offset(iova); | ||
420 | |||
421 | mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); | ||
422 | mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; | ||
423 | |||
424 | dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); | ||
425 | dte_addr = phys_to_virt(dte_addr_phys); | ||
426 | dte = *dte_addr; | ||
427 | |||
428 | if (!rk_dte_is_pt_valid(dte)) | ||
429 | goto print_it; | ||
430 | |||
431 | pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); | ||
432 | pte_addr = phys_to_virt(pte_addr_phys); | ||
433 | pte = *pte_addr; | ||
434 | |||
435 | if (!rk_pte_is_page_valid(pte)) | ||
436 | goto print_it; | ||
437 | |||
438 | page_addr_phys = rk_pte_page_address(pte) + page_offset; | ||
439 | page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; | ||
440 | |||
441 | print_it: | ||
442 | dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", | ||
443 | &iova, dte_index, pte_index, page_offset); | ||
444 | dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", | ||
445 | &mmu_dte_addr_phys, &dte_addr_phys, dte, | ||
446 | rk_dte_is_pt_valid(dte), &pte_addr_phys, pte, | ||
447 | rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); | ||
448 | } | ||
449 | |||
450 | static irqreturn_t rk_iommu_irq(int irq, void *dev_id) | ||
451 | { | ||
452 | struct rk_iommu *iommu = dev_id; | ||
453 | u32 status; | ||
454 | u32 int_status; | ||
455 | dma_addr_t iova; | ||
456 | |||
457 | int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); | ||
458 | if (int_status == 0) | ||
459 | return IRQ_NONE; | ||
460 | |||
461 | iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); | ||
462 | |||
463 | if (int_status & RK_MMU_IRQ_PAGE_FAULT) { | ||
464 | int flags; | ||
465 | |||
466 | status = rk_iommu_read(iommu, RK_MMU_STATUS); | ||
467 | flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? | ||
468 | IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
469 | |||
470 | dev_err(iommu->dev, "Page fault at %pad of type %s\n", | ||
471 | &iova, | ||
472 | (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); | ||
473 | |||
474 | log_iova(iommu, iova); | ||
475 | |||
476 | /* | ||
477 | * Report page fault to any installed handlers. | ||
478 | * Ignore the return code, though, since we always zap cache | ||
479 | * and clear the page fault anyway. | ||
480 | */ | ||
481 | if (iommu->domain) | ||
482 | report_iommu_fault(iommu->domain, iommu->dev, iova, | ||
483 | flags); | ||
484 | else | ||
485 | dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); | ||
486 | |||
487 | rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); | ||
488 | rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); | ||
489 | } | ||
490 | |||
491 | if (int_status & RK_MMU_IRQ_BUS_ERROR) | ||
492 | dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); | ||
493 | |||
494 | if (int_status & ~RK_MMU_IRQ_MASK) | ||
495 | dev_err(iommu->dev, "unexpected int_status: %#08x\n", | ||
496 | int_status); | ||
497 | |||
498 | rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); | ||
499 | |||
500 | return IRQ_HANDLED; | ||
501 | } | ||
502 | |||
503 | static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, | ||
504 | dma_addr_t iova) | ||
505 | { | ||
506 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
507 | unsigned long flags; | ||
508 | phys_addr_t pt_phys, phys = 0; | ||
509 | u32 dte, pte; | ||
510 | u32 *page_table; | ||
511 | |||
512 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
513 | |||
514 | dte = rk_domain->dt[rk_iova_dte_index(iova)]; | ||
515 | if (!rk_dte_is_pt_valid(dte)) | ||
516 | goto out; | ||
517 | |||
518 | pt_phys = rk_dte_pt_address(dte); | ||
519 | page_table = (u32 *)phys_to_virt(pt_phys); | ||
520 | pte = page_table[rk_iova_pte_index(iova)]; | ||
521 | if (!rk_pte_is_page_valid(pte)) | ||
522 | goto out; | ||
523 | |||
524 | phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); | ||
525 | out: | ||
526 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
527 | |||
528 | return phys; | ||
529 | } | ||
530 | |||
531 | static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, | ||
532 | dma_addr_t iova, size_t size) | ||
533 | { | ||
534 | struct list_head *pos; | ||
535 | unsigned long flags; | ||
536 | |||
537 | /* shootdown these iova from all iommus using this domain */ | ||
538 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
539 | list_for_each(pos, &rk_domain->iommus) { | ||
540 | struct rk_iommu *iommu; | ||
541 | iommu = list_entry(pos, struct rk_iommu, node); | ||
542 | rk_iommu_zap_lines(iommu, iova, size); | ||
543 | } | ||
544 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
545 | } | ||
546 | |||
547 | static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, | ||
548 | dma_addr_t iova) | ||
549 | { | ||
550 | u32 *page_table, *dte_addr; | ||
551 | u32 dte; | ||
552 | phys_addr_t pt_phys; | ||
553 | |||
554 | assert_spin_locked(&rk_domain->dt_lock); | ||
555 | |||
556 | dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; | ||
557 | dte = *dte_addr; | ||
558 | if (rk_dte_is_pt_valid(dte)) | ||
559 | goto done; | ||
560 | |||
561 | page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); | ||
562 | if (!page_table) | ||
563 | return ERR_PTR(-ENOMEM); | ||
564 | |||
565 | dte = rk_mk_dte(page_table); | ||
566 | *dte_addr = dte; | ||
567 | |||
568 | rk_table_flush(page_table, NUM_PT_ENTRIES); | ||
569 | rk_table_flush(dte_addr, 1); | ||
570 | |||
571 | /* | ||
572 | * Zap the first iova of newly allocated page table so iommu evicts | ||
573 | * old cached value of new dte from the iotlb. | ||
574 | */ | ||
575 | rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); | ||
576 | |||
577 | done: | ||
578 | pt_phys = rk_dte_pt_address(dte); | ||
579 | return (u32 *)phys_to_virt(pt_phys); | ||
580 | } | ||
581 | |||
582 | static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, | ||
583 | u32 *pte_addr, dma_addr_t iova, size_t size) | ||
584 | { | ||
585 | unsigned int pte_count; | ||
586 | unsigned int pte_total = size / SPAGE_SIZE; | ||
587 | |||
588 | assert_spin_locked(&rk_domain->dt_lock); | ||
589 | |||
590 | for (pte_count = 0; pte_count < pte_total; pte_count++) { | ||
591 | u32 pte = pte_addr[pte_count]; | ||
592 | if (!rk_pte_is_page_valid(pte)) | ||
593 | break; | ||
594 | |||
595 | pte_addr[pte_count] = rk_mk_pte_invalid(pte); | ||
596 | } | ||
597 | |||
598 | rk_table_flush(pte_addr, pte_count); | ||
599 | |||
600 | return pte_count * SPAGE_SIZE; | ||
601 | } | ||
602 | |||
603 | static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, | ||
604 | dma_addr_t iova, phys_addr_t paddr, size_t size, | ||
605 | int prot) | ||
606 | { | ||
607 | unsigned int pte_count; | ||
608 | unsigned int pte_total = size / SPAGE_SIZE; | ||
609 | phys_addr_t page_phys; | ||
610 | |||
611 | assert_spin_locked(&rk_domain->dt_lock); | ||
612 | |||
613 | for (pte_count = 0; pte_count < pte_total; pte_count++) { | ||
614 | u32 pte = pte_addr[pte_count]; | ||
615 | |||
616 | if (rk_pte_is_page_valid(pte)) | ||
617 | goto unwind; | ||
618 | |||
619 | pte_addr[pte_count] = rk_mk_pte(paddr, prot); | ||
620 | |||
621 | paddr += SPAGE_SIZE; | ||
622 | } | ||
623 | |||
624 | rk_table_flush(pte_addr, pte_count); | ||
625 | |||
626 | return 0; | ||
627 | unwind: | ||
628 | /* Unmap the range of iovas that we just mapped */ | ||
629 | rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); | ||
630 | |||
631 | iova += pte_count * SPAGE_SIZE; | ||
632 | page_phys = rk_pte_page_address(pte_addr[pte_count]); | ||
633 | pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", | ||
634 | &iova, &page_phys, &paddr, prot); | ||
635 | |||
636 | return -EADDRINUSE; | ||
637 | } | ||
638 | |||
639 | static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, | ||
640 | phys_addr_t paddr, size_t size, int prot) | ||
641 | { | ||
642 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
643 | unsigned long flags; | ||
644 | dma_addr_t iova = (dma_addr_t)_iova; | ||
645 | u32 *page_table, *pte_addr; | ||
646 | int ret; | ||
647 | |||
648 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
649 | |||
650 | /* | ||
651 | * pgsize_bitmap specifies iova sizes that fit in one page table | ||
652 | * (1024 4-KiB pages = 4 MiB). | ||
653 | * So, size will always be 4096 <= size <= 4194304. | ||
654 | * Since iommu_map() guarantees that both iova and size will be | ||
655 | * aligned, we will always only be mapping from a single dte here. | ||
656 | */ | ||
657 | page_table = rk_dte_get_page_table(rk_domain, iova); | ||
658 | if (IS_ERR(page_table)) { | ||
659 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
660 | return PTR_ERR(page_table); | ||
661 | } | ||
662 | |||
663 | pte_addr = &page_table[rk_iova_pte_index(iova)]; | ||
664 | ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); | ||
665 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
666 | |||
667 | return ret; | ||
668 | } | ||
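
The comment in rk_iommu_map() above, together with the two-level layout described later in rk_iommu_domain_init(), implies a 10/10/12 split of the 32-bit iova: a 1024-entry directory (dt), a 1024-entry page table (pt), and a 4 KiB page offset. A minimal standalone sketch of that split; the EX_* macro names are illustrative stand-ins, not the driver's rk_iova_dte_index()/rk_iova_pte_index() helpers:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: dte index = iova[31:22], pte index = iova[21:12],
 * page offset = iova[11:0], which follows from 1024-entry tables and
 * 4 KiB pages. */
#define EX_DTE_INDEX(iova)   (((uint32_t)(iova) >> 22) & 0x3ff)
#define EX_PTE_INDEX(iova)   (((uint32_t)(iova) >> 12) & 0x3ff)
#define EX_PAGE_OFFSET(iova) ((uint32_t)(iova) & 0xfff)

int main(void)
{
	uint32_t iova = 0x10203040;

	/* Because each iommu_map() call covers one power-of-two size
	 * <= 4 MiB with the iova aligned to that size, the range can
	 * never cross a 4 MiB (one-dte) boundary. */
	printf("dte %u pte %u offset 0x%03x\n",
	       EX_DTE_INDEX(iova), EX_PTE_INDEX(iova), EX_PAGE_OFFSET(iova));
	return 0;
}
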
669 | |||
670 | static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, | ||
671 | size_t size) | ||
672 | { | ||
673 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
674 | unsigned long flags; | ||
675 | dma_addr_t iova = (dma_addr_t)_iova; | ||
676 | phys_addr_t pt_phys; | ||
677 | u32 dte; | ||
678 | u32 *pte_addr; | ||
679 | size_t unmap_size; | ||
680 | |||
681 | spin_lock_irqsave(&rk_domain->dt_lock, flags); | ||
682 | |||
683 | /* | ||
684 | * pgsize_bitmap specifies iova sizes that fit in one page table | ||
685 | * (1024 4-KiB pages = 4 MiB). | ||
686 | * So size will always satisfy 4096 <= size <= 4194304. | ||
687 | * Since iommu_unmap() guarantees that both iova and size will be | ||
688 | * aligned, we will always only be unmapping from a single dte here. | ||
689 | */ | ||
690 | dte = rk_domain->dt[rk_iova_dte_index(iova)]; | ||
691 | /* Just return 0 if iova is unmapped */ | ||
692 | if (!rk_dte_is_pt_valid(dte)) { | ||
693 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | pt_phys = rk_dte_pt_address(dte); | ||
698 | pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); | ||
699 | unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); | ||
700 | |||
701 | spin_unlock_irqrestore(&rk_domain->dt_lock, flags); | ||
702 | |||
703 | /* Shootdown iotlb entries for iova range that was just unmapped */ | ||
704 | rk_iommu_zap_iova(rk_domain, iova, unmap_size); | ||
705 | |||
706 | return unmap_size; | ||
707 | } | ||
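
These two callbacks are not invoked directly; they are reached through the generic IOMMU API of this kernel generation. A hedged caller-side sketch (not from this patch set; example_map_one_page and the addresses are placeholders, SZ_4K comes from linux/sizes.h) of how a master driver already attached to the domain would exercise rk_iommu_map()/rk_iommu_unmap():

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Sketch only: map one 4 KiB page for the master, use it, then tear it
 * down. The core splits larger requests per pgsize_bitmap, so the driver
 * callbacks above always see one size-aligned chunk within one dte. */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... program the master device with 'iova' and run the job ... */

	iommu_unmap(domain, iova, SZ_4K);
	return 0;
}
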
708 | |||
709 | static struct rk_iommu *rk_iommu_from_dev(struct device *dev) | ||
710 | { | ||
711 | struct iommu_group *group; | ||
712 | struct device *iommu_dev; | ||
713 | struct rk_iommu *rk_iommu; | ||
714 | |||
715 | group = iommu_group_get(dev); | ||
716 | if (!group) | ||
717 | return NULL; | ||
718 | iommu_dev = iommu_group_get_iommudata(group); | ||
719 | rk_iommu = dev_get_drvdata(iommu_dev); | ||
720 | iommu_group_put(group); | ||
721 | |||
722 | return rk_iommu; | ||
723 | } | ||
724 | |||
725 | static int rk_iommu_attach_device(struct iommu_domain *domain, | ||
726 | struct device *dev) | ||
727 | { | ||
728 | struct rk_iommu *iommu; | ||
729 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
730 | unsigned long flags; | ||
731 | int ret; | ||
732 | phys_addr_t dte_addr; | ||
733 | |||
734 | /* | ||
735 | * Allow 'virtual devices' (e.g., drm) to attach to domain. | ||
736 | * Such a device does not belong to an iommu group. | ||
737 | */ | ||
738 | iommu = rk_iommu_from_dev(dev); | ||
739 | if (!iommu) | ||
740 | return 0; | ||
741 | |||
742 | ret = rk_iommu_enable_stall(iommu); | ||
743 | if (ret) | ||
744 | return ret; | ||
745 | |||
746 | ret = rk_iommu_force_reset(iommu); | ||
747 | if (ret) | ||
748 | return ret; | ||
749 | |||
750 | iommu->domain = domain; | ||
751 | |||
752 | ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, | ||
753 | IRQF_SHARED, dev_name(dev), iommu); | ||
754 | if (ret) | ||
755 | return ret; | ||
756 | |||
757 | dte_addr = virt_to_phys(rk_domain->dt); | ||
758 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); | ||
759 | rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); | ||
760 | rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); | ||
761 | |||
762 | ret = rk_iommu_enable_paging(iommu); | ||
763 | if (ret) | ||
764 | return ret; | ||
765 | |||
766 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
767 | list_add_tail(&iommu->node, &rk_domain->iommus); | ||
768 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
769 | |||
770 | dev_info(dev, "Attached to iommu domain\n"); | ||
771 | |||
772 | rk_iommu_disable_stall(iommu); | ||
773 | |||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static void rk_iommu_detach_device(struct iommu_domain *domain, | ||
778 | struct device *dev) | ||
779 | { | ||
780 | struct rk_iommu *iommu; | ||
781 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
782 | unsigned long flags; | ||
783 | |||
784 | /* Allow 'virtual devices' (e.g., drm) to detach from domain */ | ||
785 | iommu = rk_iommu_from_dev(dev); | ||
786 | if (!iommu) | ||
787 | return; | ||
788 | |||
789 | spin_lock_irqsave(&rk_domain->iommus_lock, flags); | ||
790 | list_del_init(&iommu->node); | ||
791 | spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); | ||
792 | |||
793 | /* Ignore error while disabling, just keep going */ | ||
794 | rk_iommu_enable_stall(iommu); | ||
795 | rk_iommu_disable_paging(iommu); | ||
796 | rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); | ||
797 | rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); | ||
798 | rk_iommu_disable_stall(iommu); | ||
799 | |||
800 | devm_free_irq(dev, iommu->irq, iommu); | ||
801 | |||
802 | iommu->domain = NULL; | ||
803 | |||
804 | dev_info(dev, "Detached from iommu domain\n"); | ||
805 | } | ||
806 | |||
807 | static int rk_iommu_domain_init(struct iommu_domain *domain) | ||
808 | { | ||
809 | struct rk_iommu_domain *rk_domain; | ||
810 | |||
811 | rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); | ||
812 | if (!rk_domain) | ||
813 | return -ENOMEM; | ||
814 | |||
815 | /* | ||
816 | * rk32xx iommus use a two-level pagetable. | ||
817 | * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. | ||
818 | * Allocate one 4 KiB page for each table. | ||
819 | */ | ||
820 | rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); | ||
821 | if (!rk_domain->dt) | ||
822 | goto err_dt; | ||
823 | |||
824 | rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); | ||
825 | |||
826 | spin_lock_init(&rk_domain->iommus_lock); | ||
827 | spin_lock_init(&rk_domain->dt_lock); | ||
828 | INIT_LIST_HEAD(&rk_domain->iommus); | ||
829 | |||
830 | domain->priv = rk_domain; | ||
831 | |||
832 | return 0; | ||
833 | err_dt: | ||
834 | kfree(rk_domain); | ||
835 | return -ENOMEM; | ||
836 | } | ||
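
As a rough footprint check on the layout described in the comment above: each table is a single 4 KiB page holding 1024 four-byte entries, so even a fully populated domain costs one dt page plus at most 1024 pt pages, roughly 4 KiB + 4 MiB of page-table memory for the entire 4 GiB iova space, and the pt pages are only allocated on demand by rk_dte_get_page_table().
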
837 | |||
838 | static void rk_iommu_domain_destroy(struct iommu_domain *domain) | ||
839 | { | ||
840 | struct rk_iommu_domain *rk_domain = domain->priv; | ||
841 | int i; | ||
842 | |||
843 | WARN_ON(!list_empty(&rk_domain->iommus)); | ||
844 | |||
845 | for (i = 0; i < NUM_DT_ENTRIES; i++) { | ||
846 | u32 dte = rk_domain->dt[i]; | ||
847 | if (rk_dte_is_pt_valid(dte)) { | ||
848 | phys_addr_t pt_phys = rk_dte_pt_address(dte); | ||
849 | u32 *page_table = phys_to_virt(pt_phys); | ||
850 | free_page((unsigned long)page_table); | ||
851 | } | ||
852 | } | ||
853 | |||
854 | free_page((unsigned long)rk_domain->dt); | ||
855 | kfree(domain->priv); | ||
856 | domain->priv = NULL; | ||
857 | } | ||
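
Taken together, domain_init/attach_dev/detach_dev/domain_destroy implement the usual domain lifecycle. A hedged caller-side sketch using the API of this era (example_domain_lifecycle and 'master' are placeholders for an iommu master such as the Rockchip DRM device; not part of this driver):

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Sketch only: drive the full lifecycle from a master driver. */
static int example_domain_lifecycle(struct device *master)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type); /* -> rk_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, master);       /* -> rk_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* ... iommu_map()/iommu_unmap() while the device is attached ... */

	iommu_detach_device(domain, master);             /* -> rk_iommu_detach_device() */
out_free:
	iommu_domain_free(domain);                       /* -> rk_iommu_domain_destroy() */
	return ret;
}
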
858 | |||
859 | static bool rk_iommu_is_dev_iommu_master(struct device *dev) | ||
860 | { | ||
861 | struct device_node *np = dev->of_node; | ||
862 | int ret; | ||
863 | |||
864 | /* | ||
865 | * An iommu master has an iommus property containing a list of phandles | ||
866 | * to iommu nodes, each with an #iommu-cells property with value 0. | ||
867 | */ | ||
868 | ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); | ||
869 | return (ret > 0); | ||
870 | } | ||
871 | |||
872 | static int rk_iommu_group_set_iommudata(struct iommu_group *group, | ||
873 | struct device *dev) | ||
874 | { | ||
875 | struct device_node *np = dev->of_node; | ||
876 | struct platform_device *pd; | ||
877 | int ret; | ||
878 | struct of_phandle_args args; | ||
879 | |||
880 | /* | ||
881 | * An iommu master has an iommus property containing a list of phandles | ||
882 | * to iommu nodes, each with an #iommu-cells property with value 0. | ||
883 | */ | ||
884 | ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, | ||
885 | &args); | ||
886 | if (ret) { | ||
887 | dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", | ||
888 | np->full_name, ret); | ||
889 | return ret; | ||
890 | } | ||
891 | if (args.args_count != 0) { | ||
892 | dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", | ||
893 | args.np->full_name, args.args_count); | ||
894 | return -EINVAL; | ||
895 | } | ||
896 | |||
897 | pd = of_find_device_by_node(args.np); | ||
898 | of_node_put(args.np); | ||
899 | if (!pd) { | ||
900 | dev_err(dev, "iommu %s not found\n", args.np->full_name); | ||
901 | return -EPROBE_DEFER; | ||
902 | } | ||
903 | |||
904 | /* TODO(djkurtz): handle multiple slave iommus for a single master */ | ||
905 | iommu_group_set_iommudata(group, &pd->dev, NULL); | ||
906 | |||
907 | return 0; | ||
908 | } | ||
909 | |||
910 | static int rk_iommu_add_device(struct device *dev) | ||
911 | { | ||
912 | struct iommu_group *group; | ||
913 | int ret; | ||
914 | |||
915 | if (!rk_iommu_is_dev_iommu_master(dev)) | ||
916 | return -ENODEV; | ||
917 | |||
918 | group = iommu_group_get(dev); | ||
919 | if (!group) { | ||
920 | group = iommu_group_alloc(); | ||
921 | if (IS_ERR(group)) { | ||
922 | dev_err(dev, "Failed to allocate IOMMU group\n"); | ||
923 | return PTR_ERR(group); | ||
924 | } | ||
925 | } | ||
926 | |||
927 | ret = iommu_group_add_device(group, dev); | ||
928 | if (ret) | ||
929 | goto err_put_group; | ||
930 | |||
931 | ret = rk_iommu_group_set_iommudata(group, dev); | ||
932 | if (ret) | ||
933 | goto err_remove_device; | ||
934 | |||
935 | iommu_group_put(group); | ||
936 | |||
937 | return 0; | ||
938 | |||
939 | err_remove_device: | ||
940 | iommu_group_remove_device(dev); | ||
941 | err_put_group: | ||
942 | iommu_group_put(group); | ||
943 | return ret; | ||
944 | } | ||
945 | |||
946 | static void rk_iommu_remove_device(struct device *dev) | ||
947 | { | ||
948 | if (!rk_iommu_is_dev_iommu_master(dev)) | ||
949 | return; | ||
950 | |||
951 | iommu_group_remove_device(dev); | ||
952 | } | ||
953 | |||
954 | static const struct iommu_ops rk_iommu_ops = { | ||
955 | .domain_init = rk_iommu_domain_init, | ||
956 | .domain_destroy = rk_iommu_domain_destroy, | ||
957 | .attach_dev = rk_iommu_attach_device, | ||
958 | .detach_dev = rk_iommu_detach_device, | ||
959 | .map = rk_iommu_map, | ||
960 | .unmap = rk_iommu_unmap, | ||
961 | .add_device = rk_iommu_add_device, | ||
962 | .remove_device = rk_iommu_remove_device, | ||
963 | .iova_to_phys = rk_iommu_iova_to_phys, | ||
964 | .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, | ||
965 | }; | ||
966 | |||
967 | static int rk_iommu_probe(struct platform_device *pdev) | ||
968 | { | ||
969 | struct device *dev = &pdev->dev; | ||
970 | struct rk_iommu *iommu; | ||
971 | struct resource *res; | ||
972 | |||
973 | iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); | ||
974 | if (!iommu) | ||
975 | return -ENOMEM; | ||
976 | |||
977 | platform_set_drvdata(pdev, iommu); | ||
978 | iommu->dev = dev; | ||
979 | |||
980 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
981 | iommu->base = devm_ioremap_resource(&pdev->dev, res); | ||
982 | if (IS_ERR(iommu->base)) | ||
983 | return PTR_ERR(iommu->base); | ||
984 | |||
985 | iommu->irq = platform_get_irq(pdev, 0); | ||
986 | if (iommu->irq < 0) { | ||
987 | dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); | ||
988 | return -ENXIO; | ||
989 | } | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int rk_iommu_remove(struct platform_device *pdev) | ||
995 | { | ||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | #ifdef CONFIG_OF | ||
1000 | static const struct of_device_id rk_iommu_dt_ids[] = { | ||
1001 | { .compatible = "rockchip,iommu" }, | ||
1002 | { /* sentinel */ } | ||
1003 | }; | ||
1004 | MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); | ||
1005 | #endif | ||
1006 | |||
1007 | static struct platform_driver rk_iommu_driver = { | ||
1008 | .probe = rk_iommu_probe, | ||
1009 | .remove = rk_iommu_remove, | ||
1010 | .driver = { | ||
1011 | .name = "rk_iommu", | ||
1012 | .owner = THIS_MODULE, | ||
1013 | .of_match_table = of_match_ptr(rk_iommu_dt_ids), | ||
1014 | }, | ||
1015 | }; | ||
1016 | |||
1017 | static int __init rk_iommu_init(void) | ||
1018 | { | ||
1019 | int ret; | ||
1020 | |||
1021 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | ||
1022 | if (ret) | ||
1023 | return ret; | ||
1024 | |||
1025 | return platform_driver_register(&rk_iommu_driver); | ||
1026 | } | ||
1027 | static void __exit rk_iommu_exit(void) | ||
1028 | { | ||
1029 | platform_driver_unregister(&rk_iommu_driver); | ||
1030 | } | ||
1031 | |||
1032 | subsys_initcall(rk_iommu_init); | ||
1033 | module_exit(rk_iommu_exit); | ||
1034 | |||
1035 | MODULE_DESCRIPTION("IOMMU API for Rockchip"); | ||
1036 | MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>"); | ||
1037 | MODULE_ALIAS("platform:rockchip-iommu"); | ||
1038 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 593fff99e6bf..30624954dec5 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -30,6 +30,12 @@ | |||
30 | 30 | ||
31 | struct acpi_dmar_header; | 31 | struct acpi_dmar_header; |
32 | 32 | ||
33 | #ifdef CONFIG_X86 | ||
34 | # define DMAR_UNITS_SUPPORTED MAX_IO_APICS | ||
35 | #else | ||
36 | # define DMAR_UNITS_SUPPORTED 64 | ||
37 | #endif | ||
38 | |||
33 | /* DMAR Flags */ | 39 | /* DMAR Flags */ |
34 | #define DMAR_INTR_REMAP 0x1 | 40 | #define DMAR_INTR_REMAP 0x1 |
35 | #define DMAR_X2APIC_OPT_OUT 0x2 | 41 | #define DMAR_X2APIC_OPT_OUT 0x2 |
@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | |||
120 | /* Intel IOMMU detection */ | 126 | /* Intel IOMMU detection */ |
121 | extern int detect_intel_iommu(void); | 127 | extern int detect_intel_iommu(void); |
122 | extern int enable_drhd_fault_handling(void); | 128 | extern int enable_drhd_fault_handling(void); |
129 | extern int dmar_device_add(acpi_handle handle); | ||
130 | extern int dmar_device_remove(acpi_handle handle); | ||
131 | |||
132 | static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) | ||
133 | { | ||
134 | return 0; | ||
135 | } | ||
123 | 136 | ||
124 | #ifdef CONFIG_INTEL_IOMMU | 137 | #ifdef CONFIG_INTEL_IOMMU |
125 | extern int iommu_detected, no_iommu; | 138 | extern int iommu_detected, no_iommu; |
126 | extern int intel_iommu_init(void); | 139 | extern int intel_iommu_init(void); |
127 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | 140 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); |
128 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | 141 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); |
142 | extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
143 | extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
144 | extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
129 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | 145 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); |
130 | #else /* !CONFIG_INTEL_IOMMU: */ | 146 | #else /* !CONFIG_INTEL_IOMMU: */ |
131 | static inline int intel_iommu_init(void) { return -ENODEV; } | 147 | static inline int intel_iommu_init(void) { return -ENODEV; } |
132 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 148 | |
149 | #define dmar_parse_one_rmrr dmar_res_noop | ||
150 | #define dmar_parse_one_atsr dmar_res_noop | ||
151 | #define dmar_check_one_atsr dmar_res_noop | ||
152 | #define dmar_release_one_atsr dmar_res_noop | ||
153 | |||
154 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | ||
133 | { | 155 | { |
134 | return 0; | 156 | return 0; |
135 | } | 157 | } |
136 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | 158 | |
159 | static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
137 | { | 160 | { |
138 | return 0; | 161 | return 0; |
139 | } | 162 | } |
140 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | 163 | #endif /* CONFIG_INTEL_IOMMU */ |
164 | |||
165 | #ifdef CONFIG_IRQ_REMAP | ||
166 | extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
167 | #else /* CONFIG_IRQ_REMAP */ | ||
168 | static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
169 | { return 0; } | ||
170 | #endif /* CONFIG_IRQ_REMAP */ | ||
171 | |||
172 | #else /* CONFIG_DMAR_TABLE */ | ||
173 | |||
174 | static inline int dmar_device_add(void *handle) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static inline int dmar_device_remove(void *handle) | ||
141 | { | 180 | { |
142 | return 0; | 181 | return 0; |
143 | } | 182 | } |
144 | #endif /* CONFIG_INTEL_IOMMU */ | ||
145 | 183 | ||
146 | #endif /* CONFIG_DMAR_TABLE */ | 184 | #endif /* CONFIG_DMAR_TABLE */ |
147 | 185 | ||
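
The dmar.h hunk above replaces the per-function !CONFIG_INTEL_IOMMU stubs with #define aliases onto a single shared no-op, dmar_res_noop(), whose signature matches the new (header, arg) callback form. A minimal standalone illustration of that pattern with made-up names (CONFIG_FOO, foo_*); it is not the kernel's code:

#include <stdio.h>

struct foo_header { int type; };

/* One no-op whose signature matches every real parser callback. */
static inline int foo_res_noop(struct foo_header *hdr, void *arg)
{
	return 0;
}

#ifdef CONFIG_FOO
int foo_parse_one_rmrr(struct foo_header *hdr, void *arg);
int foo_parse_one_atsr(struct foo_header *hdr, void *arg);
#else
/* Every parser collapses onto the same stub; call sites compile unchanged. */
#define foo_parse_one_rmrr foo_res_noop
#define foo_parse_one_atsr foo_res_noop
#endif

int main(void)
{
	struct foo_header hdr = { .type = 0 };

	return foo_parse_one_rmrr(&hdr, NULL);
}
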
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b29a5982e1c3..7a7bd15e54f1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -28,7 +28,7 @@ | |||
28 | #define IOMMU_READ (1 << 0) | 28 | #define IOMMU_READ (1 << 0) |
29 | #define IOMMU_WRITE (1 << 1) | 29 | #define IOMMU_WRITE (1 << 1) |
30 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ | 30 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ |
31 | #define IOMMU_EXEC (1 << 3) | 31 | #define IOMMU_NOEXEC (1 << 3) |
32 | 32 | ||
33 | struct iommu_ops; | 33 | struct iommu_ops; |
34 | struct iommu_group; | 34 | struct iommu_group; |
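
The renamed IOMMU_NOEXEC mapping flag is paired with the IOMMU_CAP_NOEXEC capability added to enum iommu_cap in the next hunk, so callers can probe for support before requesting it. A hedged sketch of the intended caller-side usage, assuming the era's bus-level iommu_capable() query (example_map_noexec is a placeholder; not taken from this patch set):

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Sketch only: request a non-executable mapping when the IOMMU can
 * enforce it, and fall back to a plain RW mapping otherwise. */
static int example_map_noexec(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_capable(&platform_bus_type, IOMMU_CAP_NOEXEC))
		prot |= IOMMU_NOEXEC;

	return iommu_map(domain, iova, paddr, size, prot);
}
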
@@ -62,6 +62,7 @@ enum iommu_cap { | |||
62 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA | 62 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA |
63 | transactions */ | 63 | transactions */ |
64 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ | 64 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ |
65 | IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | /* | 68 | /* |