28 files changed, 1521 insertions(+), 942 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index 7b94c88cf2ee..be57550e14e4 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -27,6 +27,12 @@ the PCIe specification. | |||
27 | * "cmdq-sync" - CMD_SYNC complete | 27 | * "cmdq-sync" - CMD_SYNC complete |
28 | * "gerror" - Global Error activated | 28 | * "gerror" - Global Error activated |
29 | 29 | ||
30 | - #iommu-cells : See the generic IOMMU binding described in | ||
31 | devicetree/bindings/pci/pci-iommu.txt | ||
32 | for details. For SMMUv3, must be 1, with each cell | ||
33 | describing a single stream ID. All possible stream | ||
34 | IDs which a device may emit must be described. | ||
35 | |||
30 | ** SMMUv3 optional properties: | 36 | ** SMMUv3 optional properties: |
31 | 37 | ||
32 | - dma-coherent : Present if DMA operations made by the SMMU (page | 38 | - dma-coherent : Present if DMA operations made by the SMMU (page |
@@ -54,6 +60,6 @@ the PCIe specification. | |||
54 | <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>; | 60 | <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>; |
55 | interrupt-names = "eventq", "priq", "cmdq-sync", "gerror"; | 61 | interrupt-names = "eventq", "priq", "cmdq-sync", "gerror"; |
56 | dma-coherent; | 62 | dma-coherent; |
57 | #iommu-cells = <0>; | 63 | #iommu-cells = <1>; |
58 | msi-parent = <&its 0xff0000>; | 64 | msi-parent = <&its 0xff0000>; |
59 | }; | 65 | }; |
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 19fe6f2c83f6..e862d1485205 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -35,12 +35,16 @@ conditions. | |||
35 | interrupt per context bank. In the case of a single, | 35 | interrupt per context bank. In the case of a single, |
36 | combined interrupt, it must be listed multiple times. | 36 | combined interrupt, it must be listed multiple times. |
37 | 37 | ||
38 | - mmu-masters : A list of phandles to device nodes representing bus | 38 | - #iommu-cells : See Documentation/devicetree/bindings/iommu/iommu.txt |
39 | masters for which the SMMU can provide a translation | 39 | for details. With a value of 1, each "iommus" entry |
40 | and their corresponding StreamIDs (see example below). | 40 | represents a distinct stream ID emitted by that device |
41 | Each device node linked from this list must have a | 41 | into the relevant SMMU. |
42 | "#stream-id-cells" property, indicating the number of | 42 | |
43 | StreamIDs associated with it. | 43 | SMMUs with stream matching support and complex masters |
44 | may use a value of 2, where the second cell represents | ||
45 | an SMR mask to combine with the ID in the first cell. | ||
46 | Care must be taken to ensure the set of matched IDs | ||
47 | does not result in conflicts. | ||
44 | 48 | ||
45 | ** System MMU optional properties: | 49 | ** System MMU optional properties: |
46 | 50 | ||
@@ -56,9 +60,20 @@ conditions. | |||
56 | aliases of secure registers have to be used during | 60 | aliases of secure registers have to be used during |
57 | SMMU configuration. | 61 | SMMU configuration. |
58 | 62 | ||
59 | Example: | 63 | ** Deprecated properties: |
64 | |||
65 | - mmu-masters (deprecated in favour of the generic "iommus" binding) : | ||
66 | A list of phandles to device nodes representing bus | ||
67 | masters for which the SMMU can provide a translation | ||
68 | and their corresponding Stream IDs. Each device node | ||
69 | linked from this list must have a "#stream-id-cells" | ||
70 | property, indicating the number of Stream ID | ||
71 | arguments associated with its phandle. | ||
60 | 72 | ||
61 | smmu { | 73 | ** Examples: |
74 | |||
75 | /* SMMU with stream matching or stream indexing */ | ||
76 | smmu1: iommu { | ||
62 | compatible = "arm,smmu-v1"; | 77 | compatible = "arm,smmu-v1"; |
63 | reg = <0xba5e0000 0x10000>; | 78 | reg = <0xba5e0000 0x10000>; |
64 | #global-interrupts = <2>; | 79 | #global-interrupts = <2>; |
@@ -68,11 +83,29 @@ Example: | |||
68 | <0 35 4>, | 83 | <0 35 4>, |
69 | <0 36 4>, | 84 | <0 36 4>, |
70 | <0 37 4>; | 85 | <0 37 4>; |
86 | #iommu-cells = <1>; | ||
87 | }; | ||
88 | |||
89 | /* device with two stream IDs, 0 and 7 */ | ||
90 | master1 { | ||
91 | iommus = <&smmu1 0>, | ||
92 | <&smmu1 7>; | ||
93 | }; | ||
94 | |||
95 | |||
96 | /* SMMU with stream matching */ | ||
97 | smmu2: iommu { | ||
98 | ... | ||
99 | #iommu-cells = <2>; | ||
100 | }; | ||
101 | |||
102 | /* device with stream IDs 0 and 7 */ | ||
103 | master2 { | ||
104 | iommus = <&smmu2 0 0>, | ||
105 | <&smmu2 7 0>; | ||
106 | }; | ||
71 | 107 | ||
72 | /* | 108 | /* device with stream IDs 1, 17, 33 and 49 */ |
73 | * Two DMA controllers, the first with two StreamIDs (0xd01d | 109 | master3 { |
74 | * and 0xd01e) and the second with only one (0xd11c). | 110 | iommus = <&smmu2 1 0x30>; |
75 | */ | ||
76 | mmu-masters = <&dma0 0xd01d 0xd01e>, | ||
77 | <&dma1 0xd11c>; | ||
78 | }; | 111 | }; |
diff --git a/Documentation/devicetree/bindings/pci/pci-iommu.txt b/Documentation/devicetree/bindings/pci/pci-iommu.txt
new file mode 100644
index 000000000000..56c829621b9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-iommu.txt
@@ -0,0 +1,171 @@ | |||
1 | This document describes the generic device tree binding for describing the | ||
2 | relationship between PCI(e) devices and IOMMU(s). | ||
3 | |||
4 | Each PCI(e) device under a root complex is uniquely identified by its Requester | ||
5 | ID (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and | ||
6 | Function number. | ||
7 | |||
8 | For the purpose of this document, when treated as a numeric value, a RID is | ||
9 | formatted such that: | ||
10 | |||
11 | * Bits [15:8] are the Bus number. | ||
12 | * Bits [7:3] are the Device number. | ||
13 | * Bits [2:0] are the Function number. | ||
14 | * Any other bits required for padding must be zero. | ||
15 | |||
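
(Editorial aside, not part of the patch or the binding text: as a minimal C sketch of the layout listed above, a numeric RID can be packed from bus/device/function as below; the helper name is hypothetical.)

	#include <stdint.h>

	/* Pack bus [15:8], device [7:3] and function [2:0] into a RID. */
	static inline uint16_t pci_rid(uint8_t bus, uint8_t dev, uint8_t fn)
	{
		return ((uint16_t)bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	}

	/* e.g. bus 0x01, device 0x02, function 0x3 gives RID 0x0113 */
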
16 | IOMMUs may distinguish PCI devices through sideband data derived from the | ||
17 | Requester ID. While a given PCI device can only master through one IOMMU, a | ||
18 | root complex may split masters across a set of IOMMUs (e.g. with one IOMMU per | ||
19 | bus). | ||
20 | |||
21 | The generic 'iommus' property is insufficient to describe this relationship, | ||
22 | and a mechanism is required to map from a PCI device to its IOMMU and sideband | ||
23 | data. | ||
24 | |||
25 | For generic IOMMU bindings, see | ||
26 | Documentation/devicetree/bindings/iommu/iommu.txt. | ||
27 | |||
28 | |||
29 | PCI root complex | ||
30 | ================ | ||
31 | |||
32 | Optional properties | ||
33 | ------------------- | ||
34 | |||
35 | - iommu-map: Maps a Requester ID to an IOMMU and associated iommu-specifier | ||
36 | data. | ||
37 | |||
38 | The property is an arbitrary number of tuples of | ||
39 | (rid-base,iommu,iommu-base,length). | ||
40 | |||
41 | Any RID r in the interval [rid-base, rid-base + length) is associated with | ||
42 | the listed IOMMU, with the iommu-specifier (r - rid-base + iommu-base). | ||
43 | |||
44 | - iommu-map-mask: A mask to be applied to each Requester ID prior to being | ||
45 | mapped to an iommu-specifier per the iommu-map property. | ||
46 | |||
47 | |||
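
(Editorial aside, not part of the patch: to make the mapping rule concrete, here is a minimal C sketch of how an OS could resolve a RID through iommu-map and iommu-map-mask; the structure and function names are illustrative, not kernel API.)

	#include <stdbool.h>
	#include <stdint.h>

	struct iommu_map_entry {
		uint32_t rid_base;   /* first RID covered by the entry */
		uint32_t iommu;      /* stand-in for the IOMMU phandle */
		uint32_t iommu_base; /* first iommu-specifier in the range */
		uint32_t length;     /* number of RIDs covered */
	};

	/* Apply iommu-map-mask (all ones if absent), then search the tuples. */
	static bool resolve_rid(const struct iommu_map_entry *map, int n,
				uint32_t mask, uint32_t rid,
				uint32_t *iommu, uint32_t *spec)
	{
		int i;

		rid &= mask;
		for (i = 0; i < n; i++) {
			if (rid >= map[i].rid_base &&
			    rid < map[i].rid_base + map[i].length) {
				*iommu = map[i].iommu;
				*spec = rid - map[i].rid_base + map[i].iommu_base;
				return true;
			}
		}
		return false; /* RID is not translated by any IOMMU */
	}

With the 0xfff8 mask of Example (2) below, all eight functions of bus 1, device 4 (RIDs 0x0120-0x0127) mask down to 0x0120 and therefore share iommu-specifier 0x0120 under an identity map.
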
48 | Example (1) | ||
49 | =========== | ||
50 | |||
51 | / { | ||
52 | #address-cells = <1>; | ||
53 | #size-cells = <1>; | ||
54 | |||
55 | iommu: iommu@a { | ||
56 | reg = <0xa 0x1>; | ||
57 | compatible = "vendor,some-iommu"; | ||
58 | #iommu-cells = <1>; | ||
59 | }; | ||
60 | |||
61 | pci: pci@f { | ||
62 | reg = <0xf 0x1>; | ||
63 | compatible = "vendor,pcie-root-complex"; | ||
64 | device_type = "pci"; | ||
65 | |||
66 | /* | ||
67 | * The sideband data provided to the IOMMU is the RID, | ||
68 | * identity-mapped. | ||
69 | */ | ||
70 | iommu-map = <0x0 &iommu 0x0 0x10000>; | ||
71 | }; | ||
72 | }; | ||
73 | |||
74 | |||
75 | Example (2) | ||
76 | =========== | ||
77 | |||
78 | / { | ||
79 | #address-cells = <1>; | ||
80 | #size-cells = <1>; | ||
81 | |||
82 | iommu: iommu@a { | ||
83 | reg = <0xa 0x1>; | ||
84 | compatible = "vendor,some-iommu"; | ||
85 | #iommu-cells = <1>; | ||
86 | }; | ||
87 | |||
88 | pci: pci@f { | ||
89 | reg = <0xf 0x1>; | ||
90 | compatible = "vendor,pcie-root-complex"; | ||
91 | device_type = "pci"; | ||
92 | |||
93 | /* | ||
94 | * The sideband data provided to the IOMMU is the RID with the | ||
95 | * function bits masked out. | ||
96 | */ | ||
97 | iommu-map = <0x0 &iommu 0x0 0x10000>; | ||
98 | iommu-map-mask = <0xfff8>; | ||
99 | }; | ||
100 | }; | ||
101 | |||
102 | |||
103 | Example (3) | ||
104 | =========== | ||
105 | |||
106 | / { | ||
107 | #address-cells = <1>; | ||
108 | #size-cells = <1>; | ||
109 | |||
110 | iommu: iommu@a { | ||
111 | reg = <0xa 0x1>; | ||
112 | compatible = "vendor,some-iommu"; | ||
113 | #iommu-cells = <1>; | ||
114 | }; | ||
115 | |||
116 | pci: pci@f { | ||
117 | reg = <0xf 0x1>; | ||
118 | compatible = "vendor,pcie-root-complex"; | ||
119 | device_type = "pci"; | ||
120 | |||
121 | /* | ||
122 | * The sideband data provided to the IOMMU is the RID, | ||
123 | * but the high bits of the bus number are flipped. | ||
124 | */ | ||
125 | iommu-map = <0x0000 &iommu 0x8000 0x8000>, | ||
126 | <0x8000 &iommu 0x0000 0x8000>; | ||
127 | }; | ||
128 | }; | ||
129 | |||
130 | |||
131 | Example (4) | ||
132 | =========== | ||
133 | |||
134 | / { | ||
135 | #address-cells = <1>; | ||
136 | #size-cells = <1>; | ||
137 | |||
138 | iommu_a: iommu@a { | ||
139 | reg = <0xa 0x1>; | ||
140 | compatible = "vendor,some-iommu"; | ||
141 | #iommu-cells = <1>; | ||
142 | }; | ||
143 | |||
144 | iommu_b: iommu@b { | ||
145 | reg = <0xb 0x1>; | ||
146 | compatible = "vendor,some-iommu"; | ||
147 | #iommu-cells = <1>; | ||
148 | }; | ||
149 | |||
150 | iommu_c: iommu@c { | ||
151 | reg = <0xc 0x1>; | ||
152 | compatible = "vendor,some-iommu"; | ||
153 | #iommu-cells = <1>; | ||
154 | }; | ||
155 | |||
156 | pci: pci@f { | ||
157 | reg = <0xf 0x1>; | ||
158 | compatible = "vendor,pcie-root-complex"; | ||
159 | device_type = "pci"; | ||
160 | |||
161 | /* | ||
162 | * Devices with bus number 0-127 are mastered via IOMMU | ||
163 | * a, with sideband data being RID[14:0]. | ||
164 | * Devices with bus number 128-255 are mastered via | ||
165 | * IOMMU b, with sideband data being RID[14:0]. | ||
166 | * No devices master via IOMMU c. | ||
167 | */ | ||
168 | iommu-map = <0x0000 &iommu_a 0x0000 0x8000>, | ||
169 | <0x8000 &iommu_b 0x0000 0x8000>; | ||
170 | }; | ||
171 | }; | ||
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index bdacead5b802..3f74d0d98de6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -828,7 +828,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, | |||
828 | * then the IOMMU core will have already configured a group for this | 828 | * then the IOMMU core will have already configured a group for this |
829 | * device, and allocated the default domain for that group. | 829 | * device, and allocated the default domain for that group. |
830 | */ | 830 | */ |
831 | if (!domain || iommu_dma_init_domain(domain, dma_base, size)) { | 831 | if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) { |
832 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", | 832 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", |
833 | dev_name(dev)); | 833 | dev_name(dev)); |
834 | return false; | 834 | return false; |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index d28bdabcc87e..7ef4a099defc 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -255,7 +255,6 @@ CONFIG_RTC_CLASS=y | |||
255 | CONFIG_DMADEVICES=y | 255 | CONFIG_DMADEVICES=y |
256 | CONFIG_EEEPC_LAPTOP=y | 256 | CONFIG_EEEPC_LAPTOP=y |
257 | CONFIG_AMD_IOMMU=y | 257 | CONFIG_AMD_IOMMU=y |
258 | CONFIG_AMD_IOMMU_STATS=y | ||
259 | CONFIG_INTEL_IOMMU=y | 258 | CONFIG_INTEL_IOMMU=y |
260 | # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set | 259 | # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set |
261 | CONFIG_EFI_VARS=y | 260 | CONFIG_EFI_VARS=y |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index c8de4913fdbe..87f6b5672e11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -66,7 +66,7 @@ static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv, | |||
66 | if (ret) | 66 | if (ret) |
67 | goto free_domain; | 67 | goto free_domain; |
68 | 68 | ||
69 | ret = iommu_dma_init_domain(domain, start, size); | 69 | ret = iommu_dma_init_domain(domain, start, size, NULL); |
70 | if (ret) | 70 | if (ret) |
71 | goto put_cookie; | 71 | goto put_cookie; |
72 | 72 | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d432ca828472..8ee54d71c7eb 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -309,7 +309,7 @@ config ARM_SMMU | |||
309 | 309 | ||
310 | config ARM_SMMU_V3 | 310 | config ARM_SMMU_V3 |
311 | bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support" | 311 | bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support" |
312 | depends on ARM64 && PCI | 312 | depends on ARM64 |
313 | select IOMMU_API | 313 | select IOMMU_API |
314 | select IOMMU_IO_PGTABLE_LPAE | 314 | select IOMMU_IO_PGTABLE_LPAE |
315 | select GENERIC_MSI_IRQ_DOMAIN | 315 | select GENERIC_MSI_IRQ_DOMAIN |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 58fa8cc0262b..754595ee11b6 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -103,7 +103,7 @@ struct flush_queue { | |||
103 | struct flush_queue_entry *entries; | 103 | struct flush_queue_entry *entries; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | DEFINE_PER_CPU(struct flush_queue, flush_queue); | 106 | static DEFINE_PER_CPU(struct flush_queue, flush_queue); |
107 | 107 | ||
108 | static atomic_t queue_timer_on; | 108 | static atomic_t queue_timer_on; |
109 | static struct timer_list queue_timer; | 109 | static struct timer_list queue_timer; |
@@ -1361,7 +1361,8 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
1361 | 1361 | ||
1362 | __npte = PM_LEVEL_PDE(level, virt_to_phys(page)); | 1362 | __npte = PM_LEVEL_PDE(level, virt_to_phys(page)); |
1363 | 1363 | ||
1364 | if (cmpxchg64(pte, __pte, __npte)) { | 1364 | /* pte could have been changed somewhere. */ |
1365 | if (cmpxchg64(pte, __pte, __npte) != __pte) { | ||
1365 | free_page((unsigned long)page); | 1366 | free_page((unsigned long)page); |
1366 | continue; | 1367 | continue; |
1367 | } | 1368 | } |
@@ -1741,6 +1742,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) | |||
1741 | 1742 | ||
1742 | free_pagetable(&dom->domain); | 1743 | free_pagetable(&dom->domain); |
1743 | 1744 | ||
1745 | if (dom->domain.id) | ||
1746 | domain_id_free(dom->domain.id); | ||
1747 | |||
1744 | kfree(dom); | 1748 | kfree(dom); |
1745 | } | 1749 | } |
1746 | 1750 | ||
@@ -3649,7 +3653,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) | |||
3649 | 3653 | ||
3650 | table = irq_lookup_table[devid]; | 3654 | table = irq_lookup_table[devid]; |
3651 | if (table) | 3655 | if (table) |
3652 | goto out; | 3656 | goto out_unlock; |
3653 | 3657 | ||
3654 | alias = amd_iommu_alias_table[devid]; | 3658 | alias = amd_iommu_alias_table[devid]; |
3655 | table = irq_lookup_table[alias]; | 3659 | table = irq_lookup_table[alias]; |
@@ -3663,7 +3667,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) | |||
3663 | /* Nothing there yet, allocate new irq remapping table */ | 3667 | /* Nothing there yet, allocate new irq remapping table */ |
3664 | table = kzalloc(sizeof(*table), GFP_ATOMIC); | 3668 | table = kzalloc(sizeof(*table), GFP_ATOMIC); |
3665 | if (!table) | 3669 | if (!table) |
3666 | goto out; | 3670 | goto out_unlock; |
3667 | 3671 | ||
3668 | /* Initialize table spin-lock */ | 3672 | /* Initialize table spin-lock */ |
3669 | spin_lock_init(&table->lock); | 3673 | spin_lock_init(&table->lock); |
@@ -3676,7 +3680,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) | |||
3676 | if (!table->table) { | 3680 | if (!table->table) { |
3677 | kfree(table); | 3681 | kfree(table); |
3678 | table = NULL; | 3682 | table = NULL; |
3679 | goto out; | 3683 | goto out_unlock; |
3680 | } | 3684 | } |
3681 | 3685 | ||
3682 | if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) | 3686 | if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) |
@@ -4153,6 +4157,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, | |||
4153 | } | 4157 | } |
4154 | if (index < 0) { | 4158 | if (index < 0) { |
4155 | pr_warn("Failed to allocate IRTE\n"); | 4159 | pr_warn("Failed to allocate IRTE\n"); |
4160 | ret = index; | ||
4156 | goto out_free_parent; | 4161 | goto out_free_parent; |
4157 | } | 4162 | } |
4158 | 4163 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index cd1713631a4a..157e93421fb8 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/acpi.h> | 21 | #include <linux/acpi.h> |
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/bitmap.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/syscore_ops.h> | 25 | #include <linux/syscore_ops.h> |
25 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
@@ -2285,7 +2286,7 @@ static int __init early_amd_iommu_init(void) | |||
2285 | * never allocate domain 0 because its used as the non-allocated and | 2286 | * never allocate domain 0 because its used as the non-allocated and |
2286 | * error value placeholder | 2287 | * error value placeholder |
2287 | */ | 2288 | */ |
2288 | amd_iommu_pd_alloc_bitmap[0] = 1; | 2289 | __set_bit(0, amd_iommu_pd_alloc_bitmap); |
2289 | 2290 | ||
2290 | spin_lock_init(&amd_iommu_pd_lock); | 2291 | spin_lock_init(&amd_iommu_pd_lock); |
2291 | 2292 | ||
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index faa3b4895cf0..7eb60c15c582 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -79,12 +79,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) | |||
79 | extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, | 79 | extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, |
80 | int status, int tag); | 80 | int status, int tag); |
81 | 81 | ||
82 | #ifndef CONFIG_AMD_IOMMU_STATS | ||
83 | |||
84 | static inline void amd_iommu_stats_init(void) { } | ||
85 | |||
86 | #endif /* !CONFIG_AMD_IOMMU_STATS */ | ||
87 | |||
88 | static inline bool is_rd890_iommu(struct pci_dev *pdev) | 82 | static inline bool is_rd890_iommu(struct pci_dev *pdev) |
89 | { | 83 | { |
90 | return (pdev->vendor == PCI_VENDOR_ID_ATI) && | 84 | return (pdev->vendor == PCI_VENDOR_ID_ATI) && |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 641e88761319..15c01c3cd540 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -30,10 +30,13 @@ | |||
30 | #include <linux/msi.h> | 30 | #include <linux/msi.h> |
31 | #include <linux/of.h> | 31 | #include <linux/of.h> |
32 | #include <linux/of_address.h> | 32 | #include <linux/of_address.h> |
33 | #include <linux/of_iommu.h> | ||
33 | #include <linux/of_platform.h> | 34 | #include <linux/of_platform.h> |
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
36 | 37 | ||
38 | #include <linux/amba/bus.h> | ||
39 | |||
37 | #include "io-pgtable.h" | 40 | #include "io-pgtable.h" |
38 | 41 | ||
39 | /* MMIO registers */ | 42 | /* MMIO registers */ |
@@ -123,6 +126,10 @@ | |||
123 | #define CR2_RECINVSID (1 << 1) | 126 | #define CR2_RECINVSID (1 << 1) |
124 | #define CR2_E2H (1 << 0) | 127 | #define CR2_E2H (1 << 0) |
125 | 128 | ||
129 | #define ARM_SMMU_GBPA 0x44 | ||
130 | #define GBPA_ABORT (1 << 20) | ||
131 | #define GBPA_UPDATE (1 << 31) | ||
132 | |||
126 | #define ARM_SMMU_IRQ_CTRL 0x50 | 133 | #define ARM_SMMU_IRQ_CTRL 0x50 |
127 | #define IRQ_CTRL_EVTQ_IRQEN (1 << 2) | 134 | #define IRQ_CTRL_EVTQ_IRQEN (1 << 2) |
128 | #define IRQ_CTRL_PRIQ_IRQEN (1 << 1) | 135 | #define IRQ_CTRL_PRIQ_IRQEN (1 << 1) |
@@ -260,6 +267,9 @@ | |||
260 | #define STRTAB_STE_1_SHCFG_INCOMING 1UL | 267 | #define STRTAB_STE_1_SHCFG_INCOMING 1UL |
261 | #define STRTAB_STE_1_SHCFG_SHIFT 44 | 268 | #define STRTAB_STE_1_SHCFG_SHIFT 44 |
262 | 269 | ||
270 | #define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL | ||
271 | #define STRTAB_STE_1_PRIVCFG_SHIFT 48 | ||
272 | |||
263 | #define STRTAB_STE_2_S2VMID_SHIFT 0 | 273 | #define STRTAB_STE_2_S2VMID_SHIFT 0 |
264 | #define STRTAB_STE_2_S2VMID_MASK 0xffffUL | 274 | #define STRTAB_STE_2_S2VMID_MASK 0xffffUL |
265 | #define STRTAB_STE_2_VTCR_SHIFT 32 | 275 | #define STRTAB_STE_2_VTCR_SHIFT 32 |
@@ -606,12 +616,9 @@ struct arm_smmu_device { | |||
606 | struct arm_smmu_strtab_cfg strtab_cfg; | 616 | struct arm_smmu_strtab_cfg strtab_cfg; |
607 | }; | 617 | }; |
608 | 618 | ||
609 | /* SMMU private data for an IOMMU group */ | 619 | /* SMMU private data for each master */ |
610 | struct arm_smmu_group { | 620 | struct arm_smmu_master_data { |
611 | struct arm_smmu_device *smmu; | 621 | struct arm_smmu_device *smmu; |
612 | struct arm_smmu_domain *domain; | ||
613 | int num_sids; | ||
614 | u32 *sids; | ||
615 | struct arm_smmu_strtab_ent ste; | 622 | struct arm_smmu_strtab_ent ste; |
616 | }; | 623 | }; |
617 | 624 | ||
@@ -713,19 +720,15 @@ static void queue_inc_prod(struct arm_smmu_queue *q) | |||
713 | writel(q->prod, q->prod_reg); | 720 | writel(q->prod, q->prod_reg); |
714 | } | 721 | } |
715 | 722 | ||
716 | static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until) | 723 | /* |
717 | { | 724 | * Wait for the SMMU to consume items. If drain is true, wait until the queue |
718 | if (Q_WRP(q, q->cons) == Q_WRP(q, until)) | 725 | * is empty. Otherwise, wait until there is at least one free slot. |
719 | return Q_IDX(q, q->cons) < Q_IDX(q, until); | 726 | */ |
720 | 727 | static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) | |
721 | return Q_IDX(q, q->cons) >= Q_IDX(q, until); | ||
722 | } | ||
723 | |||
724 | static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe) | ||
725 | { | 728 | { |
726 | ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); | 729 | ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); |
727 | 730 | ||
728 | while (queue_sync_cons(q), __queue_cons_before(q, until)) { | 731 | while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) { |
729 | if (ktime_compare(ktime_get(), timeout) > 0) | 732 | if (ktime_compare(ktime_get(), timeout) > 0) |
730 | return -ETIMEDOUT; | 733 | return -ETIMEDOUT; |
731 | 734 | ||
@@ -896,8 +899,8 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, | 899 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, |
897 | struct arm_smmu_cmdq_ent *ent) | 900 | struct arm_smmu_cmdq_ent *ent) |
898 | { | 901 | { |
899 | u32 until; | ||
900 | u64 cmd[CMDQ_ENT_DWORDS]; | 902 | u64 cmd[CMDQ_ENT_DWORDS]; |
903 | unsigned long flags; | ||
901 | bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); | 904 | bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); |
902 | struct arm_smmu_queue *q = &smmu->cmdq.q; | 905 | struct arm_smmu_queue *q = &smmu->cmdq.q; |
903 | 906 | ||
@@ -907,20 +910,15 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, | |||
907 | return; | 910 | return; |
908 | } | 911 | } |
909 | 912 | ||
910 | spin_lock(&smmu->cmdq.lock); | 913 | spin_lock_irqsave(&smmu->cmdq.lock, flags); |
911 | while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) { | 914 | while (queue_insert_raw(q, cmd) == -ENOSPC) { |
912 | /* | 915 | if (queue_poll_cons(q, false, wfe)) |
913 | * Keep the queue locked, otherwise the producer could wrap | ||
914 | * twice and we could see a future consumer pointer that looks | ||
915 | * like it's behind us. | ||
916 | */ | ||
917 | if (queue_poll_cons(q, until, wfe)) | ||
918 | dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); | 916 | dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); |
919 | } | 917 | } |
920 | 918 | ||
921 | if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe)) | 919 | if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe)) |
922 | dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); | 920 | dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); |
923 | spin_unlock(&smmu->cmdq.lock); | 921 | spin_unlock_irqrestore(&smmu->cmdq.lock, flags); |
924 | } | 922 | } |
925 | 923 | ||
926 | /* Context descriptor manipulation functions */ | 924 | /* Context descriptor manipulation functions */ |
@@ -1073,7 +1071,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1073 | #ifdef CONFIG_PCI_ATS | 1071 | #ifdef CONFIG_PCI_ATS |
1074 | STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | | 1072 | STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | |
1075 | #endif | 1073 | #endif |
1076 | STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT); | 1074 | STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT | |
1075 | STRTAB_STE_1_PRIVCFG_UNPRIV << | ||
1076 | STRTAB_STE_1_PRIVCFG_SHIFT); | ||
1077 | 1077 | ||
1078 | if (smmu->features & ARM_SMMU_FEAT_STALLS) | 1078 | if (smmu->features & ARM_SMMU_FEAT_STALLS) |
1079 | dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); | 1079 | dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); |
@@ -1161,36 +1161,66 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) | |||
1161 | struct arm_smmu_queue *q = &smmu->evtq.q; | 1161 | struct arm_smmu_queue *q = &smmu->evtq.q; |
1162 | u64 evt[EVTQ_ENT_DWORDS]; | 1162 | u64 evt[EVTQ_ENT_DWORDS]; |
1163 | 1163 | ||
1164 | while (!queue_remove_raw(q, evt)) { | 1164 | do { |
1165 | u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; | 1165 | while (!queue_remove_raw(q, evt)) { |
1166 | u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK; | ||
1166 | 1167 | ||
1167 | dev_info(smmu->dev, "event 0x%02x received:\n", id); | 1168 | dev_info(smmu->dev, "event 0x%02x received:\n", id); |
1168 | for (i = 0; i < ARRAY_SIZE(evt); ++i) | 1169 | for (i = 0; i < ARRAY_SIZE(evt); ++i) |
1169 | dev_info(smmu->dev, "\t0x%016llx\n", | 1170 | dev_info(smmu->dev, "\t0x%016llx\n", |
1170 | (unsigned long long)evt[i]); | 1171 | (unsigned long long)evt[i]); |
1171 | } | 1172 | |
1173 | } | ||
1174 | |||
1175 | /* | ||
1176 | * Not much we can do on overflow, so scream and pretend we're | ||
1177 | * trying harder. | ||
1178 | */ | ||
1179 | if (queue_sync_prod(q) == -EOVERFLOW) | ||
1180 | dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); | ||
1181 | } while (!queue_empty(q)); | ||
1172 | 1182 | ||
1173 | /* Sync our overflow flag, as we believe we're up to speed */ | 1183 | /* Sync our overflow flag, as we believe we're up to speed */ |
1174 | q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); | 1184 | q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); |
1175 | return IRQ_HANDLED; | 1185 | return IRQ_HANDLED; |
1176 | } | 1186 | } |
1177 | 1187 | ||
1178 | static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev) | 1188 | static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) |
1179 | { | 1189 | { |
1180 | irqreturn_t ret = IRQ_WAKE_THREAD; | 1190 | u32 sid, ssid; |
1181 | struct arm_smmu_device *smmu = dev; | 1191 | u16 grpid; |
1182 | struct arm_smmu_queue *q = &smmu->evtq.q; | 1192 | bool ssv, last; |
1193 | |||
1194 | sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK; | ||
1195 | ssv = evt[0] & PRIQ_0_SSID_V; | ||
1196 | ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0; | ||
1197 | last = evt[0] & PRIQ_0_PRG_LAST; | ||
1198 | grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK; | ||
1199 | |||
1200 | dev_info(smmu->dev, "unexpected PRI request received:\n"); | ||
1201 | dev_info(smmu->dev, | ||
1202 | "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", | ||
1203 | sid, ssid, grpid, last ? "L" : "", | ||
1204 | evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", | ||
1205 | evt[0] & PRIQ_0_PERM_READ ? "R" : "", | ||
1206 | evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", | ||
1207 | evt[0] & PRIQ_0_PERM_EXEC ? "X" : "", | ||
1208 | evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT); | ||
1209 | |||
1210 | if (last) { | ||
1211 | struct arm_smmu_cmdq_ent cmd = { | ||
1212 | .opcode = CMDQ_OP_PRI_RESP, | ||
1213 | .substream_valid = ssv, | ||
1214 | .pri = { | ||
1215 | .sid = sid, | ||
1216 | .ssid = ssid, | ||
1217 | .grpid = grpid, | ||
1218 | .resp = PRI_RESP_DENY, | ||
1219 | }, | ||
1220 | }; | ||
1183 | 1221 | ||
1184 | /* | 1222 | arm_smmu_cmdq_issue_cmd(smmu, &cmd); |
1185 | * Not much we can do on overflow, so scream and pretend we're | 1223 | } |
1186 | * trying harder. | ||
1187 | */ | ||
1188 | if (queue_sync_prod(q) == -EOVERFLOW) | ||
1189 | dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); | ||
1190 | else if (queue_empty(q)) | ||
1191 | ret = IRQ_NONE; | ||
1192 | |||
1193 | return ret; | ||
1194 | } | 1224 | } |
1195 | 1225 | ||
1196 | static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) | 1226 | static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) |
@@ -1199,63 +1229,19 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) | |||
1199 | struct arm_smmu_queue *q = &smmu->priq.q; | 1229 | struct arm_smmu_queue *q = &smmu->priq.q; |
1200 | u64 evt[PRIQ_ENT_DWORDS]; | 1230 | u64 evt[PRIQ_ENT_DWORDS]; |
1201 | 1231 | ||
1202 | while (!queue_remove_raw(q, evt)) { | 1232 | do { |
1203 | u32 sid, ssid; | 1233 | while (!queue_remove_raw(q, evt)) |
1204 | u16 grpid; | 1234 | arm_smmu_handle_ppr(smmu, evt); |
1205 | bool ssv, last; | ||
1206 | 1235 | ||
1207 | sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK; | 1236 | if (queue_sync_prod(q) == -EOVERFLOW) |
1208 | ssv = evt[0] & PRIQ_0_SSID_V; | 1237 | dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); |
1209 | ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0; | 1238 | } while (!queue_empty(q)); |
1210 | last = evt[0] & PRIQ_0_PRG_LAST; | ||
1211 | grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK; | ||
1212 | |||
1213 | dev_info(smmu->dev, "unexpected PRI request received:\n"); | ||
1214 | dev_info(smmu->dev, | ||
1215 | "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", | ||
1216 | sid, ssid, grpid, last ? "L" : "", | ||
1217 | evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", | ||
1218 | evt[0] & PRIQ_0_PERM_READ ? "R" : "", | ||
1219 | evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", | ||
1220 | evt[0] & PRIQ_0_PERM_EXEC ? "X" : "", | ||
1221 | evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT); | ||
1222 | |||
1223 | if (last) { | ||
1224 | struct arm_smmu_cmdq_ent cmd = { | ||
1225 | .opcode = CMDQ_OP_PRI_RESP, | ||
1226 | .substream_valid = ssv, | ||
1227 | .pri = { | ||
1228 | .sid = sid, | ||
1229 | .ssid = ssid, | ||
1230 | .grpid = grpid, | ||
1231 | .resp = PRI_RESP_DENY, | ||
1232 | }, | ||
1233 | }; | ||
1234 | |||
1235 | arm_smmu_cmdq_issue_cmd(smmu, &cmd); | ||
1236 | } | ||
1237 | } | ||
1238 | 1239 | ||
1239 | /* Sync our overflow flag, as we believe we're up to speed */ | 1240 | /* Sync our overflow flag, as we believe we're up to speed */ |
1240 | q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); | 1241 | q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); |
1241 | return IRQ_HANDLED; | 1242 | return IRQ_HANDLED; |
1242 | } | 1243 | } |
1243 | 1244 | ||
1244 | static irqreturn_t arm_smmu_priq_handler(int irq, void *dev) | ||
1245 | { | ||
1246 | irqreturn_t ret = IRQ_WAKE_THREAD; | ||
1247 | struct arm_smmu_device *smmu = dev; | ||
1248 | struct arm_smmu_queue *q = &smmu->priq.q; | ||
1249 | |||
1250 | /* PRIQ overflow indicates a programming error */ | ||
1251 | if (queue_sync_prod(q) == -EOVERFLOW) | ||
1252 | dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); | ||
1253 | else if (queue_empty(q)) | ||
1254 | ret = IRQ_NONE; | ||
1255 | |||
1256 | return ret; | ||
1257 | } | ||
1258 | |||
1259 | static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev) | 1245 | static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev) |
1260 | { | 1246 | { |
1261 | /* We don't actually use CMD_SYNC interrupts for anything */ | 1247 | /* We don't actually use CMD_SYNC interrupts for anything */ |
@@ -1288,15 +1274,11 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) | |||
1288 | if (active & GERROR_MSI_GERROR_ABT_ERR) | 1274 | if (active & GERROR_MSI_GERROR_ABT_ERR) |
1289 | dev_warn(smmu->dev, "GERROR MSI write aborted\n"); | 1275 | dev_warn(smmu->dev, "GERROR MSI write aborted\n"); |
1290 | 1276 | ||
1291 | if (active & GERROR_MSI_PRIQ_ABT_ERR) { | 1277 | if (active & GERROR_MSI_PRIQ_ABT_ERR) |
1292 | dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); | 1278 | dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); |
1293 | arm_smmu_priq_handler(irq, smmu->dev); | ||
1294 | } | ||
1295 | 1279 | ||
1296 | if (active & GERROR_MSI_EVTQ_ABT_ERR) { | 1280 | if (active & GERROR_MSI_EVTQ_ABT_ERR) |
1297 | dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); | 1281 | dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); |
1298 | arm_smmu_evtq_handler(irq, smmu->dev); | ||
1299 | } | ||
1300 | 1282 | ||
1301 | if (active & GERROR_MSI_CMDQ_ABT_ERR) { | 1283 | if (active & GERROR_MSI_CMDQ_ABT_ERR) { |
1302 | dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); | 1284 | dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); |
@@ -1569,6 +1551,8 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) | |||
1569 | return -ENOMEM; | 1551 | return -ENOMEM; |
1570 | 1552 | ||
1571 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | 1553 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; |
1554 | domain->geometry.aperture_end = (1UL << ias) - 1; | ||
1555 | domain->geometry.force_aperture = true; | ||
1572 | smmu_domain->pgtbl_ops = pgtbl_ops; | 1556 | smmu_domain->pgtbl_ops = pgtbl_ops; |
1573 | 1557 | ||
1574 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); | 1558 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); |
@@ -1578,20 +1562,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) | |||
1578 | return ret; | 1562 | return ret; |
1579 | } | 1563 | } |
1580 | 1564 | ||
1581 | static struct arm_smmu_group *arm_smmu_group_get(struct device *dev) | ||
1582 | { | ||
1583 | struct iommu_group *group; | ||
1584 | struct arm_smmu_group *smmu_group; | ||
1585 | |||
1586 | group = iommu_group_get(dev); | ||
1587 | if (!group) | ||
1588 | return NULL; | ||
1589 | |||
1590 | smmu_group = iommu_group_get_iommudata(group); | ||
1591 | iommu_group_put(group); | ||
1592 | return smmu_group; | ||
1593 | } | ||
1594 | |||
1595 | static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) | 1565 | static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) |
1596 | { | 1566 | { |
1597 | __le64 *step; | 1567 | __le64 *step; |
@@ -1614,27 +1584,17 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) | |||
1614 | return step; | 1584 | return step; |
1615 | } | 1585 | } |
1616 | 1586 | ||
1617 | static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) | 1587 | static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) |
1618 | { | 1588 | { |
1619 | int i; | 1589 | int i; |
1620 | struct arm_smmu_domain *smmu_domain = smmu_group->domain; | 1590 | struct arm_smmu_master_data *master = fwspec->iommu_priv; |
1621 | struct arm_smmu_strtab_ent *ste = &smmu_group->ste; | 1591 | struct arm_smmu_device *smmu = master->smmu; |
1622 | struct arm_smmu_device *smmu = smmu_group->smmu; | ||
1623 | 1592 | ||
1624 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | 1593 | for (i = 0; i < fwspec->num_ids; ++i) { |
1625 | ste->s1_cfg = &smmu_domain->s1_cfg; | 1594 | u32 sid = fwspec->ids[i]; |
1626 | ste->s2_cfg = NULL; | ||
1627 | arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); | ||
1628 | } else { | ||
1629 | ste->s1_cfg = NULL; | ||
1630 | ste->s2_cfg = &smmu_domain->s2_cfg; | ||
1631 | } | ||
1632 | |||
1633 | for (i = 0; i < smmu_group->num_sids; ++i) { | ||
1634 | u32 sid = smmu_group->sids[i]; | ||
1635 | __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); | 1595 | __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); |
1636 | 1596 | ||
1637 | arm_smmu_write_strtab_ent(smmu, sid, step, ste); | 1597 | arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); |
1638 | } | 1598 | } |
1639 | 1599 | ||
1640 | return 0; | 1600 | return 0; |
@@ -1642,13 +1602,11 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) | |||
1642 | 1602 | ||
1643 | static void arm_smmu_detach_dev(struct device *dev) | 1603 | static void arm_smmu_detach_dev(struct device *dev) |
1644 | { | 1604 | { |
1645 | struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); | 1605 | struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; |
1646 | 1606 | ||
1647 | smmu_group->ste.bypass = true; | 1607 | master->ste.bypass = true; |
1648 | if (arm_smmu_install_ste_for_group(smmu_group) < 0) | 1608 | if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0) |
1649 | dev_warn(dev, "failed to install bypass STE\n"); | 1609 | dev_warn(dev, "failed to install bypass STE\n"); |
1650 | |||
1651 | smmu_group->domain = NULL; | ||
1652 | } | 1610 | } |
1653 | 1611 | ||
1654 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1612 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
@@ -1656,16 +1614,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1656 | int ret = 0; | 1614 | int ret = 0; |
1657 | struct arm_smmu_device *smmu; | 1615 | struct arm_smmu_device *smmu; |
1658 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1616 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1659 | struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); | 1617 | struct arm_smmu_master_data *master; |
1618 | struct arm_smmu_strtab_ent *ste; | ||
1660 | 1619 | ||
1661 | if (!smmu_group) | 1620 | if (!dev->iommu_fwspec) |
1662 | return -ENOENT; | 1621 | return -ENOENT; |
1663 | 1622 | ||
1623 | master = dev->iommu_fwspec->iommu_priv; | ||
1624 | smmu = master->smmu; | ||
1625 | ste = &master->ste; | ||
1626 | |||
1664 | /* Already attached to a different domain? */ | 1627 | /* Already attached to a different domain? */ |
1665 | if (smmu_group->domain && smmu_group->domain != smmu_domain) | 1628 | if (!ste->bypass) |
1666 | arm_smmu_detach_dev(dev); | 1629 | arm_smmu_detach_dev(dev); |
1667 | 1630 | ||
1668 | smmu = smmu_group->smmu; | ||
1669 | mutex_lock(&smmu_domain->init_mutex); | 1631 | mutex_lock(&smmu_domain->init_mutex); |
1670 | 1632 | ||
1671 | if (!smmu_domain->smmu) { | 1633 | if (!smmu_domain->smmu) { |
@@ -1684,21 +1646,21 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1684 | goto out_unlock; | 1646 | goto out_unlock; |
1685 | } | 1647 | } |
1686 | 1648 | ||
1687 | /* Group already attached to this domain? */ | 1649 | ste->bypass = false; |
1688 | if (smmu_group->domain) | 1650 | ste->valid = true; |
1689 | goto out_unlock; | ||
1690 | |||
1691 | smmu_group->domain = smmu_domain; | ||
1692 | 1651 | ||
1693 | /* | 1652 | if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { |
1694 | * FIXME: This should always be "false" once we have IOMMU-backed | 1653 | ste->s1_cfg = &smmu_domain->s1_cfg; |
1695 | * DMA ops for all devices behind the SMMU. | 1654 | ste->s2_cfg = NULL; |
1696 | */ | 1655 | arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); |
1697 | smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; | 1656 | } else { |
1657 | ste->s1_cfg = NULL; | ||
1658 | ste->s2_cfg = &smmu_domain->s2_cfg; | ||
1659 | } | ||
1698 | 1660 | ||
1699 | ret = arm_smmu_install_ste_for_group(smmu_group); | 1661 | ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec); |
1700 | if (ret < 0) | 1662 | if (ret < 0) |
1701 | smmu_group->domain = NULL; | 1663 | ste->valid = false; |
1702 | 1664 | ||
1703 | out_unlock: | 1665 | out_unlock: |
1704 | mutex_unlock(&smmu_domain->init_mutex); | 1666 | mutex_unlock(&smmu_domain->init_mutex); |
@@ -1757,40 +1719,19 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) | |||
1757 | return ret; | 1719 | return ret; |
1758 | } | 1720 | } |
1759 | 1721 | ||
1760 | static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp) | 1722 | static struct platform_driver arm_smmu_driver; |
1761 | { | ||
1762 | *(u32 *)sidp = alias; | ||
1763 | return 0; /* Continue walking */ | ||
1764 | } | ||
1765 | 1723 | ||
1766 | static void __arm_smmu_release_pci_iommudata(void *data) | 1724 | static int arm_smmu_match_node(struct device *dev, void *data) |
1767 | { | 1725 | { |
1768 | kfree(data); | 1726 | return dev->of_node == data; |
1769 | } | 1727 | } |
1770 | 1728 | ||
1771 | static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev) | 1729 | static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np) |
1772 | { | 1730 | { |
1773 | struct device_node *of_node; | 1731 | struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, |
1774 | struct platform_device *smmu_pdev; | 1732 | np, arm_smmu_match_node); |
1775 | struct arm_smmu_device *smmu = NULL; | 1733 | put_device(dev); |
1776 | struct pci_bus *bus = pdev->bus; | 1734 | return dev ? dev_get_drvdata(dev) : NULL; |
1777 | |||
1778 | /* Walk up to the root bus */ | ||
1779 | while (!pci_is_root_bus(bus)) | ||
1780 | bus = bus->parent; | ||
1781 | |||
1782 | /* Follow the "iommus" phandle from the host controller */ | ||
1783 | of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0); | ||
1784 | if (!of_node) | ||
1785 | return NULL; | ||
1786 | |||
1787 | /* See if we can find an SMMU corresponding to the phandle */ | ||
1788 | smmu_pdev = of_find_device_by_node(of_node); | ||
1789 | if (smmu_pdev) | ||
1790 | smmu = platform_get_drvdata(smmu_pdev); | ||
1791 | |||
1792 | of_node_put(of_node); | ||
1793 | return smmu; | ||
1794 | } | 1735 | } |
1795 | 1736 | ||
1796 | static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) | 1737 | static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) |
@@ -1803,94 +1744,91 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) | |||
1803 | return sid < limit; | 1744 | return sid < limit; |
1804 | } | 1745 | } |
1805 | 1746 | ||
1747 | static struct iommu_ops arm_smmu_ops; | ||
1748 | |||
1806 | static int arm_smmu_add_device(struct device *dev) | 1749 | static int arm_smmu_add_device(struct device *dev) |
1807 | { | 1750 | { |
1808 | int i, ret; | 1751 | int i, ret; |
1809 | u32 sid, *sids; | ||
1810 | struct pci_dev *pdev; | ||
1811 | struct iommu_group *group; | ||
1812 | struct arm_smmu_group *smmu_group; | ||
1813 | struct arm_smmu_device *smmu; | 1752 | struct arm_smmu_device *smmu; |
1753 | struct arm_smmu_master_data *master; | ||
1754 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1755 | struct iommu_group *group; | ||
1814 | 1756 | ||
1815 | /* We only support PCI, for now */ | 1757 | if (!fwspec || fwspec->ops != &arm_smmu_ops) |
1816 | if (!dev_is_pci(dev)) | ||
1817 | return -ENODEV; | 1758 | return -ENODEV; |
1818 | 1759 | /* | |
1819 | pdev = to_pci_dev(dev); | 1760 | * We _can_ actually withstand dodgy bus code re-calling add_device() |
1820 | group = iommu_group_get_for_dev(dev); | 1761 | * without an intervening remove_device()/of_xlate() sequence, but |
1821 | if (IS_ERR(group)) | 1762 | * we're not going to do so quietly... |
1822 | return PTR_ERR(group); | 1763 | */ |
1823 | 1764 | if (WARN_ON_ONCE(fwspec->iommu_priv)) { | |
1824 | smmu_group = iommu_group_get_iommudata(group); | 1765 | master = fwspec->iommu_priv; |
1825 | if (!smmu_group) { | 1766 | smmu = master->smmu; |
1826 | smmu = arm_smmu_get_for_pci_dev(pdev); | ||
1827 | if (!smmu) { | ||
1828 | ret = -ENOENT; | ||
1829 | goto out_remove_dev; | ||
1830 | } | ||
1831 | |||
1832 | smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL); | ||
1833 | if (!smmu_group) { | ||
1834 | ret = -ENOMEM; | ||
1835 | goto out_remove_dev; | ||
1836 | } | ||
1837 | |||
1838 | smmu_group->ste.valid = true; | ||
1839 | smmu_group->smmu = smmu; | ||
1840 | iommu_group_set_iommudata(group, smmu_group, | ||
1841 | __arm_smmu_release_pci_iommudata); | ||
1842 | } else { | 1767 | } else { |
1843 | smmu = smmu_group->smmu; | 1768 | smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); |
1844 | } | 1769 | if (!smmu) |
1770 | return -ENODEV; | ||
1771 | master = kzalloc(sizeof(*master), GFP_KERNEL); | ||
1772 | if (!master) | ||
1773 | return -ENOMEM; | ||
1845 | 1774 | ||
1846 | /* Assume SID == RID until firmware tells us otherwise */ | 1775 | master->smmu = smmu; |
1847 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); | 1776 | fwspec->iommu_priv = master; |
1848 | for (i = 0; i < smmu_group->num_sids; ++i) { | ||
1849 | /* If we already know about this SID, then we're done */ | ||
1850 | if (smmu_group->sids[i] == sid) | ||
1851 | goto out_put_group; | ||
1852 | } | 1777 | } |
1853 | 1778 | ||
1854 | /* Check the SID is in range of the SMMU and our stream table */ | 1779 | /* Check the SIDs are in range of the SMMU and our stream table */ |
1855 | if (!arm_smmu_sid_in_range(smmu, sid)) { | 1780 | for (i = 0; i < fwspec->num_ids; i++) { |
1856 | ret = -ERANGE; | 1781 | u32 sid = fwspec->ids[i]; |
1857 | goto out_remove_dev; | ||
1858 | } | ||
1859 | 1782 | ||
1860 | /* Ensure l2 strtab is initialised */ | 1783 | if (!arm_smmu_sid_in_range(smmu, sid)) |
1861 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { | 1784 | return -ERANGE; |
1862 | ret = arm_smmu_init_l2_strtab(smmu, sid); | ||
1863 | if (ret) | ||
1864 | goto out_remove_dev; | ||
1865 | } | ||
1866 | 1785 | ||
1867 | /* Resize the SID array for the group */ | 1786 | /* Ensure l2 strtab is initialised */ |
1868 | smmu_group->num_sids++; | 1787 | if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { |
1869 | sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids), | 1788 | ret = arm_smmu_init_l2_strtab(smmu, sid); |
1870 | GFP_KERNEL); | 1789 | if (ret) |
1871 | if (!sids) { | 1790 | return ret; |
1872 | smmu_group->num_sids--; | 1791 | } |
1873 | ret = -ENOMEM; | ||
1874 | goto out_remove_dev; | ||
1875 | } | 1792 | } |
1876 | 1793 | ||
1877 | /* Add the new SID */ | 1794 | group = iommu_group_get_for_dev(dev); |
1878 | sids[smmu_group->num_sids - 1] = sid; | 1795 | if (!IS_ERR(group)) |
1879 | smmu_group->sids = sids; | 1796 | iommu_group_put(group); |
1880 | |||
1881 | out_put_group: | ||
1882 | iommu_group_put(group); | ||
1883 | return 0; | ||
1884 | 1797 | ||
1885 | out_remove_dev: | 1798 | return PTR_ERR_OR_ZERO(group); |
1886 | iommu_group_remove_device(dev); | ||
1887 | iommu_group_put(group); | ||
1888 | return ret; | ||
1889 | } | 1799 | } |
1890 | 1800 | ||
1891 | static void arm_smmu_remove_device(struct device *dev) | 1801 | static void arm_smmu_remove_device(struct device *dev) |
1892 | { | 1802 | { |
1803 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1804 | struct arm_smmu_master_data *master; | ||
1805 | |||
1806 | if (!fwspec || fwspec->ops != &arm_smmu_ops) | ||
1807 | return; | ||
1808 | |||
1809 | master = fwspec->iommu_priv; | ||
1810 | if (master && master->ste.valid) | ||
1811 | arm_smmu_detach_dev(dev); | ||
1893 | iommu_group_remove_device(dev); | 1812 | iommu_group_remove_device(dev); |
1813 | kfree(master); | ||
1814 | iommu_fwspec_free(dev); | ||
1815 | } | ||
1816 | |||
1817 | static struct iommu_group *arm_smmu_device_group(struct device *dev) | ||
1818 | { | ||
1819 | struct iommu_group *group; | ||
1820 | |||
1821 | /* | ||
1822 | * We don't support devices sharing stream IDs other than PCI RID | ||
1823 | * aliases, since the necessary ID-to-device lookup becomes rather | ||
1824 | * impractical given a potential sparse 32-bit stream ID space. | ||
1825 | */ | ||
1826 | if (dev_is_pci(dev)) | ||
1827 | group = pci_device_group(dev); | ||
1828 | else | ||
1829 | group = generic_device_group(dev); | ||
1830 | |||
1831 | return group; | ||
1894 | } | 1832 | } |
1895 | 1833 | ||
1896 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | 1834 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, |
@@ -1937,6 +1875,11 @@ out_unlock: | |||
1937 | return ret; | 1875 | return ret; |
1938 | } | 1876 | } |
1939 | 1877 | ||
1878 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) | ||
1879 | { | ||
1880 | return iommu_fwspec_add_ids(dev, args->args, 1); | ||
1881 | } | ||
1882 | |||
1940 | static struct iommu_ops arm_smmu_ops = { | 1883 | static struct iommu_ops arm_smmu_ops = { |
1941 | .capable = arm_smmu_capable, | 1884 | .capable = arm_smmu_capable, |
1942 | .domain_alloc = arm_smmu_domain_alloc, | 1885 | .domain_alloc = arm_smmu_domain_alloc, |
@@ -1948,9 +1891,10 @@ static struct iommu_ops arm_smmu_ops = { | |||
1948 | .iova_to_phys = arm_smmu_iova_to_phys, | 1891 | .iova_to_phys = arm_smmu_iova_to_phys, |
1949 | .add_device = arm_smmu_add_device, | 1892 | .add_device = arm_smmu_add_device, |
1950 | .remove_device = arm_smmu_remove_device, | 1893 | .remove_device = arm_smmu_remove_device, |
1951 | .device_group = pci_device_group, | 1894 | .device_group = arm_smmu_device_group, |
1952 | .domain_get_attr = arm_smmu_domain_get_attr, | 1895 | .domain_get_attr = arm_smmu_domain_get_attr, |
1953 | .domain_set_attr = arm_smmu_domain_set_attr, | 1896 | .domain_set_attr = arm_smmu_domain_set_attr, |
1897 | .of_xlate = arm_smmu_of_xlate, | ||
1954 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ | 1898 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
1955 | }; | 1899 | }; |
1956 | 1900 | ||
@@ -2151,6 +2095,24 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, | |||
2151 | 1, ARM_SMMU_POLL_TIMEOUT_US); | 2095 | 1, ARM_SMMU_POLL_TIMEOUT_US); |
2152 | } | 2096 | } |
2153 | 2097 | ||
2098 | /* GBPA is "special" */ | ||
2099 | static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) | ||
2100 | { | ||
2101 | int ret; | ||
2102 | u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; | ||
2103 | |||
2104 | ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), | ||
2105 | 1, ARM_SMMU_POLL_TIMEOUT_US); | ||
2106 | if (ret) | ||
2107 | return ret; | ||
2108 | |||
2109 | reg &= ~clr; | ||
2110 | reg |= set; | ||
2111 | writel_relaxed(reg | GBPA_UPDATE, gbpa); | ||
2112 | return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), | ||
2113 | 1, ARM_SMMU_POLL_TIMEOUT_US); | ||
2114 | } | ||
2115 | |||
2154 | static void arm_smmu_free_msis(void *data) | 2116 | static void arm_smmu_free_msis(void *data) |
2155 | { | 2117 | { |
2156 | struct device *dev = data; | 2118 | struct device *dev = data; |
@@ -2235,10 +2197,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
2235 | /* Request interrupt lines */ | 2197 | /* Request interrupt lines */ |
2236 | irq = smmu->evtq.q.irq; | 2198 | irq = smmu->evtq.q.irq; |
2237 | if (irq) { | 2199 | if (irq) { |
2238 | ret = devm_request_threaded_irq(smmu->dev, irq, | 2200 | ret = devm_request_threaded_irq(smmu->dev, irq, NULL, |
2239 | arm_smmu_evtq_handler, | ||
2240 | arm_smmu_evtq_thread, | 2201 | arm_smmu_evtq_thread, |
2241 | 0, "arm-smmu-v3-evtq", smmu); | 2202 | IRQF_ONESHOT, |
2203 | "arm-smmu-v3-evtq", smmu); | ||
2242 | if (ret < 0) | 2204 | if (ret < 0) |
2243 | dev_warn(smmu->dev, "failed to enable evtq irq\n"); | 2205 | dev_warn(smmu->dev, "failed to enable evtq irq\n"); |
2244 | } | 2206 | } |
@@ -2263,10 +2225,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
2263 | if (smmu->features & ARM_SMMU_FEAT_PRI) { | 2225 | if (smmu->features & ARM_SMMU_FEAT_PRI) { |
2264 | irq = smmu->priq.q.irq; | 2226 | irq = smmu->priq.q.irq; |
2265 | if (irq) { | 2227 | if (irq) { |
2266 | ret = devm_request_threaded_irq(smmu->dev, irq, | 2228 | ret = devm_request_threaded_irq(smmu->dev, irq, NULL, |
2267 | arm_smmu_priq_handler, | ||
2268 | arm_smmu_priq_thread, | 2229 | arm_smmu_priq_thread, |
2269 | 0, "arm-smmu-v3-priq", | 2230 | IRQF_ONESHOT, |
2231 | "arm-smmu-v3-priq", | ||
2270 | smmu); | 2232 | smmu); |
2271 | if (ret < 0) | 2233 | if (ret < 0) |
2272 | dev_warn(smmu->dev, | 2234 | dev_warn(smmu->dev, |
@@ -2296,7 +2258,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) | |||
2296 | return ret; | 2258 | return ret; |
2297 | } | 2259 | } |
2298 | 2260 | ||
2299 | static int arm_smmu_device_reset(struct arm_smmu_device *smmu) | 2261 | static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) |
2300 | { | 2262 | { |
2301 | int ret; | 2263 | int ret; |
2302 | u32 reg, enables; | 2264 | u32 reg, enables; |
@@ -2397,8 +2359,17 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
2397 | return ret; | 2359 | return ret; |
2398 | } | 2360 | } |
2399 | 2361 | ||
2400 | /* Enable the SMMU interface */ | 2362 | |
2401 | enables |= CR0_SMMUEN; | 2363 | /* Enable the SMMU interface, or ensure bypass */ |
2364 | if (!bypass || disable_bypass) { | ||
2365 | enables |= CR0_SMMUEN; | ||
2366 | } else { | ||
2367 | ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); | ||
2368 | if (ret) { | ||
2369 | dev_err(smmu->dev, "GBPA not responding to update\n"); | ||
2370 | return ret; | ||
2371 | } | ||
2372 | } | ||
2402 | ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, | 2373 | ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, |
2403 | ARM_SMMU_CR0ACK); | 2374 | ARM_SMMU_CR0ACK); |
2404 | if (ret) { | 2375 | if (ret) { |
@@ -2597,6 +2568,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
2597 | struct resource *res; | 2568 | struct resource *res; |
2598 | struct arm_smmu_device *smmu; | 2569 | struct arm_smmu_device *smmu; |
2599 | struct device *dev = &pdev->dev; | 2570 | struct device *dev = &pdev->dev; |
2571 | bool bypass = true; | ||
2572 | u32 cells; | ||
2573 | |||
2574 | if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells)) | ||
2575 | dev_err(dev, "missing #iommu-cells property\n"); | ||
2576 | else if (cells != 1) | ||
2577 | dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); | ||
2578 | else | ||
2579 | bypass = false; | ||
2600 | 2580 | ||
2601 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | 2581 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); |
2602 | if (!smmu) { | 2582 | if (!smmu) { |
@@ -2649,7 +2629,24 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
2649 | platform_set_drvdata(pdev, smmu); | 2629 | platform_set_drvdata(pdev, smmu); |
2650 | 2630 | ||
2651 | /* Reset the device */ | 2631 | /* Reset the device */ |
2652 | return arm_smmu_device_reset(smmu); | 2632 | ret = arm_smmu_device_reset(smmu, bypass); |
2633 | if (ret) | ||
2634 | return ret; | ||
2635 | |||
2636 | /* And we're up. Go go go! */ | ||
2637 | of_iommu_set_ops(dev->of_node, &arm_smmu_ops); | ||
2638 | #ifdef CONFIG_PCI | ||
2639 | pci_request_acs(); | ||
2640 | ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | ||
2641 | if (ret) | ||
2642 | return ret; | ||
2643 | #endif | ||
2644 | #ifdef CONFIG_ARM_AMBA | ||
2645 | ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
2646 | if (ret) | ||
2647 | return ret; | ||
2648 | #endif | ||
2649 | return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
2653 | } | 2650 | } |
2654 | 2651 | ||
2655 | static int arm_smmu_device_remove(struct platform_device *pdev) | 2652 | static int arm_smmu_device_remove(struct platform_device *pdev) |
@@ -2677,22 +2674,14 @@ static struct platform_driver arm_smmu_driver = { | |||
2677 | 2674 | ||
2678 | static int __init arm_smmu_init(void) | 2675 | static int __init arm_smmu_init(void) |
2679 | { | 2676 | { |
2680 | struct device_node *np; | 2677 | static bool registered; |
2681 | int ret; | 2678 | int ret = 0; |
2682 | |||
2683 | np = of_find_matching_node(NULL, arm_smmu_of_match); | ||
2684 | if (!np) | ||
2685 | return 0; | ||
2686 | |||
2687 | of_node_put(np); | ||
2688 | |||
2689 | ret = platform_driver_register(&arm_smmu_driver); | ||
2690 | if (ret) | ||
2691 | return ret; | ||
2692 | |||
2693 | pci_request_acs(); | ||
2694 | 2679 | ||
2695 | return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | 2680 | if (!registered) { |
2681 | ret = platform_driver_register(&arm_smmu_driver); | ||
2682 | registered = !ret; | ||
2683 | } | ||
2684 | return ret; | ||
2696 | } | 2685 | } |
2697 | 2686 | ||
2698 | static void __exit arm_smmu_exit(void) | 2687 | static void __exit arm_smmu_exit(void) |
@@ -2703,6 +2692,20 @@ static void __exit arm_smmu_exit(void) | |||
2703 | subsys_initcall(arm_smmu_init); | 2692 | subsys_initcall(arm_smmu_init); |
2704 | module_exit(arm_smmu_exit); | 2693 | module_exit(arm_smmu_exit); |
2705 | 2694 | ||
2695 | static int __init arm_smmu_of_init(struct device_node *np) | ||
2696 | { | ||
2697 | int ret = arm_smmu_init(); | ||
2698 | |||
2699 | if (ret) | ||
2700 | return ret; | ||
2701 | |||
2702 | if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) | ||
2703 | return -ENODEV; | ||
2704 | |||
2705 | return 0; | ||
2706 | } | ||
2707 | IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); | ||
2708 | |||
2706 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); | 2709 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); |
2707 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | 2710 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); |
2708 | MODULE_LICENSE("GPL v2"); | 2711 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 2db74ebc3240..c841eb7a1a74 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #define pr_fmt(fmt) "arm-smmu: " fmt | 29 | #define pr_fmt(fmt) "arm-smmu: " fmt |
30 | 30 | ||
31 | #include <linux/atomic.h> | ||
31 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
32 | #include <linux/dma-iommu.h> | 33 | #include <linux/dma-iommu.h> |
33 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
@@ -40,6 +41,8 @@ | |||
40 | #include <linux/module.h> | 41 | #include <linux/module.h> |
41 | #include <linux/of.h> | 42 | #include <linux/of.h> |
42 | #include <linux/of_address.h> | 43 | #include <linux/of_address.h> |
44 | #include <linux/of_device.h> | ||
45 | #include <linux/of_iommu.h> | ||
43 | #include <linux/pci.h> | 46 | #include <linux/pci.h> |
44 | #include <linux/platform_device.h> | 47 | #include <linux/platform_device.h> |
45 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
@@ -49,15 +52,9 @@ | |||
49 | 52 | ||
50 | #include "io-pgtable.h" | 53 | #include "io-pgtable.h" |
51 | 54 | ||
52 | /* Maximum number of stream IDs assigned to a single device */ | ||
53 | #define MAX_MASTER_STREAMIDS 128 | ||
54 | |||
55 | /* Maximum number of context banks per SMMU */ | 55 | /* Maximum number of context banks per SMMU */ |
56 | #define ARM_SMMU_MAX_CBS 128 | 56 | #define ARM_SMMU_MAX_CBS 128 |
57 | 57 | ||
58 | /* Maximum number of mapping groups per SMMU */ | ||
59 | #define ARM_SMMU_MAX_SMRS 128 | ||
60 | |||
61 | /* SMMU global address space */ | 58 | /* SMMU global address space */ |
62 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) | 59 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) |
63 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) | 60 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) |
@@ -165,21 +162,27 @@ | |||
165 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) | 162 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) |
166 | #define SMR_VALID (1 << 31) | 163 | #define SMR_VALID (1 << 31) |
167 | #define SMR_MASK_SHIFT 16 | 164 | #define SMR_MASK_SHIFT 16 |
168 | #define SMR_MASK_MASK 0x7fff | ||
169 | #define SMR_ID_SHIFT 0 | 165 | #define SMR_ID_SHIFT 0 |
170 | #define SMR_ID_MASK 0x7fff | ||
171 | 166 | ||
172 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) | 167 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) |
173 | #define S2CR_CBNDX_SHIFT 0 | 168 | #define S2CR_CBNDX_SHIFT 0 |
174 | #define S2CR_CBNDX_MASK 0xff | 169 | #define S2CR_CBNDX_MASK 0xff |
175 | #define S2CR_TYPE_SHIFT 16 | 170 | #define S2CR_TYPE_SHIFT 16 |
176 | #define S2CR_TYPE_MASK 0x3 | 171 | #define S2CR_TYPE_MASK 0x3 |
177 | #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) | 172 | enum arm_smmu_s2cr_type { |
178 | #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) | 173 | S2CR_TYPE_TRANS, |
179 | #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) | 174 | S2CR_TYPE_BYPASS, |
175 | S2CR_TYPE_FAULT, | ||
176 | }; | ||
180 | 177 | ||
181 | #define S2CR_PRIVCFG_SHIFT 24 | 178 | #define S2CR_PRIVCFG_SHIFT 24 |
182 | #define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT) | 179 | #define S2CR_PRIVCFG_MASK 0x3 |
180 | enum arm_smmu_s2cr_privcfg { | ||
181 | S2CR_PRIVCFG_DEFAULT, | ||
182 | S2CR_PRIVCFG_DIPAN, | ||
183 | S2CR_PRIVCFG_UNPRIV, | ||
184 | S2CR_PRIVCFG_PRIV, | ||
185 | }; | ||
183 | 186 | ||
184 | /* Context bank attribute registers */ | 187 | /* Context bank attribute registers */ |
185 | #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) | 188 | #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) |
@@ -217,6 +220,7 @@ | |||
217 | #define ARM_SMMU_CB_TTBR0 0x20 | 220 | #define ARM_SMMU_CB_TTBR0 0x20 |
218 | #define ARM_SMMU_CB_TTBR1 0x28 | 221 | #define ARM_SMMU_CB_TTBR1 0x28 |
219 | #define ARM_SMMU_CB_TTBCR 0x30 | 222 | #define ARM_SMMU_CB_TTBCR 0x30 |
223 | #define ARM_SMMU_CB_CONTEXTIDR 0x34 | ||
220 | #define ARM_SMMU_CB_S1_MAIR0 0x38 | 224 | #define ARM_SMMU_CB_S1_MAIR0 0x38 |
221 | #define ARM_SMMU_CB_S1_MAIR1 0x3c | 225 | #define ARM_SMMU_CB_S1_MAIR1 0x3c |
222 | #define ARM_SMMU_CB_PAR 0x50 | 226 | #define ARM_SMMU_CB_PAR 0x50 |
@@ -239,7 +243,6 @@ | |||
239 | #define SCTLR_AFE (1 << 2) | 243 | #define SCTLR_AFE (1 << 2) |
240 | #define SCTLR_TRE (1 << 1) | 244 | #define SCTLR_TRE (1 << 1) |
241 | #define SCTLR_M (1 << 0) | 245 | #define SCTLR_M (1 << 0) |
242 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) | ||
243 | 246 | ||
244 | #define ARM_MMU500_ACTLR_CPRE (1 << 1) | 247 | #define ARM_MMU500_ACTLR_CPRE (1 << 1) |
245 | 248 | ||
@@ -296,23 +299,33 @@ enum arm_smmu_implementation { | |||
296 | CAVIUM_SMMUV2, | 299 | CAVIUM_SMMUV2, |
297 | }; | 300 | }; |
298 | 301 | ||
302 | struct arm_smmu_s2cr { | ||
303 | struct iommu_group *group; | ||
304 | int count; | ||
305 | enum arm_smmu_s2cr_type type; | ||
306 | enum arm_smmu_s2cr_privcfg privcfg; | ||
307 | u8 cbndx; | ||
308 | }; | ||
309 | |||
310 | #define s2cr_init_val (struct arm_smmu_s2cr){ \ | ||
311 | .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \ | ||
312 | } | ||
313 | |||
299 | struct arm_smmu_smr { | 314 | struct arm_smmu_smr { |
300 | u8 idx; | ||
301 | u16 mask; | 315 | u16 mask; |
302 | u16 id; | 316 | u16 id; |
317 | bool valid; | ||
303 | }; | 318 | }; |
304 | 319 | ||
305 | struct arm_smmu_master_cfg { | 320 | struct arm_smmu_master_cfg { |
306 | int num_streamids; | 321 | struct arm_smmu_device *smmu; |
307 | u16 streamids[MAX_MASTER_STREAMIDS]; | 322 | s16 smendx[]; |
308 | struct arm_smmu_smr *smrs; | ||
309 | }; | ||
310 | |||
311 | struct arm_smmu_master { | ||
312 | struct device_node *of_node; | ||
313 | struct rb_node node; | ||
314 | struct arm_smmu_master_cfg cfg; | ||
315 | }; | 323 | }; |
324 | #define INVALID_SMENDX -1 | ||
325 | #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) | ||
326 | #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) | ||
327 | #define for_each_cfg_sme(fw, i, idx) \ | ||
328 | for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i) | ||
316 | 329 | ||
317 | struct arm_smmu_device { | 330 | struct arm_smmu_device { |
318 | struct device *dev; | 331 | struct device *dev; |
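The per-master state now hangs off dev->iommu_fwspec: iommu_priv points at an arm_smmu_master_cfg whose smendx[] array parallels fwspec->ids[], recording which stream map entry (SMR/S2CR pair) each ID was allocated, or INVALID_SMENDX if none yet. A minimal sketch of walking that mapping with for_each_cfg_sme(); the helper itself is hypothetical and only for illustration:

static void show_stream_map(struct iommu_fwspec *fwspec)
{
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (idx == INVALID_SMENDX)
			continue;	/* this ID has no SMR/S2CR pair yet */
		pr_info("stream ID 0x%x -> stream map entry %d\n",
			fwspec->ids[i], idx);
	}
}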
@@ -346,7 +359,11 @@ struct arm_smmu_device { | |||
346 | atomic_t irptndx; | 359 | atomic_t irptndx; |
347 | 360 | ||
348 | u32 num_mapping_groups; | 361 | u32 num_mapping_groups; |
349 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); | 362 | u16 streamid_mask; |
363 | u16 smr_mask_mask; | ||
364 | struct arm_smmu_smr *smrs; | ||
365 | struct arm_smmu_s2cr *s2crs; | ||
366 | struct mutex stream_map_mutex; | ||
350 | 367 | ||
351 | unsigned long va_size; | 368 | unsigned long va_size; |
352 | unsigned long ipa_size; | 369 | unsigned long ipa_size; |
@@ -357,9 +374,6 @@ struct arm_smmu_device { | |||
357 | u32 num_context_irqs; | 374 | u32 num_context_irqs; |
358 | unsigned int *irqs; | 375 | unsigned int *irqs; |
359 | 376 | ||
360 | struct list_head list; | ||
361 | struct rb_root masters; | ||
362 | |||
363 | u32 cavium_id_base; /* Specific to Cavium */ | 377 | u32 cavium_id_base; /* Specific to Cavium */ |
364 | }; | 378 | }; |
365 | 379 | ||
@@ -397,15 +411,6 @@ struct arm_smmu_domain { | |||
397 | struct iommu_domain domain; | 411 | struct iommu_domain domain; |
398 | }; | 412 | }; |
399 | 413 | ||
400 | struct arm_smmu_phandle_args { | ||
401 | struct device_node *np; | ||
402 | int args_count; | ||
403 | uint32_t args[MAX_MASTER_STREAMIDS]; | ||
404 | }; | ||
405 | |||
406 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | ||
407 | static LIST_HEAD(arm_smmu_devices); | ||
408 | |||
409 | struct arm_smmu_option_prop { | 414 | struct arm_smmu_option_prop { |
410 | u32 opt; | 415 | u32 opt; |
411 | const char *prop; | 416 | const char *prop; |
@@ -413,6 +418,8 @@ struct arm_smmu_option_prop { | |||
413 | 418 | ||
414 | static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); | 419 | static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); |
415 | 420 | ||
421 | static bool using_legacy_binding, using_generic_binding; | ||
422 | |||
416 | static struct arm_smmu_option_prop arm_smmu_options[] = { | 423 | static struct arm_smmu_option_prop arm_smmu_options[] = { |
417 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, | 424 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, |
418 | { 0, NULL}, | 425 | { 0, NULL}, |
@@ -444,131 +451,86 @@ static struct device_node *dev_get_dev_node(struct device *dev) | |||
444 | 451 | ||
445 | while (!pci_is_root_bus(bus)) | 452 | while (!pci_is_root_bus(bus)) |
446 | bus = bus->parent; | 453 | bus = bus->parent; |
447 | return bus->bridge->parent->of_node; | 454 | return of_node_get(bus->bridge->parent->of_node); |
448 | } | 455 | } |
449 | 456 | ||
450 | return dev->of_node; | 457 | return of_node_get(dev->of_node); |
451 | } | 458 | } |
452 | 459 | ||
453 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | 460 | static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) |
454 | struct device_node *dev_node) | ||
455 | { | 461 | { |
456 | struct rb_node *node = smmu->masters.rb_node; | 462 | *((__be32 *)data) = cpu_to_be32(alias); |
457 | 463 | return 0; /* Continue walking */ | |
458 | while (node) { | ||
459 | struct arm_smmu_master *master; | ||
460 | |||
461 | master = container_of(node, struct arm_smmu_master, node); | ||
462 | |||
463 | if (dev_node < master->of_node) | ||
464 | node = node->rb_left; | ||
465 | else if (dev_node > master->of_node) | ||
466 | node = node->rb_right; | ||
467 | else | ||
468 | return master; | ||
469 | } | ||
470 | |||
471 | return NULL; | ||
472 | } | 464 | } |
473 | 465 | ||
474 | static struct arm_smmu_master_cfg * | 466 | static int __find_legacy_master_phandle(struct device *dev, void *data) |
475 | find_smmu_master_cfg(struct device *dev) | ||
476 | { | 467 | { |
477 | struct arm_smmu_master_cfg *cfg = NULL; | 468 | struct of_phandle_iterator *it = *(void **)data; |
478 | struct iommu_group *group = iommu_group_get(dev); | 469 | struct device_node *np = it->node; |
479 | 470 | int err; | |
480 | if (group) { | 471 | |
481 | cfg = iommu_group_get_iommudata(group); | 472 | of_for_each_phandle(it, err, dev->of_node, "mmu-masters", |
482 | iommu_group_put(group); | 473 | "#stream-id-cells", 0) |
483 | } | 474 | if (it->node == np) { |
484 | 475 | *(void **)data = dev; | |
485 | return cfg; | 476 | return 1; |
477 | } | ||
478 | it->node = np; | ||
479 | return err == -ENOENT ? 0 : err; | ||
486 | } | 480 | } |
487 | 481 | ||
488 | static int insert_smmu_master(struct arm_smmu_device *smmu, | 482 | static struct platform_driver arm_smmu_driver; |
489 | struct arm_smmu_master *master) | 483 | static struct iommu_ops arm_smmu_ops; |
484 | |||
485 | static int arm_smmu_register_legacy_master(struct device *dev, | ||
486 | struct arm_smmu_device **smmu) | ||
490 | { | 487 | { |
491 | struct rb_node **new, *parent; | 488 | struct device *smmu_dev; |
492 | 489 | struct device_node *np; | |
493 | new = &smmu->masters.rb_node; | 490 | struct of_phandle_iterator it; |
494 | parent = NULL; | 491 | void *data = &it; |
495 | while (*new) { | 492 | u32 *sids; |
496 | struct arm_smmu_master *this | 493 | __be32 pci_sid; |
497 | = container_of(*new, struct arm_smmu_master, node); | 494 | int err; |
498 | 495 | ||
499 | parent = *new; | 496 | np = dev_get_dev_node(dev); |
500 | if (master->of_node < this->of_node) | 497 | if (!np || !of_find_property(np, "#stream-id-cells", NULL)) { |
501 | new = &((*new)->rb_left); | 498 | of_node_put(np); |
502 | else if (master->of_node > this->of_node) | 499 | return -ENODEV; |
503 | new = &((*new)->rb_right); | ||
504 | else | ||
505 | return -EEXIST; | ||
506 | } | 500 | } |
507 | 501 | ||
508 | rb_link_node(&master->node, parent, new); | 502 | it.node = np; |
509 | rb_insert_color(&master->node, &smmu->masters); | 503 | err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data, |
510 | return 0; | 504 | __find_legacy_master_phandle); |
511 | } | 505 | smmu_dev = data; |
512 | 506 | of_node_put(np); | |
513 | static int register_smmu_master(struct arm_smmu_device *smmu, | 507 | if (err == 0) |
514 | struct device *dev, | 508 | return -ENODEV; |
515 | struct arm_smmu_phandle_args *masterspec) | 509 | if (err < 0) |
516 | { | 510 | return err; |
517 | int i; | ||
518 | struct arm_smmu_master *master; | ||
519 | 511 | ||
520 | master = find_smmu_master(smmu, masterspec->np); | 512 | if (dev_is_pci(dev)) { |
521 | if (master) { | 513 | /* "mmu-masters" assumes Stream ID == Requester ID */ |
522 | dev_err(dev, | 514 | pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, |
523 | "rejecting multiple registrations for master device %s\n", | 515 | &pci_sid); |
524 | masterspec->np->name); | 516 | it.cur = &pci_sid; |
525 | return -EBUSY; | 517 | it.cur_count = 1; |
526 | } | 518 | } |
527 | 519 | ||
528 | if (masterspec->args_count > MAX_MASTER_STREAMIDS) { | 520 | err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, |
529 | dev_err(dev, | 521 | &arm_smmu_ops); |
530 | "reached maximum number (%d) of stream IDs for master device %s\n", | 522 | if (err) |
531 | MAX_MASTER_STREAMIDS, masterspec->np->name); | 523 | return err; |
532 | return -ENOSPC; | ||
533 | } | ||
534 | 524 | ||
535 | master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); | 525 | sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL); |
536 | if (!master) | 526 | if (!sids) |
537 | return -ENOMEM; | 527 | return -ENOMEM; |
538 | 528 | ||
539 | master->of_node = masterspec->np; | 529 | *smmu = dev_get_drvdata(smmu_dev); |
540 | master->cfg.num_streamids = masterspec->args_count; | 530 | of_phandle_iterator_args(&it, sids, it.cur_count); |
541 | 531 | err = iommu_fwspec_add_ids(dev, sids, it.cur_count); | |
542 | for (i = 0; i < master->cfg.num_streamids; ++i) { | 532 | kfree(sids); |
543 | u16 streamid = masterspec->args[i]; | 533 | return err; |
544 | |||
545 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && | ||
546 | (streamid >= smmu->num_mapping_groups)) { | ||
547 | dev_err(dev, | ||
548 | "stream ID for master device %s greater than maximum allowed (%d)\n", | ||
549 | masterspec->np->name, smmu->num_mapping_groups); | ||
550 | return -ERANGE; | ||
551 | } | ||
552 | master->cfg.streamids[i] = streamid; | ||
553 | } | ||
554 | return insert_smmu_master(smmu, master); | ||
555 | } | ||
556 | |||
557 | static struct arm_smmu_device *find_smmu_for_device(struct device *dev) | ||
558 | { | ||
559 | struct arm_smmu_device *smmu; | ||
560 | struct arm_smmu_master *master = NULL; | ||
561 | struct device_node *dev_node = dev_get_dev_node(dev); | ||
562 | |||
563 | spin_lock(&arm_smmu_devices_lock); | ||
564 | list_for_each_entry(smmu, &arm_smmu_devices, list) { | ||
565 | master = find_smmu_master(smmu, dev_node); | ||
566 | if (master) | ||
567 | break; | ||
568 | } | ||
569 | spin_unlock(&arm_smmu_devices_lock); | ||
570 | |||
571 | return master ? smmu : NULL; | ||
572 | } | 534 | } |
573 | 535 | ||
574 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) | 536 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) |
@@ -738,7 +700,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | |||
738 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | 700 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, |
739 | struct io_pgtable_cfg *pgtbl_cfg) | 701 | struct io_pgtable_cfg *pgtbl_cfg) |
740 | { | 702 | { |
741 | u32 reg; | 703 | u32 reg, reg2; |
742 | u64 reg64; | 704 | u64 reg64; |
743 | bool stage1; | 705 | bool stage1; |
744 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 706 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
@@ -781,14 +743,22 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
781 | 743 | ||
782 | /* TTBRs */ | 744 | /* TTBRs */ |
783 | if (stage1) { | 745 | if (stage1) { |
784 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | 746 | u16 asid = ARM_SMMU_CB_ASID(smmu, cfg); |
785 | 747 | ||
786 | reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT; | 748 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { |
787 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | 749 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; |
788 | 750 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0); | |
789 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; | 751 | reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; |
790 | reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT; | 752 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1); |
791 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); | 753 | writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); |
754 | } else { | ||
755 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | ||
756 | reg64 |= (u64)asid << TTBRn_ASID_SHIFT; | ||
757 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | ||
758 | reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; | ||
759 | reg64 |= (u64)asid << TTBRn_ASID_SHIFT; | ||
760 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1); | ||
761 | } | ||
792 | } else { | 762 | } else { |
793 | reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; | 763 | reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; |
794 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); | 764 | writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0); |
@@ -796,28 +766,36 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
796 | 766 | ||
797 | /* TTBCR */ | 767 | /* TTBCR */ |
798 | if (stage1) { | 768 | if (stage1) { |
799 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; | 769 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { |
800 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | 770 | reg = pgtbl_cfg->arm_v7s_cfg.tcr; |
801 | if (smmu->version > ARM_SMMU_V1) { | 771 | reg2 = 0; |
802 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; | 772 | } else { |
803 | reg |= TTBCR2_SEP_UPSTREAM; | 773 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; |
804 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); | 774 | reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; |
775 | reg2 |= TTBCR2_SEP_UPSTREAM; | ||
805 | } | 776 | } |
777 | if (smmu->version > ARM_SMMU_V1) | ||
778 | writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); | ||
806 | } else { | 779 | } else { |
807 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; | 780 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; |
808 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
809 | } | 781 | } |
782 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
810 | 783 | ||
811 | /* MAIRs (stage-1 only) */ | 784 | /* MAIRs (stage-1 only) */ |
812 | if (stage1) { | 785 | if (stage1) { |
813 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; | 786 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { |
787 | reg = pgtbl_cfg->arm_v7s_cfg.prrr; | ||
788 | reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr; | ||
789 | } else { | ||
790 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; | ||
791 | reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; | ||
792 | } | ||
814 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); | 793 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); |
815 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; | 794 | writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1); |
816 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); | ||
817 | } | 795 | } |
818 | 796 | ||
819 | /* SCTLR */ | 797 | /* SCTLR */ |
820 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | 798 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M; |
821 | if (stage1) | 799 | if (stage1) |
822 | reg |= SCTLR_S1_ASIDPNE; | 800 | reg |= SCTLR_S1_ASIDPNE; |
823 | #ifdef __BIG_ENDIAN | 801 | #ifdef __BIG_ENDIAN |
@@ -841,12 +819,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
841 | if (smmu_domain->smmu) | 819 | if (smmu_domain->smmu) |
842 | goto out_unlock; | 820 | goto out_unlock; |
843 | 821 | ||
844 | /* We're bypassing these SIDs, so don't allocate an actual context */ | ||
845 | if (domain->type == IOMMU_DOMAIN_DMA) { | ||
846 | smmu_domain->smmu = smmu; | ||
847 | goto out_unlock; | ||
848 | } | ||
849 | |||
850 | /* | 822 | /* |
851 | * Mapping the requested stage onto what we support is surprisingly | 823 | * Mapping the requested stage onto what we support is surprisingly |
852 | * complicated, mainly because the spec allows S1+S2 SMMUs without | 824 | * complicated, mainly because the spec allows S1+S2 SMMUs without |
@@ -880,6 +852,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
880 | */ | 852 | */ |
881 | if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) | 853 | if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) |
882 | cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; | 854 | cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; |
855 | if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) && | ||
856 | !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) && | ||
857 | (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && | ||
858 | (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) | ||
859 | cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; | ||
883 | if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && | 860 | if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && |
884 | (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | | 861 | (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | |
885 | ARM_SMMU_FEAT_FMT_AARCH64_16K | | 862 | ARM_SMMU_FEAT_FMT_AARCH64_16K | |
@@ -899,10 +876,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
899 | oas = smmu->ipa_size; | 876 | oas = smmu->ipa_size; |
900 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { | 877 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { |
901 | fmt = ARM_64_LPAE_S1; | 878 | fmt = ARM_64_LPAE_S1; |
902 | } else { | 879 | } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { |
903 | fmt = ARM_32_LPAE_S1; | 880 | fmt = ARM_32_LPAE_S1; |
904 | ias = min(ias, 32UL); | 881 | ias = min(ias, 32UL); |
905 | oas = min(oas, 40UL); | 882 | oas = min(oas, 40UL); |
883 | } else { | ||
884 | fmt = ARM_V7S; | ||
885 | ias = min(ias, 32UL); | ||
886 | oas = min(oas, 32UL); | ||
906 | } | 887 | } |
907 | break; | 888 | break; |
908 | case ARM_SMMU_DOMAIN_NESTED: | 889 | case ARM_SMMU_DOMAIN_NESTED: |
@@ -958,6 +939,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
958 | 939 | ||
959 | /* Update the domain's page sizes to reflect the page table format */ | 940 | /* Update the domain's page sizes to reflect the page table format */ |
960 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | 941 | domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; |
942 | domain->geometry.aperture_end = (1UL << ias) - 1; | ||
943 | domain->geometry.force_aperture = true; | ||
961 | 944 | ||
962 | /* Initialise the context bank with our page table cfg */ | 945 | /* Initialise the context bank with our page table cfg */ |
963 | arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); | 946 | arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); |
@@ -996,7 +979,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
996 | void __iomem *cb_base; | 979 | void __iomem *cb_base; |
997 | int irq; | 980 | int irq; |
998 | 981 | ||
999 | if (!smmu || domain->type == IOMMU_DOMAIN_DMA) | 982 | if (!smmu) |
1000 | return; | 983 | return; |
1001 | 984 | ||
1002 | /* | 985 | /* |
@@ -1030,8 +1013,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) | |||
1030 | if (!smmu_domain) | 1013 | if (!smmu_domain) |
1031 | return NULL; | 1014 | return NULL; |
1032 | 1015 | ||
1033 | if (type == IOMMU_DOMAIN_DMA && | 1016 | if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding || |
1034 | iommu_get_dma_cookie(&smmu_domain->domain)) { | 1017 | iommu_get_dma_cookie(&smmu_domain->domain))) { |
1035 | kfree(smmu_domain); | 1018 | kfree(smmu_domain); |
1036 | return NULL; | 1019 | return NULL; |
1037 | } | 1020 | } |
@@ -1055,162 +1038,197 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) | |||
1055 | kfree(smmu_domain); | 1038 | kfree(smmu_domain); |
1056 | } | 1039 | } |
1057 | 1040 | ||
1058 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | 1041 | static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) |
1059 | struct arm_smmu_master_cfg *cfg) | ||
1060 | { | 1042 | { |
1061 | int i; | 1043 | struct arm_smmu_smr *smr = smmu->smrs + idx; |
1062 | struct arm_smmu_smr *smrs; | 1044 | u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; |
1063 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1064 | 1045 | ||
1065 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) | 1046 | if (smr->valid) |
1066 | return 0; | 1047 | reg |= SMR_VALID; |
1048 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); | ||
1049 | } | ||
1067 | 1050 | ||
1068 | if (cfg->smrs) | 1051 | static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) |
1069 | return -EEXIST; | 1052 | { |
1053 | struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; | ||
1054 | u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT | | ||
1055 | (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | | ||
1056 | (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; | ||
1070 | 1057 | ||
1071 | smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); | 1058 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); |
1072 | if (!smrs) { | 1059 | } |
1073 | dev_err(smmu->dev, "failed to allocate %d SMRs\n", | ||
1074 | cfg->num_streamids); | ||
1075 | return -ENOMEM; | ||
1076 | } | ||
1077 | 1060 | ||
1078 | /* Allocate the SMRs on the SMMU */ | 1061 | static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) |
1079 | for (i = 0; i < cfg->num_streamids; ++i) { | 1062 | { |
1080 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, | 1063 | arm_smmu_write_s2cr(smmu, idx); |
1081 | smmu->num_mapping_groups); | 1064 | if (smmu->smrs) |
1082 | if (idx < 0) { | 1065 | arm_smmu_write_smr(smmu, idx); |
1083 | dev_err(smmu->dev, "failed to allocate free SMR\n"); | 1066 | } |
1084 | goto err_free_smrs; | ||
1085 | } | ||
1086 | 1067 | ||
1087 | smrs[i] = (struct arm_smmu_smr) { | 1068 | static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) |
1088 | .idx = idx, | 1069 | { |
1089 | .mask = 0, /* We don't currently share SMRs */ | 1070 | struct arm_smmu_smr *smrs = smmu->smrs; |
1090 | .id = cfg->streamids[i], | 1071 | int i, free_idx = -ENOSPC; |
1091 | }; | ||
1092 | } | ||
1093 | 1072 | ||
1094 | /* It worked! Now, poke the actual hardware */ | 1073 | /* Stream indexing is blissfully easy */ |
1095 | for (i = 0; i < cfg->num_streamids; ++i) { | 1074 | if (!smrs) |
1096 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | | 1075 | return id; |
1097 | smrs[i].mask << SMR_MASK_SHIFT; | ||
1098 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); | ||
1099 | } | ||
1100 | 1076 | ||
1101 | cfg->smrs = smrs; | 1077 | /* Validating SMRs is... less so */ |
1102 | return 0; | 1078 | for (i = 0; i < smmu->num_mapping_groups; ++i) { |
1079 | if (!smrs[i].valid) { | ||
1080 | /* | ||
1081 | * Note the first free entry we come across, which | ||
1082 | * we'll claim in the end if nothing else matches. | ||
1083 | */ | ||
1084 | if (free_idx < 0) | ||
1085 | free_idx = i; | ||
1086 | continue; | ||
1087 | } | ||
1088 | /* | ||
1089 | * If the new entry is _entirely_ matched by an existing entry, | ||
1090 | * then reuse that, with the guarantee that there also cannot | ||
1091 | * be any subsequent conflicting entries. In normal use we'd | ||
1092 | * expect simply identical entries for this case, but there's | ||
1093 | * no harm in accommodating the generalisation. | ||
1094 | */ | ||
1095 | if ((mask & smrs[i].mask) == mask && | ||
1096 | !((id ^ smrs[i].id) & ~smrs[i].mask)) | ||
1097 | return i; | ||
1098 | /* | ||
1099 | * If the new entry has any other overlap with an existing one, | ||
1100 | * though, then there always exists at least one stream ID | ||
1101 | * which would cause a conflict, and we can't allow that risk. | ||
1102 | */ | ||
1103 | if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask))) | ||
1104 | return -EINVAL; | ||
1105 | } | ||
1103 | 1106 | ||
1104 | err_free_smrs: | 1107 | return free_idx; |
1105 | while (--i >= 0) | ||
1106 | __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); | ||
1107 | kfree(smrs); | ||
1108 | return -ENOSPC; | ||
1109 | } | 1108 | } |
1110 | 1109 | ||
1111 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | 1110 | static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) |
1112 | struct arm_smmu_master_cfg *cfg) | ||
1113 | { | 1111 | { |
1114 | int i; | 1112 | if (--smmu->s2crs[idx].count) |
1115 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1113 | return false; |
1116 | struct arm_smmu_smr *smrs = cfg->smrs; | ||
1117 | |||
1118 | if (!smrs) | ||
1119 | return; | ||
1120 | |||
1121 | /* Invalidate the SMRs before freeing back to the allocator */ | ||
1122 | for (i = 0; i < cfg->num_streamids; ++i) { | ||
1123 | u8 idx = smrs[i].idx; | ||
1124 | 1114 | ||
1125 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); | 1115 | smmu->s2crs[idx] = s2cr_init_val; |
1126 | __arm_smmu_free_bitmap(smmu->smr_map, idx); | 1116 | if (smmu->smrs) |
1127 | } | 1117 | smmu->smrs[idx].valid = false; |
1128 | 1118 | ||
1129 | cfg->smrs = NULL; | 1119 | return true; |
1130 | kfree(smrs); | ||
1131 | } | 1120 | } |
1132 | 1121 | ||
1133 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | 1122 | static int arm_smmu_master_alloc_smes(struct device *dev) |
1134 | struct arm_smmu_master_cfg *cfg) | ||
1135 | { | 1123 | { |
1136 | int i, ret; | 1124 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1137 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1125 | struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; |
1138 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1126 | struct arm_smmu_device *smmu = cfg->smmu; |
1127 | struct arm_smmu_smr *smrs = smmu->smrs; | ||
1128 | struct iommu_group *group; | ||
1129 | int i, idx, ret; | ||
1139 | 1130 | ||
1140 | /* | 1131 | mutex_lock(&smmu->stream_map_mutex); |
1141 | * FIXME: This won't be needed once we have IOMMU-backed DMA ops | 1132 | /* Figure out a viable stream map entry allocation */ |
1142 | * for all devices behind the SMMU. Note that we need to take | 1133 | for_each_cfg_sme(fwspec, i, idx) { |
1143 | * care configuring SMRs for devices both a platform_device and | 1134 | u16 sid = fwspec->ids[i]; |
1144 | * and a PCI device (i.e. a PCI host controller) | 1135 | u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; |
1145 | */ | ||
1146 | if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) | ||
1147 | return 0; | ||
1148 | 1136 | ||
1149 | /* Devices in an IOMMU group may already be configured */ | 1137 | if (idx != INVALID_SMENDX) { |
1150 | ret = arm_smmu_master_configure_smrs(smmu, cfg); | 1138 | ret = -EEXIST; |
1151 | if (ret) | 1139 | goto out_err; |
1152 | return ret == -EEXIST ? 0 : ret; | 1140 | } |
1153 | 1141 | ||
1154 | for (i = 0; i < cfg->num_streamids; ++i) { | 1142 | ret = arm_smmu_find_sme(smmu, sid, mask); |
1155 | u32 idx, s2cr; | 1143 | if (ret < 0) |
1144 | goto out_err; | ||
1145 | |||
1146 | idx = ret; | ||
1147 | if (smrs && smmu->s2crs[idx].count == 0) { | ||
1148 | smrs[idx].id = sid; | ||
1149 | smrs[idx].mask = mask; | ||
1150 | smrs[idx].valid = true; | ||
1151 | } | ||
1152 | smmu->s2crs[idx].count++; | ||
1153 | cfg->smendx[i] = (s16)idx; | ||
1154 | } | ||
1156 | 1155 | ||
1157 | idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; | 1156 | group = iommu_group_get_for_dev(dev); |
1158 | s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV | | 1157 | if (!group) |
1159 | (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); | 1158 | group = ERR_PTR(-ENOMEM); |
1160 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); | 1159 | if (IS_ERR(group)) { |
1160 | ret = PTR_ERR(group); | ||
1161 | goto out_err; | ||
1161 | } | 1162 | } |
1163 | iommu_group_put(group); | ||
1162 | 1164 | ||
1165 | /* It worked! Now, poke the actual hardware */ | ||
1166 | for_each_cfg_sme(fwspec, i, idx) { | ||
1167 | arm_smmu_write_sme(smmu, idx); | ||
1168 | smmu->s2crs[idx].group = group; | ||
1169 | } | ||
1170 | |||
1171 | mutex_unlock(&smmu->stream_map_mutex); | ||
1163 | return 0; | 1172 | return 0; |
1173 | |||
1174 | out_err: | ||
1175 | while (i--) { | ||
1176 | arm_smmu_free_sme(smmu, cfg->smendx[i]); | ||
1177 | cfg->smendx[i] = INVALID_SMENDX; | ||
1178 | } | ||
1179 | mutex_unlock(&smmu->stream_map_mutex); | ||
1180 | return ret; | ||
1164 | } | 1181 | } |
1165 | 1182 | ||
1166 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | 1183 | static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec) |
1167 | struct arm_smmu_master_cfg *cfg) | ||
1168 | { | 1184 | { |
1169 | int i; | 1185 | struct arm_smmu_device *smmu = fwspec_smmu(fwspec); |
1170 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1186 | struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; |
1171 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1187 | int i, idx; |
1172 | |||
1173 | /* An IOMMU group is torn down by the first device to be removed */ | ||
1174 | if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs) | ||
1175 | return; | ||
1176 | 1188 | ||
1177 | /* | 1189 | mutex_lock(&smmu->stream_map_mutex); |
1178 | * We *must* clear the S2CR first, because freeing the SMR means | 1190 | for_each_cfg_sme(fwspec, i, idx) { |
1179 | * that it can be re-allocated immediately. | 1191 | if (arm_smmu_free_sme(smmu, idx)) |
1180 | */ | 1192 | arm_smmu_write_sme(smmu, idx); |
1181 | for (i = 0; i < cfg->num_streamids; ++i) { | 1193 | cfg->smendx[i] = INVALID_SMENDX; |
1182 | u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; | ||
1183 | u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; | ||
1184 | |||
1185 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx)); | ||
1186 | } | 1194 | } |
1187 | 1195 | mutex_unlock(&smmu->stream_map_mutex); | |
1188 | arm_smmu_master_free_smrs(smmu, cfg); | ||
1189 | } | 1196 | } |
1190 | 1197 | ||
1191 | static void arm_smmu_detach_dev(struct device *dev, | 1198 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, |
1192 | struct arm_smmu_master_cfg *cfg) | 1199 | struct iommu_fwspec *fwspec) |
1193 | { | 1200 | { |
1194 | struct iommu_domain *domain = dev->archdata.iommu; | 1201 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1195 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1202 | struct arm_smmu_s2cr *s2cr = smmu->s2crs; |
1203 | enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; | ||
1204 | u8 cbndx = smmu_domain->cfg.cbndx; | ||
1205 | int i, idx; | ||
1206 | |||
1207 | for_each_cfg_sme(fwspec, i, idx) { | ||
1208 | if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) | ||
1209 | continue; | ||
1196 | 1210 | ||
1197 | dev->archdata.iommu = NULL; | 1211 | s2cr[idx].type = type; |
1198 | arm_smmu_domain_remove_master(smmu_domain, cfg); | 1212 | s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; |
1213 | s2cr[idx].cbndx = cbndx; | ||
1214 | arm_smmu_write_s2cr(smmu, idx); | ||
1215 | } | ||
1216 | return 0; | ||
1199 | } | 1217 | } |
1200 | 1218 | ||
1201 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1219 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
1202 | { | 1220 | { |
1203 | int ret; | 1221 | int ret; |
1204 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 1222 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1205 | struct arm_smmu_device *smmu; | 1223 | struct arm_smmu_device *smmu; |
1206 | struct arm_smmu_master_cfg *cfg; | 1224 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
1207 | 1225 | ||
1208 | smmu = find_smmu_for_device(dev); | 1226 | if (!fwspec || fwspec->ops != &arm_smmu_ops) { |
1209 | if (!smmu) { | ||
1210 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); | 1227 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); |
1211 | return -ENXIO; | 1228 | return -ENXIO; |
1212 | } | 1229 | } |
1213 | 1230 | ||
1231 | smmu = fwspec_smmu(fwspec); | ||
1214 | /* Ensure that the domain is finalised */ | 1232 | /* Ensure that the domain is finalised */ |
1215 | ret = arm_smmu_init_domain_context(domain, smmu); | 1233 | ret = arm_smmu_init_domain_context(domain, smmu); |
1216 | if (ret < 0) | 1234 | if (ret < 0) |
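The compatibility rules in arm_smmu_find_sme() above are worth a worked example. An SMR (id, mask) matches every stream s for which ((s ^ id) & ~mask) == 0, i.e. bits covered by the mask are don't-care. A minimal sketch of the two tests, illustrative only and not part of the patch:

/* New entry entirely covered by an existing one: safe to share its index */
static bool smr_contains(u16 old_id, u16 old_mask, u16 new_id, u16 new_mask)
{
	return (new_mask & old_mask) == new_mask &&
	       !((new_id ^ old_id) & ~old_mask);
}

/* Some stream ID would match both entries: a potential conflict */
static bool smr_overlaps(u16 old_id, u16 old_mask, u16 new_id, u16 new_mask)
{
	return !((new_id ^ old_id) & ~(old_mask | new_mask));
}

With an existing SMR {id = 0x400, mask = 0xff} (streams 0x400-0x4ff), made-up values behave as follows: {0x420, 0x0f} is contained and reuses the same index; {0x4c0, 0x3ff} overlaps without being contained and is rejected with -EINVAL; {0x800, 0x0f} is disjoint and simply claims a free entry.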
@@ -1228,18 +1246,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1228 | } | 1246 | } |
1229 | 1247 | ||
1230 | /* Looks ok, so add the device to the domain */ | 1248 | /* Looks ok, so add the device to the domain */ |
1231 | cfg = find_smmu_master_cfg(dev); | 1249 | return arm_smmu_domain_add_master(smmu_domain, fwspec); |
1232 | if (!cfg) | ||
1233 | return -ENODEV; | ||
1234 | |||
1235 | /* Detach the dev from its current domain */ | ||
1236 | if (dev->archdata.iommu) | ||
1237 | arm_smmu_detach_dev(dev, cfg); | ||
1238 | |||
1239 | ret = arm_smmu_domain_add_master(smmu_domain, cfg); | ||
1240 | if (!ret) | ||
1241 | dev->archdata.iommu = domain; | ||
1242 | return ret; | ||
1243 | } | 1250 | } |
1244 | 1251 | ||
1245 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | 1252 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, |
@@ -1358,110 +1365,113 @@ static bool arm_smmu_capable(enum iommu_cap cap) | |||
1358 | } | 1365 | } |
1359 | } | 1366 | } |
1360 | 1367 | ||
1361 | static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) | 1368 | static int arm_smmu_match_node(struct device *dev, void *data) |
1362 | { | 1369 | { |
1363 | *((u16 *)data) = alias; | 1370 | return dev->of_node == data; |
1364 | return 0; /* Continue walking */ | ||
1365 | } | 1371 | } |
1366 | 1372 | ||
1367 | static void __arm_smmu_release_pci_iommudata(void *data) | 1373 | static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np) |
1368 | { | 1374 | { |
1369 | kfree(data); | 1375 | struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, |
1376 | np, arm_smmu_match_node); | ||
1377 | put_device(dev); | ||
1378 | return dev ? dev_get_drvdata(dev) : NULL; | ||
1370 | } | 1379 | } |
1371 | 1380 | ||
1372 | static int arm_smmu_init_pci_device(struct pci_dev *pdev, | 1381 | static int arm_smmu_add_device(struct device *dev) |
1373 | struct iommu_group *group) | ||
1374 | { | 1382 | { |
1383 | struct arm_smmu_device *smmu; | ||
1375 | struct arm_smmu_master_cfg *cfg; | 1384 | struct arm_smmu_master_cfg *cfg; |
1376 | u16 sid; | 1385 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1377 | int i; | 1386 | int i, ret; |
1378 | |||
1379 | cfg = iommu_group_get_iommudata(group); | ||
1380 | if (!cfg) { | ||
1381 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | ||
1382 | if (!cfg) | ||
1383 | return -ENOMEM; | ||
1384 | 1387 | ||
1385 | iommu_group_set_iommudata(group, cfg, | 1388 | if (using_legacy_binding) { |
1386 | __arm_smmu_release_pci_iommudata); | 1389 | ret = arm_smmu_register_legacy_master(dev, &smmu); |
1390 | fwspec = dev->iommu_fwspec; | ||
1391 | if (ret) | ||
1392 | goto out_free; | ||
1393 | } else if (fwspec) { | ||
1394 | smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); | ||
1395 | } else { | ||
1396 | return -ENODEV; | ||
1387 | } | 1397 | } |
1388 | 1398 | ||
1389 | if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) | 1399 | ret = -EINVAL; |
1390 | return -ENOSPC; | 1400 | for (i = 0; i < fwspec->num_ids; i++) { |
1401 | u16 sid = fwspec->ids[i]; | ||
1402 | u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; | ||
1391 | 1403 | ||
1392 | /* | 1404 | if (sid & ~smmu->streamid_mask) { |
1393 | * Assume Stream ID == Requester ID for now. | 1405 | dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", |
1394 | * We need a way to describe the ID mappings in FDT. | 1406 | sid, smmu->streamid_mask); |
1395 | */ | 1407 | goto out_free; |
1396 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); | 1408 | } |
1397 | for (i = 0; i < cfg->num_streamids; ++i) | 1409 | if (mask & ~smmu->smr_mask_mask) { |
1398 | if (cfg->streamids[i] == sid) | 1410 | dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", |
1399 | break; | 1411 | sid, smmu->smr_mask_mask); |
1400 | 1412 | goto out_free; | |
1401 | /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ | 1413 | } |
1402 | if (i == cfg->num_streamids) | 1414 | } |
1403 | cfg->streamids[cfg->num_streamids++] = sid; | ||
1404 | |||
1405 | return 0; | ||
1406 | } | ||
1407 | |||
1408 | static int arm_smmu_init_platform_device(struct device *dev, | ||
1409 | struct iommu_group *group) | ||
1410 | { | ||
1411 | struct arm_smmu_device *smmu = find_smmu_for_device(dev); | ||
1412 | struct arm_smmu_master *master; | ||
1413 | 1415 | ||
1414 | if (!smmu) | 1416 | ret = -ENOMEM; |
1415 | return -ENODEV; | 1417 | cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]), |
1418 | GFP_KERNEL); | ||
1419 | if (!cfg) | ||
1420 | goto out_free; | ||
1416 | 1421 | ||
1417 | master = find_smmu_master(smmu, dev->of_node); | 1422 | cfg->smmu = smmu; |
1418 | if (!master) | 1423 | fwspec->iommu_priv = cfg; |
1419 | return -ENODEV; | 1424 | while (i--) |
1425 | cfg->smendx[i] = INVALID_SMENDX; | ||
1420 | 1426 | ||
1421 | iommu_group_set_iommudata(group, &master->cfg, NULL); | 1427 | ret = arm_smmu_master_alloc_smes(dev); |
1428 | if (ret) | ||
1429 | goto out_free; | ||
1422 | 1430 | ||
1423 | return 0; | 1431 | return 0; |
1424 | } | ||
1425 | 1432 | ||
1426 | static int arm_smmu_add_device(struct device *dev) | 1433 | out_free: |
1427 | { | 1434 | if (fwspec) |
1428 | struct iommu_group *group; | 1435 | kfree(fwspec->iommu_priv); |
1429 | 1436 | iommu_fwspec_free(dev); | |
1430 | group = iommu_group_get_for_dev(dev); | 1437 | return ret; |
1431 | if (IS_ERR(group)) | ||
1432 | return PTR_ERR(group); | ||
1433 | |||
1434 | iommu_group_put(group); | ||
1435 | return 0; | ||
1436 | } | 1438 | } |
1437 | 1439 | ||
1438 | static void arm_smmu_remove_device(struct device *dev) | 1440 | static void arm_smmu_remove_device(struct device *dev) |
1439 | { | 1441 | { |
1442 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1443 | |||
1444 | if (!fwspec || fwspec->ops != &arm_smmu_ops) | ||
1445 | return; | ||
1446 | |||
1447 | arm_smmu_master_free_smes(fwspec); | ||
1440 | iommu_group_remove_device(dev); | 1448 | iommu_group_remove_device(dev); |
1449 | kfree(fwspec->iommu_priv); | ||
1450 | iommu_fwspec_free(dev); | ||
1441 | } | 1451 | } |
1442 | 1452 | ||
1443 | static struct iommu_group *arm_smmu_device_group(struct device *dev) | 1453 | static struct iommu_group *arm_smmu_device_group(struct device *dev) |
1444 | { | 1454 | { |
1445 | struct iommu_group *group; | 1455 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1446 | int ret; | 1456 | struct arm_smmu_device *smmu = fwspec_smmu(fwspec); |
1457 | struct iommu_group *group = NULL; | ||
1458 | int i, idx; | ||
1447 | 1459 | ||
1448 | if (dev_is_pci(dev)) | 1460 | for_each_cfg_sme(fwspec, i, idx) { |
1449 | group = pci_device_group(dev); | 1461 | if (group && smmu->s2crs[idx].group && |
1450 | else | 1462 | group != smmu->s2crs[idx].group) |
1451 | group = generic_device_group(dev); | 1463 | return ERR_PTR(-EINVAL); |
1464 | |||
1465 | group = smmu->s2crs[idx].group; | ||
1466 | } | ||
1452 | 1467 | ||
1453 | if (IS_ERR(group)) | 1468 | if (group) |
1454 | return group; | 1469 | return group; |
1455 | 1470 | ||
1456 | if (dev_is_pci(dev)) | 1471 | if (dev_is_pci(dev)) |
1457 | ret = arm_smmu_init_pci_device(to_pci_dev(dev), group); | 1472 | group = pci_device_group(dev); |
1458 | else | 1473 | else |
1459 | ret = arm_smmu_init_platform_device(dev, group); | 1474 | group = generic_device_group(dev); |
1460 | |||
1461 | if (ret) { | ||
1462 | iommu_group_put(group); | ||
1463 | group = ERR_PTR(ret); | ||
1464 | } | ||
1465 | 1475 | ||
1466 | return group; | 1476 | return group; |
1467 | } | 1477 | } |
@@ -1510,6 +1520,19 @@ out_unlock: | |||
1510 | return ret; | 1520 | return ret; |
1511 | } | 1521 | } |
1512 | 1522 | ||
1523 | static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) | ||
1524 | { | ||
1525 | u32 fwid = 0; | ||
1526 | |||
1527 | if (args->args_count > 0) | ||
1528 | fwid |= (u16)args->args[0]; | ||
1529 | |||
1530 | if (args->args_count > 1) | ||
1531 | fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; | ||
1532 | |||
1533 | return iommu_fwspec_add_ids(dev, &fwid, 1); | ||
1534 | } | ||
1535 | |||
1513 | static struct iommu_ops arm_smmu_ops = { | 1536 | static struct iommu_ops arm_smmu_ops = { |
1514 | .capable = arm_smmu_capable, | 1537 | .capable = arm_smmu_capable, |
1515 | .domain_alloc = arm_smmu_domain_alloc, | 1538 | .domain_alloc = arm_smmu_domain_alloc, |
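arm_smmu_of_xlate() above folds a one- or two-cell "iommus" specifier into a single 32-bit firmware ID: the stream ID sits in bits [15:0] and, for stream-matching SMMUs, the SMR mask in bits [31:16] (SMR_MASK_SHIFT is 16). arm_smmu_master_alloc_smes() unpacks it the same way. A small sketch with made-up values, not part of the patch:

/* e.g. the DT entry  iommus = <&smmu 0x400 0xff>;  becomes fwid 0x00ff0400 */
static void unpack_fwid(u32 fwid)
{
	u16 sid  = fwid;			/* 0x0400 */
	u16 mask = fwid >> SMR_MASK_SHIFT;	/* 0x00ff */

	pr_info("sid 0x%x, smr mask 0x%x\n", sid, mask);
}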
@@ -1524,6 +1547,7 @@ static struct iommu_ops arm_smmu_ops = { | |||
1524 | .device_group = arm_smmu_device_group, | 1547 | .device_group = arm_smmu_device_group, |
1525 | .domain_get_attr = arm_smmu_domain_get_attr, | 1548 | .domain_get_attr = arm_smmu_domain_get_attr, |
1526 | .domain_set_attr = arm_smmu_domain_set_attr, | 1549 | .domain_set_attr = arm_smmu_domain_set_attr, |
1550 | .of_xlate = arm_smmu_of_xlate, | ||
1527 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ | 1551 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
1528 | }; | 1552 | }; |
1529 | 1553 | ||
@@ -1531,19 +1555,19 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1531 | { | 1555 | { |
1532 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1556 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1533 | void __iomem *cb_base; | 1557 | void __iomem *cb_base; |
1534 | int i = 0; | 1558 | int i; |
1535 | u32 reg, major; | 1559 | u32 reg, major; |
1536 | 1560 | ||
1537 | /* clear global FSR */ | 1561 | /* clear global FSR */ |
1538 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | 1562 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); |
1539 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | 1563 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); |
1540 | 1564 | ||
1541 | /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */ | 1565 | /* |
1542 | reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS; | 1566 | * Reset stream mapping groups: Initial values mark all SMRn as |
1543 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | 1567 | * invalid and all S2CRn as bypass unless overridden. |
1544 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); | 1568 | */ |
1545 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i)); | 1569 | for (i = 0; i < smmu->num_mapping_groups; ++i) |
1546 | } | 1570 | arm_smmu_write_sme(smmu, i); |
1547 | 1571 | ||
1548 | /* | 1572 | /* |
1549 | * Before clearing ARM_MMU500_ACTLR_CPRE, need to | 1573 | * Before clearing ARM_MMU500_ACTLR_CPRE, need to |
@@ -1632,6 +1656,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1632 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1656 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1633 | u32 id; | 1657 | u32 id; |
1634 | bool cttw_dt, cttw_reg; | 1658 | bool cttw_dt, cttw_reg; |
1659 | int i; | ||
1635 | 1660 | ||
1636 | dev_notice(smmu->dev, "probing hardware configuration...\n"); | 1661 | dev_notice(smmu->dev, "probing hardware configuration...\n"); |
1637 | dev_notice(smmu->dev, "SMMUv%d with:\n", | 1662 | dev_notice(smmu->dev, "SMMUv%d with:\n", |
@@ -1690,39 +1715,55 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1690 | dev_notice(smmu->dev, | 1715 | dev_notice(smmu->dev, |
1691 | "\t(IDR0.CTTW overridden by dma-coherent property)\n"); | 1716 | "\t(IDR0.CTTW overridden by dma-coherent property)\n"); |
1692 | 1717 | ||
1718 | /* Max. number of entries we have for stream matching/indexing */ | ||
1719 | size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); | ||
1720 | smmu->streamid_mask = size - 1; | ||
1693 | if (id & ID0_SMS) { | 1721 | if (id & ID0_SMS) { |
1694 | u32 smr, sid, mask; | 1722 | u32 smr; |
1695 | 1723 | ||
1696 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | 1724 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; |
1697 | smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & | 1725 | size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; |
1698 | ID0_NUMSMRG_MASK; | 1726 | if (size == 0) { |
1699 | if (smmu->num_mapping_groups == 0) { | ||
1700 | dev_err(smmu->dev, | 1727 | dev_err(smmu->dev, |
1701 | "stream-matching supported, but no SMRs present!\n"); | 1728 | "stream-matching supported, but no SMRs present!\n"); |
1702 | return -ENODEV; | 1729 | return -ENODEV; |
1703 | } | 1730 | } |
1704 | 1731 | ||
1705 | smr = SMR_MASK_MASK << SMR_MASK_SHIFT; | 1732 | /* |
1706 | smr |= (SMR_ID_MASK << SMR_ID_SHIFT); | 1733 | * SMR.ID bits may not be preserved if the corresponding MASK |
1734 | * bits are set, so check each one separately. We can reject | ||
1735 | * masters later if they try to claim IDs outside these masks. | ||
1736 | */ | ||
1737 | smr = smmu->streamid_mask << SMR_ID_SHIFT; | ||
1707 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | 1738 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); |
1708 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | 1739 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); |
1740 | smmu->streamid_mask = smr >> SMR_ID_SHIFT; | ||
1709 | 1741 | ||
1710 | mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; | 1742 | smr = smmu->streamid_mask << SMR_MASK_SHIFT; |
1711 | sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; | 1743 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); |
1712 | if ((mask & sid) != sid) { | 1744 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); |
1713 | dev_err(smmu->dev, | 1745 | smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; |
1714 | "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", | 1746 | |
1715 | mask, sid); | 1747 | /* Zero-initialised to mark as invalid */ |
1716 | return -ENODEV; | 1748 | smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), |
1717 | } | 1749 | GFP_KERNEL); |
1750 | if (!smmu->smrs) | ||
1751 | return -ENOMEM; | ||
1718 | 1752 | ||
1719 | dev_notice(smmu->dev, | 1753 | dev_notice(smmu->dev, |
1720 | "\tstream matching with %u register groups, mask 0x%x", | 1754 | "\tstream matching with %lu register groups, mask 0x%x", |
1721 | smmu->num_mapping_groups, mask); | 1755 | size, smmu->smr_mask_mask); |
1722 | } else { | ||
1723 | smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & | ||
1724 | ID0_NUMSIDB_MASK; | ||
1725 | } | 1756 | } |
1757 | /* s2cr->type == 0 means translation, so initialise explicitly */ | ||
1758 | smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), | ||
1759 | GFP_KERNEL); | ||
1760 | if (!smmu->s2crs) | ||
1761 | return -ENOMEM; | ||
1762 | for (i = 0; i < size; i++) | ||
1763 | smmu->s2crs[i] = s2cr_init_val; | ||
1764 | |||
1765 | smmu->num_mapping_groups = size; | ||
1766 | mutex_init(&smmu->stream_map_mutex); | ||
1726 | 1767 | ||
1727 | if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { | 1768 | if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { |
1728 | smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; | 1769 | smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; |
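The ID registers only say how many stream ID bits exist; which SMR ID and MASK bits are actually writable is discovered by writing an all-ones pattern to SMR(0) and reading it back, one field at a time since, as the comment above notes, ID bits may not be preserved while MASK bits are set. A minimal sketch of that probe, with example numbers that are illustrative only:

/* Write the widest candidate into one SMR field and keep whatever sticks */
static u16 probe_smr_field(void __iomem *smr0, u16 candidate, unsigned int shift)
{
	writel_relaxed((u32)candidate << shift, smr0);
	return readl_relaxed(smr0) >> shift;
}

For instance, a candidate of 0xffff in the ID field reading back as 0x7fff means only 15 stream ID bits are implemented (streamid_mask = 0x7fff), and probing 0x7fff in the MASK field and reading back 0x0fff gives smr_mask_mask = 0x0fff; arm_smmu_add_device() later rejects any master whose ID or mask falls outside those limits.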
@@ -1855,15 +1896,24 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match); | |||
1855 | 1896 | ||
1856 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) | 1897 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) |
1857 | { | 1898 | { |
1858 | const struct of_device_id *of_id; | ||
1859 | const struct arm_smmu_match_data *data; | 1899 | const struct arm_smmu_match_data *data; |
1860 | struct resource *res; | 1900 | struct resource *res; |
1861 | struct arm_smmu_device *smmu; | 1901 | struct arm_smmu_device *smmu; |
1862 | struct device *dev = &pdev->dev; | 1902 | struct device *dev = &pdev->dev; |
1863 | struct rb_node *node; | ||
1864 | struct of_phandle_iterator it; | ||
1865 | struct arm_smmu_phandle_args *masterspec; | ||
1866 | int num_irqs, i, err; | 1903 | int num_irqs, i, err; |
1904 | bool legacy_binding; | ||
1905 | |||
1906 | legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); | ||
1907 | if (legacy_binding && !using_generic_binding) { | ||
1908 | if (!using_legacy_binding) | ||
1909 | pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); | ||
1910 | using_legacy_binding = true; | ||
1911 | } else if (!legacy_binding && !using_legacy_binding) { | ||
1912 | using_generic_binding = true; | ||
1913 | } else { | ||
1914 | dev_err(dev, "not probing due to mismatched DT properties\n"); | ||
1915 | return -ENODEV; | ||
1916 | } | ||
1867 | 1917 | ||
1868 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | 1918 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); |
1869 | if (!smmu) { | 1919 | if (!smmu) { |
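Probe now arbitrates between the deprecated "mmu-masters" binding and the generic "iommus" binding: whichever style the first SMMU to probe is described with becomes system-wide, and a later SMMU described the other way fails its probe rather than mix the two models. A hypothetical condensation of that check, for illustration only:

static int pick_binding_style(bool has_mmu_masters)
{
	if (has_mmu_masters && !using_generic_binding) {
		using_legacy_binding = true;	/* deprecated "mmu-masters" */
		return 0;
	}
	if (!has_mmu_masters && !using_legacy_binding) {
		using_generic_binding = true;	/* generic "iommus"/#iommu-cells */
		return 0;
	}
	return -ENODEV;				/* mixed bindings: refuse to probe */
}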
@@ -1872,8 +1922,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1872 | } | 1922 | } |
1873 | smmu->dev = dev; | 1923 | smmu->dev = dev; |
1874 | 1924 | ||
1875 | of_id = of_match_node(arm_smmu_of_match, dev->of_node); | 1925 | data = of_device_get_match_data(dev); |
1876 | data = of_id->data; | ||
1877 | smmu->version = data->version; | 1926 | smmu->version = data->version; |
1878 | smmu->model = data->model; | 1927 | smmu->model = data->model; |
1879 | 1928 | ||
@@ -1923,37 +1972,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1923 | if (err) | 1972 | if (err) |
1924 | return err; | 1973 | return err; |
1925 | 1974 | ||
1926 | i = 0; | ||
1927 | smmu->masters = RB_ROOT; | ||
1928 | |||
1929 | err = -ENOMEM; | ||
1930 | /* No need to zero the memory for masterspec */ | ||
1931 | masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL); | ||
1932 | if (!masterspec) | ||
1933 | goto out_put_masters; | ||
1934 | |||
1935 | of_for_each_phandle(&it, err, dev->of_node, | ||
1936 | "mmu-masters", "#stream-id-cells", 0) { | ||
1937 | int count = of_phandle_iterator_args(&it, masterspec->args, | ||
1938 | MAX_MASTER_STREAMIDS); | ||
1939 | masterspec->np = of_node_get(it.node); | ||
1940 | masterspec->args_count = count; | ||
1941 | |||
1942 | err = register_smmu_master(smmu, dev, masterspec); | ||
1943 | if (err) { | ||
1944 | dev_err(dev, "failed to add master %s\n", | ||
1945 | masterspec->np->name); | ||
1946 | kfree(masterspec); | ||
1947 | goto out_put_masters; | ||
1948 | } | ||
1949 | |||
1950 | i++; | ||
1951 | } | ||
1952 | |||
1953 | dev_notice(dev, "registered %d master devices\n", i); | ||
1954 | |||
1955 | kfree(masterspec); | ||
1956 | |||
1957 | parse_driver_options(smmu); | 1975 | parse_driver_options(smmu); |
1958 | 1976 | ||
1959 | if (smmu->version == ARM_SMMU_V2 && | 1977 | if (smmu->version == ARM_SMMU_V2 && |
@@ -1961,8 +1979,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1961 | dev_err(dev, | 1979 | dev_err(dev, |
1962 | "found only %d context interrupt(s) but %d required\n", | 1980 | "found only %d context interrupt(s) but %d required\n", |
1963 | smmu->num_context_irqs, smmu->num_context_banks); | 1981 | smmu->num_context_irqs, smmu->num_context_banks); |
1964 | err = -ENODEV; | 1982 | return -ENODEV; |
1965 | goto out_put_masters; | ||
1966 | } | 1983 | } |
1967 | 1984 | ||
1968 | for (i = 0; i < smmu->num_global_irqs; ++i) { | 1985 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
@@ -1974,59 +1991,39 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1974 | if (err) { | 1991 | if (err) { |
1975 | dev_err(dev, "failed to request global IRQ %d (%u)\n", | 1992 | dev_err(dev, "failed to request global IRQ %d (%u)\n", |
1976 | i, smmu->irqs[i]); | 1993 | i, smmu->irqs[i]); |
1977 | goto out_put_masters; | 1994 | return err; |
1978 | } | 1995 | } |
1979 | } | 1996 | } |
1980 | 1997 | ||
1981 | INIT_LIST_HEAD(&smmu->list); | 1998 | of_iommu_set_ops(dev->of_node, &arm_smmu_ops); |
1982 | spin_lock(&arm_smmu_devices_lock); | 1999 | platform_set_drvdata(pdev, smmu); |
1983 | list_add(&smmu->list, &arm_smmu_devices); | ||
1984 | spin_unlock(&arm_smmu_devices_lock); | ||
1985 | |||
1986 | arm_smmu_device_reset(smmu); | 2000 | arm_smmu_device_reset(smmu); |
1987 | return 0; | ||
1988 | 2001 | ||
1989 | out_put_masters: | 2002 | /* Oh, for a proper bus abstraction */ |
1990 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | 2003 | if (!iommu_present(&platform_bus_type)) |
1991 | struct arm_smmu_master *master | 2004 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); |
1992 | = container_of(node, struct arm_smmu_master, node); | 2005 | #ifdef CONFIG_ARM_AMBA |
1993 | of_node_put(master->of_node); | 2006 | if (!iommu_present(&amba_bustype)) |
2007 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
2008 | #endif | ||
2009 | #ifdef CONFIG_PCI | ||
2010 | if (!iommu_present(&pci_bus_type)) { | ||
2011 | pci_request_acs(); | ||
2012 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | ||
1994 | } | 2013 | } |
1995 | 2014 | #endif | |
1996 | return err; | 2015 | return 0; |
1997 | } | 2016 | } |
1998 | 2017 | ||
1999 | static int arm_smmu_device_remove(struct platform_device *pdev) | 2018 | static int arm_smmu_device_remove(struct platform_device *pdev) |
2000 | { | 2019 | { |
2001 | int i; | 2020 | struct arm_smmu_device *smmu = platform_get_drvdata(pdev); |
2002 | struct device *dev = &pdev->dev; | ||
2003 | struct arm_smmu_device *curr, *smmu = NULL; | ||
2004 | struct rb_node *node; | ||
2005 | |||
2006 | spin_lock(&arm_smmu_devices_lock); | ||
2007 | list_for_each_entry(curr, &arm_smmu_devices, list) { | ||
2008 | if (curr->dev == dev) { | ||
2009 | smmu = curr; | ||
2010 | list_del(&smmu->list); | ||
2011 | break; | ||
2012 | } | ||
2013 | } | ||
2014 | spin_unlock(&arm_smmu_devices_lock); | ||
2015 | 2021 | ||
2016 | if (!smmu) | 2022 | if (!smmu) |
2017 | return -ENODEV; | 2023 | return -ENODEV; |
2018 | 2024 | ||
2019 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | ||
2020 | struct arm_smmu_master *master | ||
2021 | = container_of(node, struct arm_smmu_master, node); | ||
2022 | of_node_put(master->of_node); | ||
2023 | } | ||
2024 | |||
2025 | if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) | 2025 | if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) |
2026 | dev_err(dev, "removing device with active domains!\n"); | 2026 | dev_err(&pdev->dev, "removing device with active domains!\n"); |
2027 | |||
2028 | for (i = 0; i < smmu->num_global_irqs; ++i) | ||
2029 | devm_free_irq(smmu->dev, smmu->irqs[i], smmu); | ||
2030 | 2027 | ||
2031 | /* Turn the thing off */ | 2028 | /* Turn the thing off */ |
2032 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); | 2029 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
@@ -2044,41 +2041,14 @@ static struct platform_driver arm_smmu_driver = { | |||
2044 | 2041 | ||
2045 | static int __init arm_smmu_init(void) | 2042 | static int __init arm_smmu_init(void) |
2046 | { | 2043 | { |
2047 | struct device_node *np; | 2044 | static bool registered; |
2048 | int ret; | 2045 | int ret = 0; |
2049 | |||
2050 | /* | ||
2051 | * Play nice with systems that don't have an ARM SMMU by checking that | ||
2052 | * an ARM SMMU exists in the system before proceeding with the driver | ||
2053 | * and IOMMU bus operation registration. | ||
2054 | */ | ||
2055 | np = of_find_matching_node(NULL, arm_smmu_of_match); | ||
2056 | if (!np) | ||
2057 | return 0; | ||
2058 | |||
2059 | of_node_put(np); | ||
2060 | |||
2061 | ret = platform_driver_register(&arm_smmu_driver); | ||
2062 | if (ret) | ||
2063 | return ret; | ||
2064 | |||
2065 | /* Oh, for a proper bus abstraction */ | ||
2066 | if (!iommu_present(&platform_bus_type)) | ||
2067 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
2068 | |||
2069 | #ifdef CONFIG_ARM_AMBA | ||
2070 | if (!iommu_present(&amba_bustype)) | ||
2071 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
2072 | #endif | ||
2073 | 2046 | ||
2074 | #ifdef CONFIG_PCI | 2047 | if (!registered) { |
2075 | if (!iommu_present(&pci_bus_type)) { | 2048 | ret = platform_driver_register(&arm_smmu_driver); |
2076 | pci_request_acs(); | 2049 | registered = !ret; |
2077 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | ||
2078 | } | 2050 | } |
2079 | #endif | 2051 | return ret; |
2080 | |||
2081 | return 0; | ||
2082 | } | 2052 | } |
2083 | 2053 | ||
2084 | static void __exit arm_smmu_exit(void) | 2054 | static void __exit arm_smmu_exit(void) |
@@ -2089,6 +2059,25 @@ static void __exit arm_smmu_exit(void) | |||
2089 | subsys_initcall(arm_smmu_init); | 2059 | subsys_initcall(arm_smmu_init); |
2090 | module_exit(arm_smmu_exit); | 2060 | module_exit(arm_smmu_exit); |
2091 | 2061 | ||
2062 | static int __init arm_smmu_of_init(struct device_node *np) | ||
2063 | { | ||
2064 | int ret = arm_smmu_init(); | ||
2065 | |||
2066 | if (ret) | ||
2067 | return ret; | ||
2068 | |||
2069 | if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) | ||
2070 | return -ENODEV; | ||
2071 | |||
2072 | return 0; | ||
2073 | } | ||
2074 | IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); | ||
2075 | IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); | ||
2076 | IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); | ||
2077 | IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); | ||
2078 | IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); | ||
2079 | IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); | ||
2080 | |||
2092 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | 2081 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); |
2093 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | 2082 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); |
2094 | MODULE_LICENSE("GPL v2"); | 2083 | MODULE_LICENSE("GPL v2"); |
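
Note on the init changes above: with bus/IOMMU registration moved into probe, driver registration can now be reached both from the normal subsys_initcall() and from any of the IOMMU_OF_DECLARE() early hooks, so it is made idempotent with a static flag. A generalised sketch of that pattern, with illustrative foo_* names rather than the actual SMMU symbols:

        #include <linux/of_platform.h>
        #include <linux/platform_device.h>

        static struct platform_driver foo_driver;       /* assumed defined elsewhere */

        static int __init foo_init(void)
        {
                static bool registered;
                int ret = 0;

                /* Tolerate being called from more than one early entry point */
                if (!registered) {
                        ret = platform_driver_register(&foo_driver);
                        registered = !ret;
                }
                return ret;
        }

        static int __init foo_of_init(struct device_node *np)
        {
                int ret = foo_init();

                if (ret)
                        return ret;

                /* Create the device early so masters can find a probed IOMMU */
                if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
                        return -ENODEV;
                return 0;
        }
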
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 00c8a08d56e7..c5ab8667e6f2 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -25,10 +25,29 @@ | |||
25 | #include <linux/huge_mm.h> | 25 | #include <linux/huge_mm.h> |
26 | #include <linux/iommu.h> | 26 | #include <linux/iommu.h> |
27 | #include <linux/iova.h> | 27 | #include <linux/iova.h> |
28 | #include <linux/irq.h> | ||
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/pci.h> | ||
29 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
30 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
31 | 33 | ||
34 | struct iommu_dma_msi_page { | ||
35 | struct list_head list; | ||
36 | dma_addr_t iova; | ||
37 | phys_addr_t phys; | ||
38 | }; | ||
39 | |||
40 | struct iommu_dma_cookie { | ||
41 | struct iova_domain iovad; | ||
42 | struct list_head msi_page_list; | ||
43 | spinlock_t msi_lock; | ||
44 | }; | ||
45 | |||
46 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) | ||
47 | { | ||
48 | return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; | ||
49 | } | ||
50 | |||
32 | int iommu_dma_init(void) | 51 | int iommu_dma_init(void) |
33 | { | 52 | { |
34 | return iova_cache_get(); | 53 | return iova_cache_get(); |
@@ -43,15 +62,19 @@ int iommu_dma_init(void) | |||
43 | */ | 62 | */ |
44 | int iommu_get_dma_cookie(struct iommu_domain *domain) | 63 | int iommu_get_dma_cookie(struct iommu_domain *domain) |
45 | { | 64 | { |
46 | struct iova_domain *iovad; | 65 | struct iommu_dma_cookie *cookie; |
47 | 66 | ||
48 | if (domain->iova_cookie) | 67 | if (domain->iova_cookie) |
49 | return -EEXIST; | 68 | return -EEXIST; |
50 | 69 | ||
51 | iovad = kzalloc(sizeof(*iovad), GFP_KERNEL); | 70 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); |
52 | domain->iova_cookie = iovad; | 71 | if (!cookie) |
72 | return -ENOMEM; | ||
53 | 73 | ||
54 | return iovad ? 0 : -ENOMEM; | 74 | spin_lock_init(&cookie->msi_lock); |
75 | INIT_LIST_HEAD(&cookie->msi_page_list); | ||
76 | domain->iova_cookie = cookie; | ||
77 | return 0; | ||
55 | } | 78 | } |
56 | EXPORT_SYMBOL(iommu_get_dma_cookie); | 79 | EXPORT_SYMBOL(iommu_get_dma_cookie); |
57 | 80 | ||
@@ -63,32 +86,58 @@ EXPORT_SYMBOL(iommu_get_dma_cookie); | |||
63 | */ | 86 | */ |
64 | void iommu_put_dma_cookie(struct iommu_domain *domain) | 87 | void iommu_put_dma_cookie(struct iommu_domain *domain) |
65 | { | 88 | { |
66 | struct iova_domain *iovad = domain->iova_cookie; | 89 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
90 | struct iommu_dma_msi_page *msi, *tmp; | ||
67 | 91 | ||
68 | if (!iovad) | 92 | if (!cookie) |
69 | return; | 93 | return; |
70 | 94 | ||
71 | if (iovad->granule) | 95 | if (cookie->iovad.granule) |
72 | put_iova_domain(iovad); | 96 | put_iova_domain(&cookie->iovad); |
73 | kfree(iovad); | 97 | |
98 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { | ||
99 | list_del(&msi->list); | ||
100 | kfree(msi); | ||
101 | } | ||
102 | kfree(cookie); | ||
74 | domain->iova_cookie = NULL; | 103 | domain->iova_cookie = NULL; |
75 | } | 104 | } |
76 | EXPORT_SYMBOL(iommu_put_dma_cookie); | 105 | EXPORT_SYMBOL(iommu_put_dma_cookie); |
77 | 106 | ||
107 | static void iova_reserve_pci_windows(struct pci_dev *dev, | ||
108 | struct iova_domain *iovad) | ||
109 | { | ||
110 | struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); | ||
111 | struct resource_entry *window; | ||
112 | unsigned long lo, hi; | ||
113 | |||
114 | resource_list_for_each_entry(window, &bridge->windows) { | ||
115 | if (resource_type(window->res) != IORESOURCE_MEM && | ||
116 | resource_type(window->res) != IORESOURCE_IO) | ||
117 | continue; | ||
118 | |||
119 | lo = iova_pfn(iovad, window->res->start - window->offset); | ||
120 | hi = iova_pfn(iovad, window->res->end - window->offset); | ||
121 | reserve_iova(iovad, lo, hi); | ||
122 | } | ||
123 | } | ||
124 | |||
78 | /** | 125 | /** |
79 | * iommu_dma_init_domain - Initialise a DMA mapping domain | 126 | * iommu_dma_init_domain - Initialise a DMA mapping domain |
80 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() | 127 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() |
81 | * @base: IOVA at which the mappable address space starts | 128 | * @base: IOVA at which the mappable address space starts |
82 | * @size: Size of IOVA space | 129 | * @size: Size of IOVA space |
130 | * @dev: Device the domain is being initialised for | ||
83 | * | 131 | * |
84 | * @base and @size should be exact multiples of IOMMU page granularity to | 132 | * @base and @size should be exact multiples of IOMMU page granularity to |
85 | * avoid rounding surprises. If necessary, we reserve the page at address 0 | 133 | * avoid rounding surprises. If necessary, we reserve the page at address 0 |
86 | * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but | 134 | * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but |
87 | * any change which could make prior IOVAs invalid will fail. | 135 | * any change which could make prior IOVAs invalid will fail. |
88 | */ | 136 | */ |
89 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size) | 137 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
138 | u64 size, struct device *dev) | ||
90 | { | 139 | { |
91 | struct iova_domain *iovad = domain->iova_cookie; | 140 | struct iova_domain *iovad = cookie_iovad(domain); |
92 | unsigned long order, base_pfn, end_pfn; | 141 | unsigned long order, base_pfn, end_pfn; |
93 | 142 | ||
94 | if (!iovad) | 143 | if (!iovad) |
@@ -124,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size | |||
124 | iovad->dma_32bit_pfn = end_pfn; | 173 | iovad->dma_32bit_pfn = end_pfn; |
125 | } else { | 174 | } else { |
126 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); | 175 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); |
176 | if (dev && dev_is_pci(dev)) | ||
177 | iova_reserve_pci_windows(to_pci_dev(dev), iovad); | ||
127 | } | 178 | } |
128 | return 0; | 179 | return 0; |
129 | } | 180 | } |
@@ -155,7 +206,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | |||
155 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, | 206 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, |
156 | dma_addr_t dma_limit) | 207 | dma_addr_t dma_limit) |
157 | { | 208 | { |
158 | struct iova_domain *iovad = domain->iova_cookie; | 209 | struct iova_domain *iovad = cookie_iovad(domain); |
159 | unsigned long shift = iova_shift(iovad); | 210 | unsigned long shift = iova_shift(iovad); |
160 | unsigned long length = iova_align(iovad, size) >> shift; | 211 | unsigned long length = iova_align(iovad, size) >> shift; |
161 | 212 | ||
@@ -171,7 +222,7 @@ static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, | |||
171 | /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ | 222 | /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ |
172 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) | 223 | static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) |
173 | { | 224 | { |
174 | struct iova_domain *iovad = domain->iova_cookie; | 225 | struct iova_domain *iovad = cookie_iovad(domain); |
175 | unsigned long shift = iova_shift(iovad); | 226 | unsigned long shift = iova_shift(iovad); |
176 | unsigned long pfn = dma_addr >> shift; | 227 | unsigned long pfn = dma_addr >> shift; |
177 | struct iova *iova = find_iova(iovad, pfn); | 228 | struct iova *iova = find_iova(iovad, pfn); |
@@ -294,7 +345,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
294 | void (*flush_page)(struct device *, const void *, phys_addr_t)) | 345 | void (*flush_page)(struct device *, const void *, phys_addr_t)) |
295 | { | 346 | { |
296 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 347 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
297 | struct iova_domain *iovad = domain->iova_cookie; | 348 | struct iova_domain *iovad = cookie_iovad(domain); |
298 | struct iova *iova; | 349 | struct iova *iova; |
299 | struct page **pages; | 350 | struct page **pages; |
300 | struct sg_table sgt; | 351 | struct sg_table sgt; |
@@ -386,7 +437,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
386 | { | 437 | { |
387 | dma_addr_t dma_addr; | 438 | dma_addr_t dma_addr; |
388 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 439 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
389 | struct iova_domain *iovad = domain->iova_cookie; | 440 | struct iova_domain *iovad = cookie_iovad(domain); |
390 | phys_addr_t phys = page_to_phys(page) + offset; | 441 | phys_addr_t phys = page_to_phys(page) + offset; |
391 | size_t iova_off = iova_offset(iovad, phys); | 442 | size_t iova_off = iova_offset(iovad, phys); |
392 | size_t len = iova_align(iovad, size + iova_off); | 443 | size_t len = iova_align(iovad, size + iova_off); |
@@ -495,7 +546,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
495 | int nents, int prot) | 546 | int nents, int prot) |
496 | { | 547 | { |
497 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 548 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
498 | struct iova_domain *iovad = domain->iova_cookie; | 549 | struct iova_domain *iovad = cookie_iovad(domain); |
499 | struct iova *iova; | 550 | struct iova *iova; |
500 | struct scatterlist *s, *prev = NULL; | 551 | struct scatterlist *s, *prev = NULL; |
501 | dma_addr_t dma_addr; | 552 | dma_addr_t dma_addr; |
@@ -587,3 +638,81 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
587 | { | 638 | { |
588 | return dma_addr == DMA_ERROR_CODE; | 639 | return dma_addr == DMA_ERROR_CODE; |
589 | } | 640 | } |
641 | |||
642 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | ||
643 | phys_addr_t msi_addr, struct iommu_domain *domain) | ||
644 | { | ||
645 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | ||
646 | struct iommu_dma_msi_page *msi_page; | ||
647 | struct iova_domain *iovad = &cookie->iovad; | ||
648 | struct iova *iova; | ||
649 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | ||
650 | |||
651 | msi_addr &= ~(phys_addr_t)iova_mask(iovad); | ||
652 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) | ||
653 | if (msi_page->phys == msi_addr) | ||
654 | return msi_page; | ||
655 | |||
656 | msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); | ||
657 | if (!msi_page) | ||
658 | return NULL; | ||
659 | |||
660 | iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); | ||
661 | if (!iova) | ||
662 | goto out_free_page; | ||
663 | |||
664 | msi_page->phys = msi_addr; | ||
665 | msi_page->iova = iova_dma_addr(iovad, iova); | ||
666 | if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) | ||
667 | goto out_free_iova; | ||
668 | |||
669 | INIT_LIST_HEAD(&msi_page->list); | ||
670 | list_add(&msi_page->list, &cookie->msi_page_list); | ||
671 | return msi_page; | ||
672 | |||
673 | out_free_iova: | ||
674 | __free_iova(iovad, iova); | ||
675 | out_free_page: | ||
676 | kfree(msi_page); | ||
677 | return NULL; | ||
678 | } | ||
679 | |||
680 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | ||
681 | { | ||
682 | struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq)); | ||
683 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | ||
684 | struct iommu_dma_cookie *cookie; | ||
685 | struct iommu_dma_msi_page *msi_page; | ||
686 | phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo; | ||
687 | unsigned long flags; | ||
688 | |||
689 | if (!domain || !domain->iova_cookie) | ||
690 | return; | ||
691 | |||
692 | cookie = domain->iova_cookie; | ||
693 | |||
694 | /* | ||
695 | * We disable IRQs to rule out a possible inversion against | ||
696 | * irq_desc_lock if, say, someone tries to retarget the affinity | ||
697 | * of an MSI from within an IPI handler. | ||
698 | */ | ||
699 | spin_lock_irqsave(&cookie->msi_lock, flags); | ||
700 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); | ||
701 | spin_unlock_irqrestore(&cookie->msi_lock, flags); | ||
702 | |||
703 | if (WARN_ON(!msi_page)) { | ||
704 | /* | ||
705 | * We're called from a void callback, so the best we can do is | ||
706 | * 'fail' by filling the message with obviously bogus values. | ||
707 | * Since we got this far due to an IOMMU being present, it's | ||
708 | * not like the existing address would have worked anyway... | ||
709 | */ | ||
710 | msg->address_hi = ~0U; | ||
711 | msg->address_lo = ~0U; | ||
712 | msg->data = ~0U; | ||
713 | } else { | ||
714 | msg->address_hi = upper_32_bits(msi_page->iova); | ||
715 | msg->address_lo &= iova_mask(&cookie->iovad); | ||
716 | msg->address_lo += lower_32_bits(msi_page->iova); | ||
717 | } | ||
718 | } | ||
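
Taken together, the cookie now carries both the IOVA allocator and the list of MSI doorbell pages, and iommu_dma_init_domain() gains a struct device argument so PCI host bridge windows can be reserved. A minimal sketch of how arch DMA-mapping setup code might drive the updated interface (the foo_ name is illustrative, not an existing arch helper):

        #include <linux/dma-iommu.h>
        #include <linux/iommu.h>

        /* Illustrative only: roughly what an arch DMA-ops setup path might do */
        static int foo_setup_iommu_dma(struct device *dev, struct iommu_domain *domain,
                                       dma_addr_t dma_base, u64 size)
        {
                int ret;

                ret = iommu_get_dma_cookie(domain);     /* allocates iovad + MSI page list */
                if (ret)
                        return ret;

                /* For PCI devices, the host bridge windows are also reserved */
                ret = iommu_dma_init_domain(domain, dma_base, size, dev);
                if (ret)
                        iommu_put_dma_cookie(domain);

                return ret;
        }
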
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 33dcc29ec200..30808e91b775 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1345,8 +1345,8 @@ static int __init exynos_iommu_of_setup(struct device_node *np) | |||
1345 | exynos_iommu_init(); | 1345 | exynos_iommu_init(); |
1346 | 1346 | ||
1347 | pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); | 1347 | pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); |
1348 | if (IS_ERR(pdev)) | 1348 | if (!pdev) |
1349 | return PTR_ERR(pdev); | 1349 | return -ENODEV; |
1350 | 1350 | ||
1351 | /* | 1351 | /* |
1352 | * use the first registered sysmmu device for performing | 1352 | * use the first registered sysmmu device for performing |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ebb5bf3ddbd9..a4407eabf0e6 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2452,20 +2452,15 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) | |||
2452 | return 0; | 2452 | return 0; |
2453 | } | 2453 | } |
2454 | 2454 | ||
2455 | /* domain is initialized */ | 2455 | static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) |
2456 | static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) | ||
2457 | { | 2456 | { |
2458 | struct device_domain_info *info = NULL; | 2457 | struct device_domain_info *info = NULL; |
2459 | struct dmar_domain *domain, *tmp; | 2458 | struct dmar_domain *domain = NULL; |
2460 | struct intel_iommu *iommu; | 2459 | struct intel_iommu *iommu; |
2461 | u16 req_id, dma_alias; | 2460 | u16 req_id, dma_alias; |
2462 | unsigned long flags; | 2461 | unsigned long flags; |
2463 | u8 bus, devfn; | 2462 | u8 bus, devfn; |
2464 | 2463 | ||
2465 | domain = find_domain(dev); | ||
2466 | if (domain) | ||
2467 | return domain; | ||
2468 | |||
2469 | iommu = device_to_iommu(dev, &bus, &devfn); | 2464 | iommu = device_to_iommu(dev, &bus, &devfn); |
2470 | if (!iommu) | 2465 | if (!iommu) |
2471 | return NULL; | 2466 | return NULL; |
@@ -2487,9 +2482,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) | |||
2487 | } | 2482 | } |
2488 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2483 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2489 | 2484 | ||
2490 | /* DMA alias already has a domain, uses it */ | 2485 | /* DMA alias already has a domain, use it */ |
2491 | if (info) | 2486 | if (info) |
2492 | goto found_domain; | 2487 | goto out; |
2493 | } | 2488 | } |
2494 | 2489 | ||
2495 | /* Allocate and initialize new domain for the device */ | 2490 | /* Allocate and initialize new domain for the device */ |
@@ -2501,28 +2496,67 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) | |||
2501 | return NULL; | 2496 | return NULL; |
2502 | } | 2497 | } |
2503 | 2498 | ||
2504 | /* register PCI DMA alias device */ | 2499 | out: |
2505 | if (dev_is_pci(dev) && req_id != dma_alias) { | ||
2506 | tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), | ||
2507 | dma_alias & 0xff, NULL, domain); | ||
2508 | 2500 | ||
2509 | if (!tmp || tmp != domain) { | 2501 | return domain; |
2510 | domain_exit(domain); | 2502 | } |
2511 | domain = tmp; | ||
2512 | } | ||
2513 | 2503 | ||
2514 | if (!domain) | 2504 | static struct dmar_domain *set_domain_for_dev(struct device *dev, |
2515 | return NULL; | 2505 | struct dmar_domain *domain) |
2506 | { | ||
2507 | struct intel_iommu *iommu; | ||
2508 | struct dmar_domain *tmp; | ||
2509 | u16 req_id, dma_alias; | ||
2510 | u8 bus, devfn; | ||
2511 | |||
2512 | iommu = device_to_iommu(dev, &bus, &devfn); | ||
2513 | if (!iommu) | ||
2514 | return NULL; | ||
2515 | |||
2516 | req_id = ((u16)bus << 8) | devfn; | ||
2517 | |||
2518 | if (dev_is_pci(dev)) { | ||
2519 | struct pci_dev *pdev = to_pci_dev(dev); | ||
2520 | |||
2521 | pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); | ||
2522 | |||
2523 | /* register PCI DMA alias device */ | ||
2524 | if (req_id != dma_alias) { | ||
2525 | tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), | ||
2526 | dma_alias & 0xff, NULL, domain); | ||
2527 | |||
2528 | if (!tmp || tmp != domain) | ||
2529 | return tmp; | ||
2530 | } | ||
2516 | } | 2531 | } |
2517 | 2532 | ||
2518 | found_domain: | ||
2519 | tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); | 2533 | tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); |
2534 | if (!tmp || tmp != domain) | ||
2535 | return tmp; | ||
2536 | |||
2537 | return domain; | ||
2538 | } | ||
2520 | 2539 | ||
2521 | if (!tmp || tmp != domain) { | 2540 | static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) |
2541 | { | ||
2542 | struct dmar_domain *domain, *tmp; | ||
2543 | |||
2544 | domain = find_domain(dev); | ||
2545 | if (domain) | ||
2546 | goto out; | ||
2547 | |||
2548 | domain = find_or_alloc_domain(dev, gaw); | ||
2549 | if (!domain) | ||
2550 | goto out; | ||
2551 | |||
2552 | tmp = set_domain_for_dev(dev, domain); | ||
2553 | if (!tmp || domain != tmp) { | ||
2522 | domain_exit(domain); | 2554 | domain_exit(domain); |
2523 | domain = tmp; | 2555 | domain = tmp; |
2524 | } | 2556 | } |
2525 | 2557 | ||
2558 | out: | ||
2559 | |||
2526 | return domain; | 2560 | return domain; |
2527 | } | 2561 | } |
2528 | 2562 | ||
@@ -3394,17 +3428,18 @@ static unsigned long intel_alloc_iova(struct device *dev, | |||
3394 | 3428 | ||
3395 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) | 3429 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) |
3396 | { | 3430 | { |
3431 | struct dmar_domain *domain, *tmp; | ||
3397 | struct dmar_rmrr_unit *rmrr; | 3432 | struct dmar_rmrr_unit *rmrr; |
3398 | struct dmar_domain *domain; | ||
3399 | struct device *i_dev; | 3433 | struct device *i_dev; |
3400 | int i, ret; | 3434 | int i, ret; |
3401 | 3435 | ||
3402 | domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 3436 | domain = find_domain(dev); |
3403 | if (!domain) { | 3437 | if (domain) |
3404 | pr_err("Allocating domain for %s failed\n", | 3438 | goto out; |
3405 | dev_name(dev)); | 3439 | |
3406 | return NULL; | 3440 | domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
3407 | } | 3441 | if (!domain) |
3442 | goto out; | ||
3408 | 3443 | ||
3409 | /* We have a new domain - setup possible RMRRs for the device */ | 3444 | /* We have a new domain - setup possible RMRRs for the device */ |
3410 | rcu_read_lock(); | 3445 | rcu_read_lock(); |
@@ -3423,6 +3458,18 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) | |||
3423 | } | 3458 | } |
3424 | rcu_read_unlock(); | 3459 | rcu_read_unlock(); |
3425 | 3460 | ||
3461 | tmp = set_domain_for_dev(dev, domain); | ||
3462 | if (!tmp || domain != tmp) { | ||
3463 | domain_exit(domain); | ||
3464 | domain = tmp; | ||
3465 | } | ||
3466 | |||
3467 | out: | ||
3468 | |||
3469 | if (!domain) | ||
3470 | pr_err("Allocating domain for %s failed\n", dev_name(dev)); | ||
3471 | |||
3472 | |||
3426 | return domain; | 3473 | return domain; |
3427 | } | 3474 | } |
3428 | 3475 | ||
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index def8ca1c982d..f50e51c1a9c8 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -633,6 +633,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, | |||
633 | { | 633 | { |
634 | struct arm_v7s_io_pgtable *data; | 634 | struct arm_v7s_io_pgtable *data; |
635 | 635 | ||
636 | #ifdef PHYS_OFFSET | ||
637 | if (upper_32_bits(PHYS_OFFSET)) | ||
638 | return NULL; | ||
639 | #endif | ||
636 | if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) | 640 | if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) |
637 | return NULL; | 641 | return NULL; |
638 | 642 | ||
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b06d93594436..9a2f1960873b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/err.h> | 31 | #include <linux/err.h> |
32 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
33 | #include <linux/bitops.h> | 33 | #include <linux/bitops.h> |
34 | #include <linux/property.h> | ||
34 | #include <trace/events/iommu.h> | 35 | #include <trace/events/iommu.h> |
35 | 36 | ||
36 | static struct kset *iommu_group_kset; | 37 | static struct kset *iommu_group_kset; |
@@ -1613,3 +1614,60 @@ out: | |||
1613 | 1614 | ||
1614 | return ret; | 1615 | return ret; |
1615 | } | 1616 | } |
1617 | |||
1618 | int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, | ||
1619 | const struct iommu_ops *ops) | ||
1620 | { | ||
1621 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1622 | |||
1623 | if (fwspec) | ||
1624 | return ops == fwspec->ops ? 0 : -EINVAL; | ||
1625 | |||
1626 | fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); | ||
1627 | if (!fwspec) | ||
1628 | return -ENOMEM; | ||
1629 | |||
1630 | of_node_get(to_of_node(iommu_fwnode)); | ||
1631 | fwspec->iommu_fwnode = iommu_fwnode; | ||
1632 | fwspec->ops = ops; | ||
1633 | dev->iommu_fwspec = fwspec; | ||
1634 | return 0; | ||
1635 | } | ||
1636 | EXPORT_SYMBOL_GPL(iommu_fwspec_init); | ||
1637 | |||
1638 | void iommu_fwspec_free(struct device *dev) | ||
1639 | { | ||
1640 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1641 | |||
1642 | if (fwspec) { | ||
1643 | fwnode_handle_put(fwspec->iommu_fwnode); | ||
1644 | kfree(fwspec); | ||
1645 | dev->iommu_fwspec = NULL; | ||
1646 | } | ||
1647 | } | ||
1648 | EXPORT_SYMBOL_GPL(iommu_fwspec_free); | ||
1649 | |||
1650 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) | ||
1651 | { | ||
1652 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | ||
1653 | size_t size; | ||
1654 | int i; | ||
1655 | |||
1656 | if (!fwspec) | ||
1657 | return -EINVAL; | ||
1658 | |||
1659 | size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]); | ||
1660 | if (size > sizeof(*fwspec)) { | ||
1661 | fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); | ||
1662 | if (!fwspec) | ||
1663 | return -ENOMEM; | ||
1664 | } | ||
1665 | |||
1666 | for (i = 0; i < num_ids; i++) | ||
1667 | fwspec->ids[fwspec->num_ids + i] = ids[i]; | ||
1668 | |||
1669 | fwspec->num_ids += num_ids; | ||
1670 | dev->iommu_fwspec = fwspec; | ||
1671 | return 0; | ||
1672 | } | ||
1673 | EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); | ||
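
A typical consumer of these new helpers is an IOMMU driver's of_xlate() callback, which records the per-master IDs parsed from each "iommus" specifier. A hedged sketch, assuming a single-cell specifier; foo_ops and the driver itself are illustrative:

        #include <linux/iommu.h>
        #include <linux/of.h>

        static const struct iommu_ops foo_ops;  /* assumed defined elsewhere */

        static int foo_of_xlate(struct device *dev, struct of_phandle_args *args)
        {
                u32 id = args->args[0];
                int ret;

                /* Associate the master with this IOMMU instance (no-op if already done) */
                ret = iommu_fwspec_init(dev, &args->np->fwnode, &foo_ops);
                if (ret)
                        return ret;

                /* Append the stream/requester ID carried by this "iommus" entry */
                return iommu_fwspec_add_ids(dev, &id, 1);
        }
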
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 2fdbac67a77f..ace331da6459 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -636,7 +636,7 @@ static int ipmmu_add_device(struct device *dev) | |||
636 | spin_unlock(&ipmmu_devices_lock); | 636 | spin_unlock(&ipmmu_devices_lock); |
637 | 637 | ||
638 | if (ret < 0) | 638 | if (ret < 0) |
639 | return -ENODEV; | 639 | goto error; |
640 | 640 | ||
641 | for (i = 0; i < num_utlbs; ++i) { | 641 | for (i = 0; i < num_utlbs; ++i) { |
642 | if (utlbs[i] >= mmu->num_utlbs) { | 642 | if (utlbs[i] >= mmu->num_utlbs) { |
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 57f23eaaa2f9..5b82862f571f 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/limits.h> | 22 | #include <linux/limits.h> |
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_iommu.h> | 24 | #include <linux/of_iommu.h> |
25 | #include <linux/of_pci.h> | ||
25 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
26 | 27 | ||
27 | static const struct of_device_id __iommu_of_table_sentinel | 28 | static const struct of_device_id __iommu_of_table_sentinel |
@@ -134,6 +135,47 @@ const struct iommu_ops *of_iommu_get_ops(struct device_node *np) | |||
134 | return ops; | 135 | return ops; |
135 | } | 136 | } |
136 | 137 | ||
138 | static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) | ||
139 | { | ||
140 | struct of_phandle_args *iommu_spec = data; | ||
141 | |||
142 | iommu_spec->args[0] = alias; | ||
143 | return iommu_spec->np == pdev->bus->dev.of_node; | ||
144 | } | ||
145 | |||
146 | static const struct iommu_ops | ||
147 | *of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) | ||
148 | { | ||
149 | const struct iommu_ops *ops; | ||
150 | struct of_phandle_args iommu_spec; | ||
151 | |||
152 | /* | ||
153 | * Start by tracing the RID alias down the PCI topology as | ||
154 | * far as the host bridge whose OF node we have... | ||
155 | * (we're not even attempting to handle multi-alias devices yet) | ||
156 | */ | ||
157 | iommu_spec.args_count = 1; | ||
158 | iommu_spec.np = bridge_np; | ||
159 | pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec); | ||
160 | /* | ||
161 | * ...then find out what that becomes once it escapes the PCI | ||
162 | * bus into the system beyond, and which IOMMU it ends up at. | ||
163 | */ | ||
164 | iommu_spec.np = NULL; | ||
165 | if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", | ||
166 | "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) | ||
167 | return NULL; | ||
168 | |||
169 | ops = of_iommu_get_ops(iommu_spec.np); | ||
170 | if (!ops || !ops->of_xlate || | ||
171 | iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || | ||
172 | ops->of_xlate(&pdev->dev, &iommu_spec)) | ||
173 | ops = NULL; | ||
174 | |||
175 | of_node_put(iommu_spec.np); | ||
176 | return ops; | ||
177 | } | ||
178 | |||
137 | const struct iommu_ops *of_iommu_configure(struct device *dev, | 179 | const struct iommu_ops *of_iommu_configure(struct device *dev, |
138 | struct device_node *master_np) | 180 | struct device_node *master_np) |
139 | { | 181 | { |
@@ -142,12 +184,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
142 | const struct iommu_ops *ops = NULL; | 184 | const struct iommu_ops *ops = NULL; |
143 | int idx = 0; | 185 | int idx = 0; |
144 | 186 | ||
145 | /* | ||
146 | * We can't do much for PCI devices without knowing how | ||
147 | * device IDs are wired up from the PCI bus to the IOMMU. | ||
148 | */ | ||
149 | if (dev_is_pci(dev)) | 187 | if (dev_is_pci(dev)) |
150 | return NULL; | 188 | return of_pci_iommu_configure(to_pci_dev(dev), master_np); |
151 | 189 | ||
152 | /* | 190 | /* |
153 | * We don't currently walk up the tree looking for a parent IOMMU. | 191 | * We don't currently walk up the tree looking for a parent IOMMU. |
@@ -160,7 +198,9 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
160 | np = iommu_spec.np; | 198 | np = iommu_spec.np; |
161 | ops = of_iommu_get_ops(np); | 199 | ops = of_iommu_get_ops(np); |
162 | 200 | ||
163 | if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) | 201 | if (!ops || !ops->of_xlate || |
202 | iommu_fwspec_init(dev, &np->fwnode, ops) || | ||
203 | ops->of_xlate(dev, &iommu_spec)) | ||
164 | goto err_put_node; | 204 | goto err_put_node; |
165 | 205 | ||
166 | of_node_put(np); | 206 | of_node_put(np); |
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 35eb7ac5d21f..863e073c6f7f 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #define pr_fmt(fmt) "GICv2m: " fmt | 16 | #define pr_fmt(fmt) "GICv2m: " fmt |
17 | 17 | ||
18 | #include <linux/acpi.h> | 18 | #include <linux/acpi.h> |
19 | #include <linux/dma-iommu.h> | ||
19 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
20 | #include <linux/irqdomain.h> | 21 | #include <linux/irqdomain.h> |
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
@@ -108,6 +109,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
108 | 109 | ||
109 | if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) | 110 | if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) |
110 | msg->data -= v2m->spi_offset; | 111 | msg->data -= v2m->spi_offset; |
112 | |||
113 | iommu_dma_map_msi_msg(data->irq, msg); | ||
111 | } | 114 | } |
112 | 115 | ||
113 | static struct irq_chip gicv2m_irq_chip = { | 116 | static struct irq_chip gicv2m_irq_chip = { |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 35c851c14e49..003495d91f9c 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/bitmap.h> | 19 | #include <linux/bitmap.h> |
20 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/dma-iommu.h> | ||
22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
23 | #include <linux/irqdomain.h> | 24 | #include <linux/irqdomain.h> |
24 | #include <linux/acpi_iort.h> | 25 | #include <linux/acpi_iort.h> |
@@ -659,6 +660,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) | |||
659 | msg->address_lo = addr & ((1UL << 32) - 1); | 660 | msg->address_lo = addr & ((1UL << 32) - 1); |
660 | msg->address_hi = addr >> 32; | 661 | msg->address_hi = addr >> 32; |
661 | msg->data = its_get_event_id(d); | 662 | msg->data = its_get_event_id(d); |
663 | |||
664 | iommu_dma_map_msi_msg(d->irq, msg); | ||
662 | } | 665 | } |
663 | 666 | ||
664 | static struct irq_chip its_irq_chip = { | 667 | static struct irq_chip its_irq_chip = { |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index a2e68f740eda..393fea85eb4e 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_irq.h> | 28 | #include <linux/of_irq.h> |
29 | #include <linux/of_pci.h> | ||
29 | #include <linux/string.h> | 30 | #include <linux/string.h> |
30 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
31 | 32 | ||
@@ -592,87 +593,16 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, | |||
592 | u32 rid_in) | 593 | u32 rid_in) |
593 | { | 594 | { |
594 | struct device *parent_dev; | 595 | struct device *parent_dev; |
595 | struct device_node *msi_controller_node; | ||
596 | struct device_node *msi_np = *np; | ||
597 | u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle; | ||
598 | int msi_map_len; | ||
599 | bool matched; | ||
600 | u32 rid_out = rid_in; | 596 | u32 rid_out = rid_in; |
601 | const __be32 *msi_map = NULL; | ||
602 | 597 | ||
603 | /* | 598 | /* |
604 | * Walk up the device parent links looking for one with a | 599 | * Walk up the device parent links looking for one with a |
605 | * "msi-map" property. | 600 | * "msi-map" property. |
606 | */ | 601 | */ |
607 | for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) { | 602 | for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) |
608 | if (!parent_dev->of_node) | 603 | if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map", |
609 | continue; | 604 | "msi-map-mask", np, &rid_out)) |
610 | |||
611 | msi_map = of_get_property(parent_dev->of_node, | ||
612 | "msi-map", &msi_map_len); | ||
613 | if (!msi_map) | ||
614 | continue; | ||
615 | |||
616 | if (msi_map_len % (4 * sizeof(__be32))) { | ||
617 | dev_err(parent_dev, "Error: Bad msi-map length: %d\n", | ||
618 | msi_map_len); | ||
619 | return rid_out; | ||
620 | } | ||
621 | /* We have a good parent_dev and msi_map, let's use them. */ | ||
622 | break; | ||
623 | } | ||
624 | if (!msi_map) | ||
625 | return rid_out; | ||
626 | |||
627 | /* The default is to select all bits. */ | ||
628 | map_mask = 0xffffffff; | ||
629 | |||
630 | /* | ||
631 | * Can be overridden by "msi-map-mask" property. If | ||
632 | * of_property_read_u32() fails, the default is used. | ||
633 | */ | ||
634 | of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask); | ||
635 | |||
636 | masked_rid = map_mask & rid_in; | ||
637 | matched = false; | ||
638 | while (!matched && msi_map_len >= 4 * sizeof(__be32)) { | ||
639 | rid_base = be32_to_cpup(msi_map + 0); | ||
640 | phandle = be32_to_cpup(msi_map + 1); | ||
641 | msi_base = be32_to_cpup(msi_map + 2); | ||
642 | rid_len = be32_to_cpup(msi_map + 3); | ||
643 | |||
644 | if (rid_base & ~map_mask) { | ||
645 | dev_err(parent_dev, | ||
646 | "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n", | ||
647 | map_mask, rid_base); | ||
648 | return rid_out; | ||
649 | } | ||
650 | |||
651 | msi_controller_node = of_find_node_by_phandle(phandle); | ||
652 | |||
653 | matched = (masked_rid >= rid_base && | ||
654 | masked_rid < rid_base + rid_len); | ||
655 | if (msi_np) | ||
656 | matched &= msi_np == msi_controller_node; | ||
657 | |||
658 | if (matched && !msi_np) { | ||
659 | *np = msi_np = msi_controller_node; | ||
660 | break; | 605 | break; |
661 | } | ||
662 | |||
663 | of_node_put(msi_controller_node); | ||
664 | msi_map_len -= 4 * sizeof(__be32); | ||
665 | msi_map += 4; | ||
666 | } | ||
667 | if (!matched) | ||
668 | return rid_out; | ||
669 | |||
670 | rid_out = masked_rid - rid_base + msi_base; | ||
671 | dev_dbg(dev, | ||
672 | "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", | ||
673 | dev_name(parent_dev), map_mask, rid_base, msi_base, | ||
674 | rid_len, rid_in, rid_out); | ||
675 | |||
676 | return rid_out; | 606 | return rid_out; |
677 | } | 607 | } |
678 | 608 | ||
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index 589b30c68e14..b58be12ab277 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c | |||
@@ -308,3 +308,105 @@ struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node) | |||
308 | EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node); | 308 | EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node); |
309 | 309 | ||
310 | #endif /* CONFIG_PCI_MSI */ | 310 | #endif /* CONFIG_PCI_MSI */ |
311 | |||
312 | /** | ||
313 | * of_pci_map_rid - Translate a requester ID through a downstream mapping. | ||
314 | * @np: root complex device node. | ||
315 | * @rid: PCI requester ID to map. | ||
316 | * @map_name: property name of the map to use. | ||
317 | * @map_mask_name: optional property name of the mask to use. | ||
318 | * @target: optional pointer to a target device node. | ||
319 | * @id_out: optional pointer to receive the translated ID. | ||
320 | * | ||
321 | * Given a PCI requester ID, look up the appropriate implementation-defined | ||
322 | * platform ID and/or the target device which receives transactions on that | ||
323 | * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or | ||
324 | * @id_out may be NULL if only the other is required. If @target points to | ||
325 | * a non-NULL device node pointer, only entries targeting that node will be | ||
326 | * matched; if it points to a NULL value, it will receive the device node of | ||
327 | * the first matching target phandle, with a reference held. | ||
328 | * | ||
329 | * Return: 0 on success or a standard error code on failure. | ||
330 | */ | ||
331 | int of_pci_map_rid(struct device_node *np, u32 rid, | ||
332 | const char *map_name, const char *map_mask_name, | ||
333 | struct device_node **target, u32 *id_out) | ||
334 | { | ||
335 | u32 map_mask, masked_rid; | ||
336 | int map_len; | ||
337 | const __be32 *map = NULL; | ||
338 | |||
339 | if (!np || !map_name || (!target && !id_out)) | ||
340 | return -EINVAL; | ||
341 | |||
342 | map = of_get_property(np, map_name, &map_len); | ||
343 | if (!map) { | ||
344 | if (target) | ||
345 | return -ENODEV; | ||
346 | /* Otherwise, no map implies no translation */ | ||
347 | *id_out = rid; | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | if (!map_len || map_len % (4 * sizeof(*map))) { | ||
352 | pr_err("%s: Error: Bad %s length: %d\n", np->full_name, | ||
353 | map_name, map_len); | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | |||
357 | /* The default is to select all bits. */ | ||
358 | map_mask = 0xffffffff; | ||
359 | |||
360 | /* | ||
361 | * Can be overridden by "{iommu,msi}-map-mask" property. | ||
362 | * If of_property_read_u32() fails, the default is used. | ||
363 | */ | ||
364 | if (map_mask_name) | ||
365 | of_property_read_u32(np, map_mask_name, &map_mask); | ||
366 | |||
367 | masked_rid = map_mask & rid; | ||
368 | for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { | ||
369 | struct device_node *phandle_node; | ||
370 | u32 rid_base = be32_to_cpup(map + 0); | ||
371 | u32 phandle = be32_to_cpup(map + 1); | ||
372 | u32 out_base = be32_to_cpup(map + 2); | ||
373 | u32 rid_len = be32_to_cpup(map + 3); | ||
374 | |||
375 | if (rid_base & ~map_mask) { | ||
376 | pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n", | ||
377 | np->full_name, map_name, map_name, | ||
378 | map_mask, rid_base); | ||
379 | return -EFAULT; | ||
380 | } | ||
381 | |||
382 | if (masked_rid < rid_base || masked_rid >= rid_base + rid_len) | ||
383 | continue; | ||
384 | |||
385 | phandle_node = of_find_node_by_phandle(phandle); | ||
386 | if (!phandle_node) | ||
387 | return -ENODEV; | ||
388 | |||
389 | if (target) { | ||
390 | if (*target) | ||
391 | of_node_put(phandle_node); | ||
392 | else | ||
393 | *target = phandle_node; | ||
394 | |||
395 | if (*target != phandle_node) | ||
396 | continue; | ||
397 | } | ||
398 | |||
399 | if (id_out) | ||
400 | *id_out = masked_rid - rid_base + out_base; | ||
401 | |||
402 | pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n", | ||
403 | np->full_name, map_name, map_mask, rid_base, out_base, | ||
404 | rid_len, rid, *id_out); | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n", | ||
409 | np->full_name, map_name, rid, | ||
410 | target && *target ? (*target)->full_name : "any target"); | ||
411 | return -EFAULT; | ||
412 | } | ||
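
For reference, a minimal caller sketch (names illustrative) showing how a requester ID is translated through an "iommu-map" to obtain both the target IOMMU node and the output ID, mirroring what of_pci_iommu_configure() above does for the generic binding:

        #include <linux/of.h>
        #include <linux/of_pci.h>

        static int foo_lookup_iommu_id(struct device_node *bridge_np, u32 rid,
                                       struct device_node **iommu_np, u32 *sid)
        {
                int err;

                *iommu_np = NULL;       /* accept the first matching target */
                err = of_pci_map_rid(bridge_np, rid, "iommu-map", "iommu-map-mask",
                                     iommu_np, sid);
                if (err)
                        return err;

                /* ... use *iommu_np and *sid ... */
                of_node_put(*iommu_np); /* of_pci_map_rid() returned it with a reference held */
                return 0;
        }
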
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h index 78f66786da91..6764d7447422 100644 --- a/include/dt-bindings/memory/mt2701-larb-port.h +++ b/include/dt-bindings/memory/mt2701-larb-port.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define LARB0_PORT_OFFSET 0 | 26 | #define LARB0_PORT_OFFSET 0 |
27 | #define LARB1_PORT_OFFSET 11 | 27 | #define LARB1_PORT_OFFSET 11 |
28 | #define LARB2_PORT_OFFSET 21 | 28 | #define LARB2_PORT_OFFSET 21 |
29 | #define LARB3_PORT_OFFSET 43 | 29 | #define LARB3_PORT_OFFSET 44 |
30 | 30 | ||
31 | #define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET) | 31 | #define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET) |
32 | #define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET) | 32 | #define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET) |
diff --git a/include/linux/device.h b/include/linux/device.h index 38f02814d53a..bc41e87a969b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -41,6 +41,7 @@ struct device_node; | |||
41 | struct fwnode_handle; | 41 | struct fwnode_handle; |
42 | struct iommu_ops; | 42 | struct iommu_ops; |
43 | struct iommu_group; | 43 | struct iommu_group; |
44 | struct iommu_fwspec; | ||
44 | 45 | ||
45 | struct bus_attribute { | 46 | struct bus_attribute { |
46 | struct attribute attr; | 47 | struct attribute attr; |
@@ -765,6 +766,7 @@ struct device_dma_parameters { | |||
765 | * gone away. This should be set by the allocator of the | 766 | * gone away. This should be set by the allocator of the |
766 | * device (i.e. the bus driver that discovered the device). | 767 | * device (i.e. the bus driver that discovered the device). |
767 | * @iommu_group: IOMMU group the device belongs to. | 768 | * @iommu_group: IOMMU group the device belongs to. |
769 | * @iommu_fwspec: IOMMU-specific properties supplied by firmware. | ||
768 | * | 770 | * |
769 | * @offline_disabled: If set, the device is permanently online. | 771 | * @offline_disabled: If set, the device is permanently online. |
770 | * @offline: Set after successful invocation of bus type's .offline(). | 772 | * @offline: Set after successful invocation of bus type's .offline(). |
@@ -849,6 +851,7 @@ struct device { | |||
849 | 851 | ||
850 | void (*release)(struct device *dev); | 852 | void (*release)(struct device *dev); |
851 | struct iommu_group *iommu_group; | 853 | struct iommu_group *iommu_group; |
854 | struct iommu_fwspec *iommu_fwspec; | ||
852 | 855 | ||
853 | bool offline_disabled:1; | 856 | bool offline_disabled:1; |
854 | bool offline:1; | 857 | bool offline:1; |
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 81c5c8d167ad..32c589062bd9 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #ifdef CONFIG_IOMMU_DMA | 22 | #ifdef CONFIG_IOMMU_DMA |
23 | #include <linux/iommu.h> | 23 | #include <linux/iommu.h> |
24 | #include <linux/msi.h> | ||
24 | 25 | ||
25 | int iommu_dma_init(void); | 26 | int iommu_dma_init(void); |
26 | 27 | ||
@@ -29,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain); | |||
29 | void iommu_put_dma_cookie(struct iommu_domain *domain); | 30 | void iommu_put_dma_cookie(struct iommu_domain *domain); |
30 | 31 | ||
31 | /* Setup call for arch DMA mapping code */ | 32 | /* Setup call for arch DMA mapping code */ |
32 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size); | 33 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
34 | u64 size, struct device *dev); | ||
33 | 35 | ||
34 | /* General helpers for DMA-API <-> IOMMU-API interaction */ | 36 | /* General helpers for DMA-API <-> IOMMU-API interaction */ |
35 | int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); | 37 | int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); |
@@ -62,9 +64,13 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
62 | int iommu_dma_supported(struct device *dev, u64 mask); | 64 | int iommu_dma_supported(struct device *dev, u64 mask); |
63 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | 65 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
64 | 66 | ||
67 | /* The DMA API isn't _quite_ the whole story, though... */ | ||
68 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); | ||
69 | |||
65 | #else | 70 | #else |
66 | 71 | ||
67 | struct iommu_domain; | 72 | struct iommu_domain; |
73 | struct msi_msg; | ||
68 | 74 | ||
69 | static inline int iommu_dma_init(void) | 75 | static inline int iommu_dma_init(void) |
70 | { | 76 | { |
@@ -80,6 +86,10 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
80 | { | 86 | { |
81 | } | 87 | } |
82 | 88 | ||
89 | static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | ||
90 | { | ||
91 | } | ||
92 | |||
83 | #endif /* CONFIG_IOMMU_DMA */ | 93 | #endif /* CONFIG_IOMMU_DMA */ |
84 | #endif /* __KERNEL__ */ | 94 | #endif /* __KERNEL__ */ |
85 | #endif /* __DMA_IOMMU_H */ | 95 | #endif /* __DMA_IOMMU_H */ |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a35fb8b42e1a..436dc21318af 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -331,10 +331,32 @@ extern struct iommu_group *pci_device_group(struct device *dev); | |||
331 | /* Generic device grouping function */ | 331 | /* Generic device grouping function */ |
332 | extern struct iommu_group *generic_device_group(struct device *dev); | 332 | extern struct iommu_group *generic_device_group(struct device *dev); |
333 | 333 | ||
334 | /** | ||
335 | * struct iommu_fwspec - per-device IOMMU instance data | ||
336 | * @ops: ops for this device's IOMMU | ||
337 | * @iommu_fwnode: firmware handle for this device's IOMMU | ||
338 | * @iommu_priv: IOMMU driver private data for this device | ||
339 | * @num_ids: number of associated device IDs | ||
340 | * @ids: IDs which this device may present to the IOMMU | ||
341 | */ | ||
342 | struct iommu_fwspec { | ||
343 | const struct iommu_ops *ops; | ||
344 | struct fwnode_handle *iommu_fwnode; | ||
345 | void *iommu_priv; | ||
346 | unsigned int num_ids; | ||
347 | u32 ids[1]; | ||
348 | }; | ||
349 | |||
350 | int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, | ||
351 | const struct iommu_ops *ops); | ||
352 | void iommu_fwspec_free(struct device *dev); | ||
353 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); | ||
354 | |||
334 | #else /* CONFIG_IOMMU_API */ | 355 | #else /* CONFIG_IOMMU_API */ |
335 | 356 | ||
336 | struct iommu_ops {}; | 357 | struct iommu_ops {}; |
337 | struct iommu_group {}; | 358 | struct iommu_group {}; |
359 | struct iommu_fwspec {}; | ||
338 | 360 | ||
339 | static inline bool iommu_present(struct bus_type *bus) | 361 | static inline bool iommu_present(struct bus_type *bus) |
340 | { | 362 | { |
@@ -541,6 +563,23 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) | |||
541 | { | 563 | { |
542 | } | 564 | } |
543 | 565 | ||
566 | static inline int iommu_fwspec_init(struct device *dev, | ||
567 | struct fwnode_handle *iommu_fwnode, | ||
568 | const struct iommu_ops *ops) | ||
569 | { | ||
570 | return -ENODEV; | ||
571 | } | ||
572 | |||
573 | static inline void iommu_fwspec_free(struct device *dev) | ||
574 | { | ||
575 | } | ||
576 | |||
577 | static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, | ||
578 | int num_ids) | ||
579 | { | ||
580 | return -ENODEV; | ||
581 | } | ||
582 | |||
544 | #endif /* CONFIG_IOMMU_API */ | 583 | #endif /* CONFIG_IOMMU_API */ |
545 | 584 | ||
546 | #endif /* __LINUX_IOMMU_H */ | 585 | #endif /* __LINUX_IOMMU_H */ |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index b969e9443962..7fd5cfce9140 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -17,6 +17,9 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); | |||
17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); | 17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); |
18 | int of_get_pci_domain_nr(struct device_node *node); | 18 | int of_get_pci_domain_nr(struct device_node *node); |
19 | void of_pci_check_probe_only(void); | 19 | void of_pci_check_probe_only(void); |
20 | int of_pci_map_rid(struct device_node *np, u32 rid, | ||
21 | const char *map_name, const char *map_mask_name, | ||
22 | struct device_node **target, u32 *id_out); | ||
20 | #else | 23 | #else |
21 | static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) | 24 | static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) |
22 | { | 25 | { |
@@ -52,6 +55,13 @@ of_get_pci_domain_nr(struct device_node *node) | |||
52 | return -1; | 55 | return -1; |
53 | } | 56 | } |
54 | 57 | ||
58 | static inline int of_pci_map_rid(struct device_node *np, u32 rid, | ||
59 | const char *map_name, const char *map_mask_name, | ||
60 | struct device_node **target, u32 *id_out) | ||
61 | { | ||
62 | return -EINVAL; | ||
63 | } | ||
64 | |||
55 | static inline void of_pci_check_probe_only(void) { } | 65 | static inline void of_pci_check_probe_only(void) { } |
56 | #endif | 66 | #endif |
57 | 67 | ||