author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 18:59:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 18:59:35 -0400
commit     dc7aafba6bfa1ea5806c6ac402e690682f950f75
tree       01b8dbd05444d2de91cf9058a596e40c3cbc2a6d
parent     161d2e0a195292a8a67b0f5c48e12a9984f75dac
parent     4c5e9d9f0ddf5d4ba9c51eee2ef1a4d6e93ccf56
Merge tag 'iommu-updates-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
"This time with:
- support for the generic PCI device alias code in x86 IOMMU drivers
- a new sysfs interface for IOMMUs
- preparations for hotplug support in the Intel IOMMU driver
- change the AMD IOMMUv2 driver to not hold references to core data
structures like mm_struct or task_struct. Rely on mmu_notifiers
instead.
- removal of the OMAP IOVMM interface, all users of it are converted
to DMA-API now
- make the struct iommu_ops const everywhere
- initial PCI support for the ARM SMMU driver
- there is now a generic device tree binding documented for ARM
IOMMUs
- various fixes and cleanups all over the place
Also included are some changes to the OMAP code, which are acked by
the maintainer"
* tag 'iommu-updates-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (67 commits)
devicetree: Add generic IOMMU device tree bindings
iommu/vt-d: Fix race setting IRQ CPU affinity while freeing IRQ
iommu/amd: Fix 2 typos in comments
iommu/amd: Fix device_state reference counting
iommu/amd: Remove change_pte mmu_notifier call-back
iommu/amd: Don't set pasid_state->mm to NULL in unbind_pasid
iommu/exynos: Select ARM_DMA_USE_IOMMU
iommu/vt-d: Exclude devices using RMRRs from IOMMU API domains
iommu/omap: Remove platform data da_start and da_end fields
ARM: omap: Don't set iommu pdata da_start and da_end fields
iommu/omap: Remove virtual memory manager
iommu/vt-d: Fix issue in computing domain's iommu_snooping flag
iommu/vt-d: Introduce helper function iova_size() to improve code readability
iommu/vt-d: Introduce helper domain_pfn_within_range() to simplify code
iommu/vt-d: Simplify intel_unmap_sg() and kill duplicated code
iommu/vt-d: Change iommu_enable/disable_translation to return void
iommu/vt-d: Simplify include/linux/dmar.h
iommu/vt-d: Avoid freeing virtual machine domain in free_dmar_iommu()
iommu/vt-d: Fix possible invalid memory access caused by free_dmar_iommu()
iommu/vt-d: Allocate dynamic domain id for virtual domains only
...
41 files changed, 1567 insertions, 2050 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-iommu b/Documentation/ABI/testing/sysfs-class-iommu
new file mode 100644
index 000000000000..6d0a1b4be82d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-iommu
@@ -0,0 +1,17 @@ | |||
1 | What: /sys/class/iommu/<iommu>/devices/ | ||
2 | Date: June 2014 | ||
3 | KernelVersion: 3.17 | ||
4 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
5 | Description: | ||
6 | IOMMU drivers are able to link devices managed by a | ||
7 | given IOMMU here to allow association of IOMMU to | ||
8 | device. | ||
9 | |||
10 | What: /sys/devices/.../iommu | ||
11 | Date: June 2014 | ||
12 | KernelVersion: 3.17 | ||
13 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
14 | Description: | ||
15 | IOMMU drivers are able to link the IOMMU for a | ||
16 | given device here to allow association of device to | ||
17 | IOMMU. | ||
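The sysfs topology described above is populated by the new iommu-sysfs helpers added in this merge. Below is a minimal sketch of how a driver might use them, modelled on the AMD IOMMU calls further down in this diff; struct my_iommu, my_iommu_register/my_iommu_add_device and the "myiommu%d" name are illustrative placeholders, not part of any in-tree driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

struct my_iommu {
	int index;			/* instance number, used for the sysfs name */
	struct device *sysfs_dev;	/* the /sys/class/iommu/<iommu> device */
};

static int my_iommu_register(struct my_iommu *iommu, struct device *hwdev)
{
	/*
	 * Creates /sys/class/iommu/myiommu<N>; NULL means no extra
	 * driver-specific attribute groups in this sketch.
	 */
	iommu->sysfs_dev = iommu_device_create(hwdev, iommu, NULL,
					       "myiommu%d", iommu->index);
	if (IS_ERR(iommu->sysfs_dev))
		return PTR_ERR(iommu->sysfs_dev);

	return 0;
}

static int my_iommu_add_device(struct my_iommu *iommu, struct device *dev)
{
	/* Adds dev under .../devices/ and the reverse "iommu" link on dev */
	return iommu_device_link(iommu->sysfs_dev, dev);
}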
diff --git a/Documentation/ABI/testing/sysfs-class-iommu-amd-iommu b/Documentation/ABI/testing/sysfs-class-iommu-amd-iommu
new file mode 100644
index 000000000000..d6ba8e8a4a97
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-iommu-amd-iommu
@@ -0,0 +1,14 @@ | |||
1 | What: /sys/class/iommu/<iommu>/amd-iommu/cap | ||
2 | Date: June 2014 | ||
3 | KernelVersion: 3.17 | ||
4 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
5 | Description: | ||
6 | IOMMU capability header as documented in the AMD IOMMU | ||
7 | specification. Format: %x | ||
8 | |||
9 | What: /sys/class/iommu/<iommu>/amd-iommu/features | ||
10 | Date: June 2014 | ||
11 | KernelVersion: 3.17 | ||
12 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
13 | Description: | ||
14 | Extended features of the IOMMU. Format: %llx | ||
diff --git a/Documentation/ABI/testing/sysfs-class-iommu-intel-iommu b/Documentation/ABI/testing/sysfs-class-iommu-intel-iommu
new file mode 100644
index 000000000000..258cc246d98e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-iommu-intel-iommu
@@ -0,0 +1,32 @@ | |||
1 | What: /sys/class/iommu/<iommu>/intel-iommu/address | ||
2 | Date: June 2014 | ||
3 | KernelVersion: 3.17 | ||
4 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
5 | Description: | ||
6 | Physical address of the VT-d DRHD for this IOMMU. | ||
7 | Format: %llx. This allows association of a sysfs | ||
8 | intel-iommu with a DMAR DRHD table entry. | ||
9 | |||
10 | What: /sys/class/iommu/<iommu>/intel-iommu/cap | ||
11 | Date: June 2014 | ||
12 | KernelVersion: 3.17 | ||
13 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
14 | Description: | ||
15 | The cached hardware capability register value | ||
16 | of this DRHD unit. Format: %llx. | ||
17 | |||
18 | What: /sys/class/iommu/<iommu>/intel-iommu/ecap | ||
19 | Date: June 2014 | ||
20 | KernelVersion: 3.17 | ||
21 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
22 | Description: | ||
23 | The cached hardware extended capability register | ||
24 | value of this DRHD unit. Format: %llx. | ||
25 | |||
26 | What: /sys/class/iommu/<iommu>/intel-iommu/version | ||
27 | Date: June 2014 | ||
28 | KernelVersion: 3.17 | ||
29 | Contact: Alex Williamson <alex.williamson@redhat.com> | ||
30 | Description: | ||
31 | The architecture version as reported from the | ||
32 | VT-d VER_REG. Format: %d:%d, major:minor | ||
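Since all of the attributes above use fixed printf-style formats, they can be parsed directly from user space. The snippet below is a minimal sketch that assumes a unit shows up as "dmar0"; the actual directory name depends on how the driver registers its DRHD units.

#include <stdio.h>

int main(void)
{
	unsigned long long cap;
	int major, minor;
	FILE *f;

	/* Cached capability register, documented Format: %llx */
	f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");
	if (f) {
		if (fscanf(f, "%llx", &cap) == 1)
			printf("cap: %llx\n", cap);
		fclose(f);
	}

	/* Architecture version, documented Format: %d:%d (major:minor) */
	f = fopen("/sys/class/iommu/dmar0/intel-iommu/version", "r");
	if (f) {
		if (fscanf(f, "%d:%d", &major, &minor) == 2)
			printf("VT-d version %d.%d\n", major, minor);
		fclose(f);
	}

	return 0;
}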
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index f284b99402bc..2d0f7cd867ea 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -42,12 +42,6 @@ conditions. | |||
42 | 42 | ||
43 | ** System MMU optional properties: | 43 | ** System MMU optional properties: |
44 | 44 | ||
45 | - smmu-parent : When multiple SMMUs are chained together, this | ||
46 | property can be used to provide a phandle to the | ||
47 | parent SMMU (that is the next SMMU on the path going | ||
48 | from the mmu-masters towards memory) node for this | ||
49 | SMMU. | ||
50 | |||
51 | - calxeda,smmu-secure-config-access : Enable proper handling of buggy | 45 | - calxeda,smmu-secure-config-access : Enable proper handling of buggy |
52 | implementations that always use secure access to | 46 | implementations that always use secure access to |
53 | SMMU configuration registers. In this case non-secure | 47 | SMMU configuration registers. In this case non-secure |
diff --git a/Documentation/devicetree/bindings/iommu/iommu.txt b/Documentation/devicetree/bindings/iommu/iommu.txt
new file mode 100644
index 000000000000..5a8b4624defc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/iommu.txt
@@ -0,0 +1,182 @@ | |||
1 | This document describes the generic device tree binding for IOMMUs and their | ||
2 | master(s). | ||
3 | |||
4 | |||
5 | IOMMU device node: | ||
6 | ================== | ||
7 | |||
8 | An IOMMU can provide the following services: | ||
9 | |||
10 | * Remap address space to allow devices to access physical memory ranges that | ||
11 | they otherwise wouldn't be capable of accessing. | ||
12 | |||
13 | Example: 32-bit DMA to 64-bit physical addresses | ||
14 | |||
15 | * Implement scatter-gather at page level granularity so that the device does | ||
16 | not have to. | ||
17 | |||
18 | * Provide system protection against "rogue" DMA by forcing all accesses to go | ||
19 | through the IOMMU and faulting when encountering accesses to unmapped | ||
20 | address regions. | ||
21 | |||
22 | * Provide address space isolation between multiple contexts. | ||
23 | |||
24 | Example: Virtualization | ||
25 | |||
26 | Device nodes compatible with this binding represent hardware with some of the | ||
27 | above capabilities. | ||
28 | |||
29 | IOMMUs can be single-master or multiple-master. Single-master IOMMU devices | ||
30 | typically have a fixed association to the master device, whereas multiple- | ||
31 | master IOMMU devices can translate accesses from more than one master. | ||
32 | |||
33 | The device tree node of the IOMMU device's parent bus must contain a valid | ||
34 | "dma-ranges" property that describes how the physical address space of the | ||
35 | IOMMU maps to memory. An empty "dma-ranges" property means that there is a | ||
36 | 1:1 mapping from IOMMU to memory. | ||
37 | |||
38 | Required properties: | ||
39 | -------------------- | ||
40 | - #iommu-cells: The number of cells in an IOMMU specifier needed to encode an | ||
41 | address. | ||
42 | |||
43 | The meaning of the IOMMU specifier is defined by the device tree binding of | ||
44 | the specific IOMMU. Below are a few examples of typical use-cases: | ||
45 | |||
46 | - #iommu-cells = <0>: Single master IOMMU devices are not configurable and | ||
47 | therefore no additional information needs to be encoded in the specifier. | ||
48 | This may also apply to multiple master IOMMU devices that do not allow the | ||
49 | association of masters to be configured. Note that an IOMMU can by design | ||
50 | be multi-master yet only expose a single master in a given configuration. | ||
51 | In such cases the number of cells will usually be 1 as in the next case. | ||
52 | - #iommu-cells = <1>: Multiple master IOMMU devices may need to be configured | ||
53 | in order to enable translation for a given master. In such cases the single | ||
54 | address cell corresponds to the master device's ID. In some cases more than | ||
55 | one cell can be required to represent a single master ID. | ||
56 | - #iommu-cells = <4>: Some IOMMU devices allow the DMA window for masters to | ||
57 | be configured. The first cell of the address in this may contain the master | ||
58 | device's ID for example, while the second cell could contain the start of | ||
59 | the DMA window for the given device. The length of the DMA window is given | ||
60 | by the third and fourth cells. | ||
61 | |||
62 | Note that these are merely examples and real-world use-cases may use different | ||
63 | definitions to represent their individual needs. Always refer to the specific | ||
64 | IOMMU binding for the exact meaning of the cells that make up the specifier. | ||
65 | |||
66 | |||
67 | IOMMU master node: | ||
68 | ================== | ||
69 | |||
70 | Devices that access memory through an IOMMU are called masters. A device can | ||
71 | have multiple master interfaces (to one or more IOMMU devices). | ||
72 | |||
73 | Required properties: | ||
74 | -------------------- | ||
75 | - iommus: A list of phandle and IOMMU specifier pairs that describe the IOMMU | ||
76 | master interfaces of the device. One entry in the list describes one master | ||
77 | interface of the device. | ||
78 | |||
79 | When an "iommus" property is specified in a device tree node, the IOMMU will | ||
80 | be used for address translation. If a "dma-ranges" property exists in the | ||
81 | device's parent node it will be ignored. An exception to this rule is if the | ||
82 | referenced IOMMU is disabled, in which case the "dma-ranges" property of the | ||
83 | parent shall take effect. Note that merely disabling a device tree node does | ||
84 | not guarantee that the IOMMU is really disabled since the hardware may not | ||
85 | have a means to turn off translation. But it is invalid in such cases to | ||
86 | disable the IOMMU's device tree node in the first place because it would | ||
87 | prevent any driver from properly setting up the translations. | ||
88 | |||
89 | |||
90 | Notes: | ||
91 | ====== | ||
92 | |||
93 | One possible extension to the above is to use an "iommus" property along with | ||
94 | a "dma-ranges" property in a bus device node (such as PCI host bridges). This | ||
95 | can be useful to describe how children on the bus relate to the IOMMU if they | ||
96 | are not explicitly listed in the device tree (e.g. PCI devices). However, the | ||
97 | requirements of that use-case haven't been fully determined yet. Implementing | ||
98 | this is therefore not recommended without further discussion and extension of | ||
99 | this binding. | ||
100 | |||
101 | |||
102 | Examples: | ||
103 | ========= | ||
104 | |||
105 | Single-master IOMMU: | ||
106 | -------------------- | ||
107 | |||
108 | iommu { | ||
109 | #iommu-cells = <0>; | ||
110 | }; | ||
111 | |||
112 | master { | ||
113 | iommus = <&{/iommu}>; | ||
114 | }; | ||
115 | |||
116 | Multiple-master IOMMU with fixed associations: | ||
117 | ---------------------------------------------- | ||
118 | |||
119 | /* multiple-master IOMMU */ | ||
120 | iommu { | ||
121 | /* | ||
122 | * Masters are statically associated with this IOMMU and share | ||
123 | * the same address translations because the IOMMU does not | ||
124 | * have sufficient information to distinguish between masters. | ||
125 | * | ||
126 | * Consequently address translation is always on or off for | ||
127 | * all masters at any given point in time. | ||
128 | */ | ||
129 | #iommu-cells = <0>; | ||
130 | }; | ||
131 | |||
132 | /* static association with IOMMU */ | ||
133 | master@1 { | ||
134 | reg = <1>; | ||
135 | iommus = <&{/iommu}>; | ||
136 | }; | ||
137 | |||
138 | /* static association with IOMMU */ | ||
139 | master@2 { | ||
140 | reg = <2>; | ||
141 | iommus = <&{/iommu}>; | ||
142 | }; | ||
143 | |||
144 | Multiple-master IOMMU: | ||
145 | ---------------------- | ||
146 | |||
147 | iommu { | ||
148 | /* the specifier represents the ID of the master */ | ||
149 | #iommu-cells = <1>; | ||
150 | }; | ||
151 | |||
152 | master@1 { | ||
153 | /* device has master ID 42 in the IOMMU */ | ||
154 | iommus = <&{/iommu} 42>; | ||
155 | }; | ||
156 | |||
157 | master@2 { | ||
158 | /* device has master IDs 23 and 24 in the IOMMU */ | ||
159 | iommus = <&{/iommu} 23>, <&{/iommu} 24>; | ||
160 | }; | ||
161 | |||
162 | Multiple-master IOMMU with configurable DMA window: | ||
163 | --------------------------------------------------- | ||
164 | |||
165 | / { | ||
166 | iommu { | ||
167 | /* | ||
168 | * One cell for the master ID and one cell for the | ||
169 | * address of the DMA window. The length of the DMA | ||
170 | * window is encoded in two cells. | ||
171 | * | ||
172 | * The DMA window is the range addressable by the | ||
173 | * master (i.e. the I/O virtual address space). | ||
174 | */ | ||
175 | #iommu-cells = <4>; | ||
176 | }; | ||
177 | |||
178 | master { | ||
179 | /* master ID 42, 4 GiB DMA window starting at 0 */ | ||
180 | iommus = <&{/iommu} 42 0 0x1 0x0>; | ||
181 | }; | ||
182 | }; | ||
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index f1fab5684a24..4068350f9059 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -34,8 +34,6 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused) | |||
34 | 34 | ||
35 | pdata->name = oh->name; | 35 | pdata->name = oh->name; |
36 | pdata->nr_tlb_entries = a->nr_tlb_entries; | 36 | pdata->nr_tlb_entries = a->nr_tlb_entries; |
37 | pdata->da_start = a->da_start; | ||
38 | pdata->da_end = a->da_end; | ||
39 | 37 | ||
40 | if (oh->rst_lines_cnt == 1) { | 38 | if (oh->rst_lines_cnt == 1) { |
41 | pdata->reset_name = oh->rst_lines->name; | 39 | pdata->reset_name = oh->rst_lines->name; |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1cd0cfdc03e0..e9516b454e76 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2986,8 +2986,6 @@ static struct omap_hwmod_class omap3xxx_mmu_hwmod_class = { | |||
2986 | /* mmu isp */ | 2986 | /* mmu isp */ |
2987 | 2987 | ||
2988 | static struct omap_mmu_dev_attr mmu_isp_dev_attr = { | 2988 | static struct omap_mmu_dev_attr mmu_isp_dev_attr = { |
2989 | .da_start = 0x0, | ||
2990 | .da_end = 0xfffff000, | ||
2991 | .nr_tlb_entries = 8, | 2989 | .nr_tlb_entries = 8, |
2992 | }; | 2990 | }; |
2993 | 2991 | ||
@@ -3026,8 +3024,6 @@ static struct omap_hwmod omap3xxx_mmu_isp_hwmod = { | |||
3026 | /* mmu iva */ | 3024 | /* mmu iva */ |
3027 | 3025 | ||
3028 | static struct omap_mmu_dev_attr mmu_iva_dev_attr = { | 3026 | static struct omap_mmu_dev_attr mmu_iva_dev_attr = { |
3029 | .da_start = 0x11000000, | ||
3030 | .da_end = 0xfffff000, | ||
3031 | .nr_tlb_entries = 32, | 3027 | .nr_tlb_entries = 32, |
3032 | }; | 3028 | }; |
3033 | 3029 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 41e54f759934..b4acc0a7576f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2084,8 +2084,6 @@ static struct omap_hwmod_class omap44xx_mmu_hwmod_class = { | |||
2084 | /* mmu ipu */ | 2084 | /* mmu ipu */ |
2085 | 2085 | ||
2086 | static struct omap_mmu_dev_attr mmu_ipu_dev_attr = { | 2086 | static struct omap_mmu_dev_attr mmu_ipu_dev_attr = { |
2087 | .da_start = 0x0, | ||
2088 | .da_end = 0xfffff000, | ||
2089 | .nr_tlb_entries = 32, | 2087 | .nr_tlb_entries = 32, |
2090 | }; | 2088 | }; |
2091 | 2089 | ||
@@ -2133,8 +2131,6 @@ static struct omap_hwmod omap44xx_mmu_ipu_hwmod = { | |||
2133 | /* mmu dsp */ | 2131 | /* mmu dsp */ |
2134 | 2132 | ||
2135 | static struct omap_mmu_dev_attr mmu_dsp_dev_attr = { | 2133 | static struct omap_mmu_dev_attr mmu_dsp_dev_attr = { |
2136 | .da_start = 0x0, | ||
2137 | .da_end = 0xfffff000, | ||
2138 | .nr_tlb_entries = 32, | 2134 | .nr_tlb_entries = 32, |
2139 | }; | 2135 | }; |
2140 | 2136 | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d260605e6d5f..dd5112265cc9 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -76,7 +76,7 @@ config AMD_IOMMU_STATS | |||
76 | 76 | ||
77 | config AMD_IOMMU_V2 | 77 | config AMD_IOMMU_V2 |
78 | tristate "AMD IOMMU Version 2 driver" | 78 | tristate "AMD IOMMU Version 2 driver" |
79 | depends on AMD_IOMMU && PROFILING | 79 | depends on AMD_IOMMU |
80 | select MMU_NOTIFIER | 80 | select MMU_NOTIFIER |
81 | ---help--- | 81 | ---help--- |
82 | This option enables support for the AMD IOMMUv2 features of the IOMMU | 82 | This option enables support for the AMD IOMMUv2 features of the IOMMU |
@@ -143,16 +143,12 @@ config OMAP_IOMMU | |||
143 | depends on ARCH_OMAP2PLUS | 143 | depends on ARCH_OMAP2PLUS |
144 | select IOMMU_API | 144 | select IOMMU_API |
145 | 145 | ||
146 | config OMAP_IOVMM | ||
147 | tristate "OMAP IO Virtual Memory Manager Support" | ||
148 | depends on OMAP_IOMMU | ||
149 | |||
150 | config OMAP_IOMMU_DEBUG | 146 | config OMAP_IOMMU_DEBUG |
151 | tristate "Export OMAP IOMMU/IOVMM internals in DebugFS" | 147 | tristate "Export OMAP IOMMU internals in DebugFS" |
152 | depends on OMAP_IOVMM && DEBUG_FS | 148 | depends on OMAP_IOMMU && DEBUG_FS |
153 | help | 149 | help |
154 | Select this to see extensive information about | 150 | Select this to see extensive information about |
155 | the internal state of OMAP IOMMU/IOVMM in debugfs. | 151 | the internal state of OMAP IOMMU in debugfs. |
156 | 152 | ||
157 | Say N unless you know you need this. | 153 | Say N unless you know you need this. |
158 | 154 | ||
@@ -180,6 +176,7 @@ config EXYNOS_IOMMU | |||
180 | bool "Exynos IOMMU Support" | 176 | bool "Exynos IOMMU Support" |
181 | depends on ARCH_EXYNOS | 177 | depends on ARCH_EXYNOS |
182 | select IOMMU_API | 178 | select IOMMU_API |
179 | select ARM_DMA_USE_IOMMU | ||
183 | help | 180 | help |
184 | Support for the IOMMU (System MMU) of Samsung Exynos application | 181 | Support for the IOMMU (System MMU) of Samsung Exynos application |
185 | processor family. This enables H/W multimedia accelerators to see | 182 | processor family. This enables H/W multimedia accelerators to see |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 8893bad048e0..16edef74b8ee 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,5 +1,6 @@ | |||
1 | obj-$(CONFIG_IOMMU_API) += iommu.o | 1 | obj-$(CONFIG_IOMMU_API) += iommu.o |
2 | obj-$(CONFIG_IOMMU_API) += iommu-traces.o | 2 | obj-$(CONFIG_IOMMU_API) += iommu-traces.o |
3 | obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o | ||
3 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o | 4 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o |
4 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o | 5 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o |
5 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o | 6 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o |
@@ -11,7 +12,6 @@ obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o | |||
11 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | 12 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o |
12 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o | 13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o |
13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o | 14 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o |
14 | obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o | ||
15 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o | 15 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o |
16 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o | 16 | obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o |
17 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o | 17 | obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4aec6a29e316..18405314168b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,7 +46,6 @@ | |||
46 | #include "amd_iommu_proto.h" | 46 | #include "amd_iommu_proto.h" |
47 | #include "amd_iommu_types.h" | 47 | #include "amd_iommu_types.h" |
48 | #include "irq_remapping.h" | 48 | #include "irq_remapping.h" |
49 | #include "pci.h" | ||
50 | 49 | ||
51 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | 50 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) |
52 | 51 | ||
@@ -81,7 +80,7 @@ LIST_HEAD(hpet_map); | |||
81 | */ | 80 | */ |
82 | static struct protection_domain *pt_domain; | 81 | static struct protection_domain *pt_domain; |
83 | 82 | ||
84 | static struct iommu_ops amd_iommu_ops; | 83 | static const struct iommu_ops amd_iommu_ops; |
85 | 84 | ||
86 | static ATOMIC_NOTIFIER_HEAD(ppr_notifier); | 85 | static ATOMIC_NOTIFIER_HEAD(ppr_notifier); |
87 | int amd_iommu_max_glx_val = -1; | 86 | int amd_iommu_max_glx_val = -1; |
@@ -133,9 +132,6 @@ static void free_dev_data(struct iommu_dev_data *dev_data) | |||
133 | list_del(&dev_data->dev_data_list); | 132 | list_del(&dev_data->dev_data_list); |
134 | spin_unlock_irqrestore(&dev_data_list_lock, flags); | 133 | spin_unlock_irqrestore(&dev_data_list_lock, flags); |
135 | 134 | ||
136 | if (dev_data->group) | ||
137 | iommu_group_put(dev_data->group); | ||
138 | |||
139 | kfree(dev_data); | 135 | kfree(dev_data); |
140 | } | 136 | } |
141 | 137 | ||
@@ -264,167 +260,79 @@ static bool check_device(struct device *dev) | |||
264 | return true; | 260 | return true; |
265 | } | 261 | } |
266 | 262 | ||
267 | static struct pci_bus *find_hosted_bus(struct pci_bus *bus) | 263 | static int init_iommu_group(struct device *dev) |
268 | { | ||
269 | while (!bus->self) { | ||
270 | if (!pci_is_root_bus(bus)) | ||
271 | bus = bus->parent; | ||
272 | else | ||
273 | return ERR_PTR(-ENODEV); | ||
274 | } | ||
275 | |||
276 | return bus; | ||
277 | } | ||
278 | |||
279 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) | ||
280 | |||
281 | static struct pci_dev *get_isolation_root(struct pci_dev *pdev) | ||
282 | { | ||
283 | struct pci_dev *dma_pdev = pdev; | ||
284 | |||
285 | /* Account for quirked devices */ | ||
286 | swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); | ||
287 | |||
288 | /* | ||
289 | * If it's a multifunction device that does not support our | ||
290 | * required ACS flags, add to the same group as lowest numbered | ||
291 | * function that also does not suport the required ACS flags. | ||
292 | */ | ||
293 | if (dma_pdev->multifunction && | ||
294 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { | ||
295 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); | ||
296 | |||
297 | for (i = 0; i < 8; i++) { | ||
298 | struct pci_dev *tmp; | ||
299 | |||
300 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
301 | if (!tmp) | ||
302 | continue; | ||
303 | |||
304 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
305 | swap_pci_ref(&dma_pdev, tmp); | ||
306 | break; | ||
307 | } | ||
308 | pci_dev_put(tmp); | ||
309 | } | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Devices on the root bus go through the iommu. If that's not us, | ||
314 | * find the next upstream device and test ACS up to the root bus. | ||
315 | * Finding the next device may require skipping virtual buses. | ||
316 | */ | ||
317 | while (!pci_is_root_bus(dma_pdev->bus)) { | ||
318 | struct pci_bus *bus = find_hosted_bus(dma_pdev->bus); | ||
319 | if (IS_ERR(bus)) | ||
320 | break; | ||
321 | |||
322 | if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) | ||
323 | break; | ||
324 | |||
325 | swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); | ||
326 | } | ||
327 | |||
328 | return dma_pdev; | ||
329 | } | ||
330 | |||
331 | static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev) | ||
332 | { | 264 | { |
333 | struct iommu_group *group = iommu_group_get(&pdev->dev); | 265 | struct iommu_group *group; |
334 | int ret; | ||
335 | 266 | ||
336 | if (!group) { | 267 | group = iommu_group_get_for_dev(dev); |
337 | group = iommu_group_alloc(); | ||
338 | if (IS_ERR(group)) | ||
339 | return PTR_ERR(group); | ||
340 | 268 | ||
341 | WARN_ON(&pdev->dev != dev); | 269 | if (IS_ERR(group)) |
342 | } | 270 | return PTR_ERR(group); |
343 | 271 | ||
344 | ret = iommu_group_add_device(group, dev); | ||
345 | iommu_group_put(group); | 272 | iommu_group_put(group); |
346 | return ret; | 273 | return 0; |
347 | } | 274 | } |
348 | 275 | ||
349 | static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data, | 276 | static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) |
350 | struct device *dev) | ||
351 | { | 277 | { |
352 | if (!dev_data->group) { | 278 | *(u16 *)data = alias; |
353 | struct iommu_group *group = iommu_group_alloc(); | 279 | return 0; |
354 | if (IS_ERR(group)) | ||
355 | return PTR_ERR(group); | ||
356 | |||
357 | dev_data->group = group; | ||
358 | } | ||
359 | |||
360 | return iommu_group_add_device(dev_data->group, dev); | ||
361 | } | 280 | } |
362 | 281 | ||
363 | static int init_iommu_group(struct device *dev) | 282 | static u16 get_alias(struct device *dev) |
364 | { | 283 | { |
365 | struct iommu_dev_data *dev_data; | 284 | struct pci_dev *pdev = to_pci_dev(dev); |
366 | struct iommu_group *group; | 285 | u16 devid, ivrs_alias, pci_alias; |
367 | struct pci_dev *dma_pdev; | ||
368 | int ret; | ||
369 | |||
370 | group = iommu_group_get(dev); | ||
371 | if (group) { | ||
372 | iommu_group_put(group); | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | dev_data = find_dev_data(get_device_id(dev)); | ||
377 | if (!dev_data) | ||
378 | return -ENOMEM; | ||
379 | 286 | ||
380 | if (dev_data->alias_data) { | 287 | devid = get_device_id(dev); |
381 | u16 alias; | 288 | ivrs_alias = amd_iommu_alias_table[devid]; |
382 | struct pci_bus *bus; | 289 | pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); |
383 | 290 | ||
384 | if (dev_data->alias_data->group) | 291 | if (ivrs_alias == pci_alias) |
385 | goto use_group; | 292 | return ivrs_alias; |
386 | 293 | ||
387 | /* | 294 | /* |
388 | * If the alias device exists, it's effectively just a first | 295 | * DMA alias showdown |
389 | * level quirk for finding the DMA source. | 296 | * |
390 | */ | 297 | * The IVRS is fairly reliable in telling us about aliases, but it |
391 | alias = amd_iommu_alias_table[dev_data->devid]; | 298 | * can't know about every screwy device. If we don't have an IVRS |
392 | dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); | 299 | * reported alias, use the PCI reported alias. In that case we may |
393 | if (dma_pdev) { | 300 | * still need to initialize the rlookup and dev_table entries if the |
394 | dma_pdev = get_isolation_root(dma_pdev); | 301 | * alias is to a non-existent device. |
395 | goto use_pdev; | 302 | */ |
303 | if (ivrs_alias == devid) { | ||
304 | if (!amd_iommu_rlookup_table[pci_alias]) { | ||
305 | amd_iommu_rlookup_table[pci_alias] = | ||
306 | amd_iommu_rlookup_table[devid]; | ||
307 | memcpy(amd_iommu_dev_table[pci_alias].data, | ||
308 | amd_iommu_dev_table[devid].data, | ||
309 | sizeof(amd_iommu_dev_table[pci_alias].data)); | ||
396 | } | 310 | } |
397 | 311 | ||
398 | /* | 312 | return pci_alias; |
399 | * If the alias is virtual, try to find a parent device | 313 | } |
400 | * and test whether the IOMMU group is actualy rooted above | ||
401 | * the alias. Be careful to also test the parent device if | ||
402 | * we think the alias is the root of the group. | ||
403 | */ | ||
404 | bus = pci_find_bus(0, alias >> 8); | ||
405 | if (!bus) | ||
406 | goto use_group; | ||
407 | |||
408 | bus = find_hosted_bus(bus); | ||
409 | if (IS_ERR(bus) || !bus->self) | ||
410 | goto use_group; | ||
411 | 314 | ||
412 | dma_pdev = get_isolation_root(pci_dev_get(bus->self)); | 315 | pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d " |
413 | if (dma_pdev != bus->self || (dma_pdev->multifunction && | 316 | "for device %s[%04x:%04x], kernel reported alias " |
414 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))) | 317 | "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), |
415 | goto use_pdev; | 318 | PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device, |
319 | PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), | ||
320 | PCI_FUNC(pci_alias)); | ||
416 | 321 | ||
417 | pci_dev_put(dma_pdev); | 322 | /* |
418 | goto use_group; | 323 | * If we don't have a PCI DMA alias and the IVRS alias is on the same |
324 | * bus, then the IVRS table may know about a quirk that we don't. | ||
325 | */ | ||
326 | if (pci_alias == devid && | ||
327 | PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { | ||
328 | pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; | ||
329 | pdev->dma_alias_devfn = ivrs_alias & 0xff; | ||
330 | pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", | ||
331 | PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), | ||
332 | dev_name(dev)); | ||
419 | } | 333 | } |
420 | 334 | ||
421 | dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev))); | 335 | return ivrs_alias; |
422 | use_pdev: | ||
423 | ret = use_pdev_iommu_group(dma_pdev, dev); | ||
424 | pci_dev_put(dma_pdev); | ||
425 | return ret; | ||
426 | use_group: | ||
427 | return use_dev_data_iommu_group(dev_data->alias_data, dev); | ||
428 | } | 336 | } |
429 | 337 | ||
430 | static int iommu_init_device(struct device *dev) | 338 | static int iommu_init_device(struct device *dev) |
@@ -441,7 +349,8 @@ static int iommu_init_device(struct device *dev) | |||
441 | if (!dev_data) | 349 | if (!dev_data) |
442 | return -ENOMEM; | 350 | return -ENOMEM; |
443 | 351 | ||
444 | alias = amd_iommu_alias_table[dev_data->devid]; | 352 | alias = get_alias(dev); |
353 | |||
445 | if (alias != dev_data->devid) { | 354 | if (alias != dev_data->devid) { |
446 | struct iommu_dev_data *alias_data; | 355 | struct iommu_dev_data *alias_data; |
447 | 356 | ||
@@ -470,6 +379,9 @@ static int iommu_init_device(struct device *dev) | |||
470 | 379 | ||
471 | dev->archdata.iommu = dev_data; | 380 | dev->archdata.iommu = dev_data; |
472 | 381 | ||
382 | iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, | ||
383 | dev); | ||
384 | |||
473 | return 0; | 385 | return 0; |
474 | } | 386 | } |
475 | 387 | ||
@@ -489,12 +401,22 @@ static void iommu_ignore_device(struct device *dev) | |||
489 | 401 | ||
490 | static void iommu_uninit_device(struct device *dev) | 402 | static void iommu_uninit_device(struct device *dev) |
491 | { | 403 | { |
404 | struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev)); | ||
405 | |||
406 | if (!dev_data) | ||
407 | return; | ||
408 | |||
409 | iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, | ||
410 | dev); | ||
411 | |||
492 | iommu_group_remove_device(dev); | 412 | iommu_group_remove_device(dev); |
493 | 413 | ||
414 | /* Unlink from alias, it may change if another device is re-plugged */ | ||
415 | dev_data->alias_data = NULL; | ||
416 | |||
494 | /* | 417 | /* |
495 | * Nothing to do here - we keep dev_data around for unplugged devices | 418 | * We keep dev_data around for unplugged devices and reuse it when the |
496 | * and reuse it when the device is re-plugged - not doing so would | 419 | * device is re-plugged - not doing so would introduce a ton of races. |
497 | * introduce a ton of races. | ||
498 | */ | 420 | */ |
499 | } | 421 | } |
500 | 422 | ||
@@ -3473,7 +3395,7 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain, | |||
3473 | return 0; | 3395 | return 0; |
3474 | } | 3396 | } |
3475 | 3397 | ||
3476 | static struct iommu_ops amd_iommu_ops = { | 3398 | static const struct iommu_ops amd_iommu_ops = { |
3477 | .domain_init = amd_iommu_domain_init, | 3399 | .domain_init = amd_iommu_domain_init, |
3478 | .domain_destroy = amd_iommu_domain_destroy, | 3400 | .domain_destroy = amd_iommu_domain_destroy, |
3479 | .attach_dev = amd_iommu_attach_device, | 3401 | .attach_dev = amd_iommu_attach_device, |
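The get_alias() rework above is the AMD side of the "generic PCI device alias code" mentioned in the pull message. As a rough sketch of the underlying helper's contract (not specific to this driver): pci_for_each_dma_alias() invokes a callback for the device itself and for every requester ID it may use for DMA, and a non-zero return from the callback stops the walk. print_alias() and dump_dma_aliases() below are hypothetical names used only for illustration.

#include <linux/pci.h>

static int print_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	/* alias encodes bus/devfn of one requester ID the device may use */
	dev_info(&pdev->dev, "may issue DMA as %02x:%02x.%d\n",
		 PCI_BUS_NUM(alias), PCI_SLOT(alias), PCI_FUNC(alias));

	return 0;	/* continue the walk */
}

static void dump_dma_aliases(struct pci_dev *my_pdev)
{
	pci_for_each_dma_alias(my_pdev, print_alias, NULL);
}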
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 0e08545d7298..3783e0b44df6 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <linux/amd-iommu.h> | 27 | #include <linux/amd-iommu.h> |
28 | #include <linux/export.h> | 28 | #include <linux/export.h> |
29 | #include <linux/iommu.h> | ||
29 | #include <asm/pci-direct.h> | 30 | #include <asm/pci-direct.h> |
30 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
31 | #include <asm/gart.h> | 32 | #include <asm/gart.h> |
@@ -1197,6 +1198,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) | |||
1197 | iommu->max_counters = (u8) ((val >> 7) & 0xf); | 1198 | iommu->max_counters = (u8) ((val >> 7) & 0xf); |
1198 | } | 1199 | } |
1199 | 1200 | ||
1201 | static ssize_t amd_iommu_show_cap(struct device *dev, | ||
1202 | struct device_attribute *attr, | ||
1203 | char *buf) | ||
1204 | { | ||
1205 | struct amd_iommu *iommu = dev_get_drvdata(dev); | ||
1206 | return sprintf(buf, "%x\n", iommu->cap); | ||
1207 | } | ||
1208 | static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); | ||
1209 | |||
1210 | static ssize_t amd_iommu_show_features(struct device *dev, | ||
1211 | struct device_attribute *attr, | ||
1212 | char *buf) | ||
1213 | { | ||
1214 | struct amd_iommu *iommu = dev_get_drvdata(dev); | ||
1215 | return sprintf(buf, "%llx\n", iommu->features); | ||
1216 | } | ||
1217 | static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); | ||
1218 | |||
1219 | static struct attribute *amd_iommu_attrs[] = { | ||
1220 | &dev_attr_cap.attr, | ||
1221 | &dev_attr_features.attr, | ||
1222 | NULL, | ||
1223 | }; | ||
1224 | |||
1225 | static struct attribute_group amd_iommu_group = { | ||
1226 | .name = "amd-iommu", | ||
1227 | .attrs = amd_iommu_attrs, | ||
1228 | }; | ||
1229 | |||
1230 | static const struct attribute_group *amd_iommu_groups[] = { | ||
1231 | &amd_iommu_group, | ||
1232 | NULL, | ||
1233 | }; | ||
1200 | 1234 | ||
1201 | static int iommu_init_pci(struct amd_iommu *iommu) | 1235 | static int iommu_init_pci(struct amd_iommu *iommu) |
1202 | { | 1236 | { |
@@ -1297,6 +1331,10 @@ static int iommu_init_pci(struct amd_iommu *iommu) | |||
1297 | 1331 | ||
1298 | amd_iommu_erratum_746_workaround(iommu); | 1332 | amd_iommu_erratum_746_workaround(iommu); |
1299 | 1333 | ||
1334 | iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, | ||
1335 | amd_iommu_groups, "ivhd%d", | ||
1336 | iommu->index); | ||
1337 | |||
1300 | return pci_enable_device(iommu->dev); | 1338 | return pci_enable_device(iommu->dev); |
1301 | } | 1339 | } |
1302 | 1340 | ||
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f1a5abf11acf..8e43b7cba133 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -390,12 +390,6 @@ struct amd_iommu_fault { | |||
390 | 390 | ||
391 | }; | 391 | }; |
392 | 392 | ||
393 | #define PPR_FAULT_EXEC (1 << 1) | ||
394 | #define PPR_FAULT_READ (1 << 2) | ||
395 | #define PPR_FAULT_WRITE (1 << 5) | ||
396 | #define PPR_FAULT_USER (1 << 6) | ||
397 | #define PPR_FAULT_RSVD (1 << 7) | ||
398 | #define PPR_FAULT_GN (1 << 8) | ||
399 | 393 | ||
400 | struct iommu_domain; | 394 | struct iommu_domain; |
401 | 395 | ||
@@ -432,7 +426,6 @@ struct iommu_dev_data { | |||
432 | struct iommu_dev_data *alias_data;/* The alias dev_data */ | 426 | struct iommu_dev_data *alias_data;/* The alias dev_data */ |
433 | struct protection_domain *domain; /* Domain the device is bound to */ | 427 | struct protection_domain *domain; /* Domain the device is bound to */ |
434 | atomic_t bind; /* Domain attach reference count */ | 428 | atomic_t bind; /* Domain attach reference count */ |
435 | struct iommu_group *group; /* IOMMU group for virtual aliases */ | ||
436 | u16 devid; /* PCI Device ID */ | 429 | u16 devid; /* PCI Device ID */ |
437 | bool iommu_v2; /* Device can make use of IOMMUv2 */ | 430 | bool iommu_v2; /* Device can make use of IOMMUv2 */ |
438 | bool passthrough; /* Default for device is pt_domain */ | 431 | bool passthrough; /* Default for device is pt_domain */ |
@@ -578,6 +571,9 @@ struct amd_iommu { | |||
578 | /* default dma_ops domain for that IOMMU */ | 571 | /* default dma_ops domain for that IOMMU */ |
579 | struct dma_ops_domain *default_dom; | 572 | struct dma_ops_domain *default_dom; |
580 | 573 | ||
574 | /* IOMMU sysfs device */ | ||
575 | struct device *iommu_dev; | ||
576 | |||
581 | /* | 577 | /* |
582 | * We can't rely on the BIOS to restore all values on reinit, so we | 578 | * We can't rely on the BIOS to restore all values on reinit, so we |
583 | * need to stash them | 579 | * need to stash them |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 499b4366a98d..5f578e850fc5 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -47,12 +47,13 @@ struct pasid_state { | |||
47 | atomic_t count; /* Reference count */ | 47 | atomic_t count; /* Reference count */ |
48 | unsigned mmu_notifier_count; /* Counting nested mmu_notifier | 48 | unsigned mmu_notifier_count; /* Counting nested mmu_notifier |
49 | calls */ | 49 | calls */ |
50 | struct task_struct *task; /* Task bound to this PASID */ | ||
51 | struct mm_struct *mm; /* mm_struct for the faults */ | 50 | struct mm_struct *mm; /* mm_struct for the faults */ |
52 | struct mmu_notifier mn; /* mmu_otifier handle */ | 51 | struct mmu_notifier mn; /* mmu_notifier handle */ |
53 | struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ | 52 | struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ |
54 | struct device_state *device_state; /* Link to our device_state */ | 53 | struct device_state *device_state; /* Link to our device_state */ |
55 | int pasid; /* PASID index */ | 54 | int pasid; /* PASID index */ |
55 | bool invalid; /* Used during setup and | ||
56 | teardown of the pasid */ | ||
56 | spinlock_t lock; /* Protect pri_queues and | 57 | spinlock_t lock; /* Protect pri_queues and |
57 | mmu_notifer_count */ | 58 | mmu_notifer_count */ |
58 | wait_queue_head_t wq; /* To wait for count == 0 */ | 59 | wait_queue_head_t wq; /* To wait for count == 0 */ |
@@ -99,7 +100,6 @@ static struct workqueue_struct *iommu_wq; | |||
99 | static u64 *empty_page_table; | 100 | static u64 *empty_page_table; |
100 | 101 | ||
101 | static void free_pasid_states(struct device_state *dev_state); | 102 | static void free_pasid_states(struct device_state *dev_state); |
102 | static void unbind_pasid(struct device_state *dev_state, int pasid); | ||
103 | 103 | ||
104 | static u16 device_id(struct pci_dev *pdev) | 104 | static u16 device_id(struct pci_dev *pdev) |
105 | { | 105 | { |
@@ -297,37 +297,29 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) | |||
297 | schedule(); | 297 | schedule(); |
298 | 298 | ||
299 | finish_wait(&pasid_state->wq, &wait); | 299 | finish_wait(&pasid_state->wq, &wait); |
300 | mmput(pasid_state->mm); | ||
301 | free_pasid_state(pasid_state); | 300 | free_pasid_state(pasid_state); |
302 | } | 301 | } |
303 | 302 | ||
304 | static void __unbind_pasid(struct pasid_state *pasid_state) | 303 | static void unbind_pasid(struct pasid_state *pasid_state) |
305 | { | 304 | { |
306 | struct iommu_domain *domain; | 305 | struct iommu_domain *domain; |
307 | 306 | ||
308 | domain = pasid_state->device_state->domain; | 307 | domain = pasid_state->device_state->domain; |
309 | 308 | ||
309 | /* | ||
310 | * Mark pasid_state as invalid, no more faults will we added to the | ||
311 | * work queue after this is visible everywhere. | ||
312 | */ | ||
313 | pasid_state->invalid = true; | ||
314 | |||
315 | /* Make sure this is visible */ | ||
316 | smp_wmb(); | ||
317 | |||
318 | /* After this the device/pasid can't access the mm anymore */ | ||
310 | amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); | 319 | amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); |
311 | clear_pasid_state(pasid_state->device_state, pasid_state->pasid); | ||
312 | 320 | ||
313 | /* Make sure no more pending faults are in the queue */ | 321 | /* Make sure no more pending faults are in the queue */ |
314 | flush_workqueue(iommu_wq); | 322 | flush_workqueue(iommu_wq); |
315 | |||
316 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); | ||
317 | |||
318 | put_pasid_state(pasid_state); /* Reference taken in bind() function */ | ||
319 | } | ||
320 | |||
321 | static void unbind_pasid(struct device_state *dev_state, int pasid) | ||
322 | { | ||
323 | struct pasid_state *pasid_state; | ||
324 | |||
325 | pasid_state = get_pasid_state(dev_state, pasid); | ||
326 | if (pasid_state == NULL) | ||
327 | return; | ||
328 | |||
329 | __unbind_pasid(pasid_state); | ||
330 | put_pasid_state_wait(pasid_state); /* Reference taken in this function */ | ||
331 | } | 323 | } |
332 | 324 | ||
333 | static void free_pasid_states_level1(struct pasid_state **tbl) | 325 | static void free_pasid_states_level1(struct pasid_state **tbl) |
@@ -373,6 +365,12 @@ static void free_pasid_states(struct device_state *dev_state) | |||
373 | * unbind the PASID | 365 | * unbind the PASID |
374 | */ | 366 | */ |
375 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); | 367 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); |
368 | |||
369 | put_pasid_state_wait(pasid_state); /* Reference taken in | ||
370 | amd_iommu_bind_pasid */ | ||
371 | |||
372 | /* Drop reference taken in amd_iommu_bind_pasid */ | ||
373 | put_device_state(dev_state); | ||
376 | } | 374 | } |
377 | 375 | ||
378 | if (dev_state->pasid_levels == 2) | 376 | if (dev_state->pasid_levels == 2) |
@@ -411,14 +409,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn, | |||
411 | return 0; | 409 | return 0; |
412 | } | 410 | } |
413 | 411 | ||
414 | static void mn_change_pte(struct mmu_notifier *mn, | ||
415 | struct mm_struct *mm, | ||
416 | unsigned long address, | ||
417 | pte_t pte) | ||
418 | { | ||
419 | __mn_flush_page(mn, address); | ||
420 | } | ||
421 | |||
422 | static void mn_invalidate_page(struct mmu_notifier *mn, | 412 | static void mn_invalidate_page(struct mmu_notifier *mn, |
423 | struct mm_struct *mm, | 413 | struct mm_struct *mm, |
424 | unsigned long address) | 414 | unsigned long address) |
@@ -472,22 +462,23 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
472 | { | 462 | { |
473 | struct pasid_state *pasid_state; | 463 | struct pasid_state *pasid_state; |
474 | struct device_state *dev_state; | 464 | struct device_state *dev_state; |
465 | bool run_inv_ctx_cb; | ||
475 | 466 | ||
476 | might_sleep(); | 467 | might_sleep(); |
477 | 468 | ||
478 | pasid_state = mn_to_state(mn); | 469 | pasid_state = mn_to_state(mn); |
479 | dev_state = pasid_state->device_state; | 470 | dev_state = pasid_state->device_state; |
471 | run_inv_ctx_cb = !pasid_state->invalid; | ||
480 | 472 | ||
481 | if (pasid_state->device_state->inv_ctx_cb) | 473 | if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb) |
482 | dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); | 474 | dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); |
483 | 475 | ||
484 | unbind_pasid(dev_state, pasid_state->pasid); | 476 | unbind_pasid(pasid_state); |
485 | } | 477 | } |
486 | 478 | ||
487 | static struct mmu_notifier_ops iommu_mn = { | 479 | static struct mmu_notifier_ops iommu_mn = { |
488 | .release = mn_release, | 480 | .release = mn_release, |
489 | .clear_flush_young = mn_clear_flush_young, | 481 | .clear_flush_young = mn_clear_flush_young, |
490 | .change_pte = mn_change_pte, | ||
491 | .invalidate_page = mn_invalidate_page, | 482 | .invalidate_page = mn_invalidate_page, |
492 | .invalidate_range_start = mn_invalidate_range_start, | 483 | .invalidate_range_start = mn_invalidate_range_start, |
493 | .invalidate_range_end = mn_invalidate_range_end, | 484 | .invalidate_range_end = mn_invalidate_range_end, |
@@ -529,7 +520,7 @@ static void do_fault(struct work_struct *work) | |||
529 | write = !!(fault->flags & PPR_FAULT_WRITE); | 520 | write = !!(fault->flags & PPR_FAULT_WRITE); |
530 | 521 | ||
531 | down_read(&fault->state->mm->mmap_sem); | 522 | down_read(&fault->state->mm->mmap_sem); |
532 | npages = get_user_pages(fault->state->task, fault->state->mm, | 523 | npages = get_user_pages(NULL, fault->state->mm, |
533 | fault->address, 1, write, 0, &page, NULL); | 524 | fault->address, 1, write, 0, &page, NULL); |
534 | up_read(&fault->state->mm->mmap_sem); | 525 | up_read(&fault->state->mm->mmap_sem); |
535 | 526 | ||
@@ -587,7 +578,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) | |||
587 | goto out; | 578 | goto out; |
588 | 579 | ||
589 | pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); | 580 | pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); |
590 | if (pasid_state == NULL) { | 581 | if (pasid_state == NULL || pasid_state->invalid) { |
591 | /* We know the device but not the PASID -> send INVALID */ | 582 | /* We know the device but not the PASID -> send INVALID */ |
592 | amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, | 583 | amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, |
593 | PPR_INVALID, tag); | 584 | PPR_INVALID, tag); |
@@ -612,6 +603,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) | |||
612 | fault->state = pasid_state; | 603 | fault->state = pasid_state; |
613 | fault->tag = tag; | 604 | fault->tag = tag; |
614 | fault->finish = finish; | 605 | fault->finish = finish; |
606 | fault->pasid = iommu_fault->pasid; | ||
615 | fault->flags = iommu_fault->flags; | 607 | fault->flags = iommu_fault->flags; |
616 | INIT_WORK(&fault->work, do_fault); | 608 | INIT_WORK(&fault->work, do_fault); |
617 | 609 | ||
@@ -620,6 +612,10 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) | |||
620 | ret = NOTIFY_OK; | 612 | ret = NOTIFY_OK; |
621 | 613 | ||
622 | out_drop_state: | 614 | out_drop_state: |
615 | |||
616 | if (ret != NOTIFY_OK && pasid_state) | ||
617 | put_pasid_state(pasid_state); | ||
618 | |||
623 | put_device_state(dev_state); | 619 | put_device_state(dev_state); |
624 | 620 | ||
625 | out: | 621 | out: |
@@ -635,6 +631,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, | |||
635 | { | 631 | { |
636 | struct pasid_state *pasid_state; | 632 | struct pasid_state *pasid_state; |
637 | struct device_state *dev_state; | 633 | struct device_state *dev_state; |
634 | struct mm_struct *mm; | ||
638 | u16 devid; | 635 | u16 devid; |
639 | int ret; | 636 | int ret; |
640 | 637 | ||
@@ -658,20 +655,23 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, | |||
658 | if (pasid_state == NULL) | 655 | if (pasid_state == NULL) |
659 | goto out; | 656 | goto out; |
660 | 657 | ||
658 | |||
661 | atomic_set(&pasid_state->count, 1); | 659 | atomic_set(&pasid_state->count, 1); |
662 | init_waitqueue_head(&pasid_state->wq); | 660 | init_waitqueue_head(&pasid_state->wq); |
663 | spin_lock_init(&pasid_state->lock); | 661 | spin_lock_init(&pasid_state->lock); |
664 | 662 | ||
665 | pasid_state->task = task; | 663 | mm = get_task_mm(task); |
666 | pasid_state->mm = get_task_mm(task); | 664 | pasid_state->mm = mm; |
667 | pasid_state->device_state = dev_state; | 665 | pasid_state->device_state = dev_state; |
668 | pasid_state->pasid = pasid; | 666 | pasid_state->pasid = pasid; |
667 | pasid_state->invalid = true; /* Mark as valid only if we are | ||
668 | done with setting up the pasid */ | ||
669 | pasid_state->mn.ops = &iommu_mn; | 669 | pasid_state->mn.ops = &iommu_mn; |
670 | 670 | ||
671 | if (pasid_state->mm == NULL) | 671 | if (pasid_state->mm == NULL) |
672 | goto out_free; | 672 | goto out_free; |
673 | 673 | ||
674 | mmu_notifier_register(&pasid_state->mn, pasid_state->mm); | 674 | mmu_notifier_register(&pasid_state->mn, mm); |
675 | 675 | ||
676 | ret = set_pasid_state(dev_state, pasid_state, pasid); | 676 | ret = set_pasid_state(dev_state, pasid_state, pasid); |
677 | if (ret) | 677 | if (ret) |
@@ -682,15 +682,26 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, | |||
682 | if (ret) | 682 | if (ret) |
683 | goto out_clear_state; | 683 | goto out_clear_state; |
684 | 684 | ||
685 | /* Now we are ready to handle faults */ | ||
686 | pasid_state->invalid = false; | ||
687 | |||
688 | /* | ||
689 | * Drop the reference to the mm_struct here. We rely on the | ||
690 | * mmu_notifier release call-back to inform us when the mm | ||
691 | * is going away. | ||
692 | */ | ||
693 | mmput(mm); | ||
694 | |||
685 | return 0; | 695 | return 0; |
686 | 696 | ||
687 | out_clear_state: | 697 | out_clear_state: |
688 | clear_pasid_state(dev_state, pasid); | 698 | clear_pasid_state(dev_state, pasid); |
689 | 699 | ||
690 | out_unregister: | 700 | out_unregister: |
691 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); | 701 | mmu_notifier_unregister(&pasid_state->mn, mm); |
692 | 702 | ||
693 | out_free: | 703 | out_free: |
704 | mmput(mm); | ||
694 | free_pasid_state(pasid_state); | 705 | free_pasid_state(pasid_state); |
695 | 706 | ||
696 | out: | 707 | out: |
@@ -728,10 +739,22 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) | |||
728 | */ | 739 | */ |
729 | put_pasid_state(pasid_state); | 740 | put_pasid_state(pasid_state); |
730 | 741 | ||
731 | /* This will call the mn_release function and unbind the PASID */ | 742 | /* Clear the pasid state so that the pasid can be re-used */ |
743 | clear_pasid_state(dev_state, pasid_state->pasid); | ||
744 | |||
745 | /* | ||
746 | * Call mmu_notifier_unregister to drop our reference | ||
747 | * to pasid_state->mm | ||
748 | */ | ||
732 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); | 749 | mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); |
733 | 750 | ||
751 | put_pasid_state_wait(pasid_state); /* Reference taken in | ||
752 | amd_iommu_bind_pasid */ | ||
734 | out: | 753 | out: |
754 | /* Drop reference taken in this function */ | ||
755 | put_device_state(dev_state); | ||
756 | |||
757 | /* Drop reference taken in amd_iommu_bind_pasid */ | ||
735 | put_device_state(dev_state); | 758 | put_device_state(dev_state); |
736 | } | 759 | } |
737 | EXPORT_SYMBOL(amd_iommu_unbind_pasid); | 760 | EXPORT_SYMBOL(amd_iommu_unbind_pasid); |
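For context, the hunks above change only the internal lifetime handling of the AMD IOMMUv2 PASID code: the mm_struct reference is now held only while the PASID is being set up, and teardown relies on the mmu_notifier release callback. The exported entry points keep their signatures, so a minimal usage sketch looks like the following; my_pdev and MY_PASID are hypothetical placeholders for a caller's device and PASID choice.

#include <linux/amd-iommu.h>
#include <linux/pci.h>
#include <linux/sched.h>

#define MY_PASID	1

static int my_driver_attach_current_mm(struct pci_dev *my_pdev)
{
	/* Bind the calling task's address space to PASID 1 on this device */
	return amd_iommu_bind_pasid(my_pdev, MY_PASID, current);
}

static void my_driver_detach(struct pci_dev *my_pdev)
{
	/* Tear down the binding; the PASID can then be re-used */
	amd_iommu_unbind_pasid(my_pdev, MY_PASID);
}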
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1599354e974d..ca18d6d42a9b 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/of.h> | 41 | #include <linux/of.h> |
42 | #include <linux/pci.h> | ||
42 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
43 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
44 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
@@ -316,9 +317,9 @@ | |||
316 | #define FSR_AFF (1 << 2) | 317 | #define FSR_AFF (1 << 2) |
317 | #define FSR_TF (1 << 1) | 318 | #define FSR_TF (1 << 1) |
318 | 319 | ||
319 | #define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | \ | 320 | #define FSR_IGN (FSR_AFF | FSR_ASF | \ |
320 | FSR_TLBLKF) | 321 | FSR_TLBMCF | FSR_TLBLKF) |
321 | #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ | 322 | #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ |
322 | FSR_EF | FSR_PF | FSR_TF | FSR_IGN) | 323 | FSR_EF | FSR_PF | FSR_TF | FSR_IGN) |
323 | 324 | ||
324 | #define FSYNR0_WNR (1 << 4) | 325 | #define FSYNR0_WNR (1 << 4) |
@@ -329,27 +330,20 @@ struct arm_smmu_smr { | |||
329 | u16 id; | 330 | u16 id; |
330 | }; | 331 | }; |
331 | 332 | ||
332 | struct arm_smmu_master { | 333 | struct arm_smmu_master_cfg { |
333 | struct device_node *of_node; | ||
334 | |||
335 | /* | ||
336 | * The following is specific to the master's position in the | ||
337 | * SMMU chain. | ||
338 | */ | ||
339 | struct rb_node node; | ||
340 | int num_streamids; | 334 | int num_streamids; |
341 | u16 streamids[MAX_MASTER_STREAMIDS]; | 335 | u16 streamids[MAX_MASTER_STREAMIDS]; |
342 | |||
343 | /* | ||
344 | * We only need to allocate these on the root SMMU, as we | ||
345 | * configure unmatched streams to bypass translation. | ||
346 | */ | ||
347 | struct arm_smmu_smr *smrs; | 336 | struct arm_smmu_smr *smrs; |
348 | }; | 337 | }; |
349 | 338 | ||
339 | struct arm_smmu_master { | ||
340 | struct device_node *of_node; | ||
341 | struct rb_node node; | ||
342 | struct arm_smmu_master_cfg cfg; | ||
343 | }; | ||
344 | |||
350 | struct arm_smmu_device { | 345 | struct arm_smmu_device { |
351 | struct device *dev; | 346 | struct device *dev; |
352 | struct device_node *parent_of_node; | ||
353 | 347 | ||
354 | void __iomem *base; | 348 | void __iomem *base; |
355 | unsigned long size; | 349 | unsigned long size; |
@@ -387,7 +381,6 @@ struct arm_smmu_device { | |||
387 | }; | 381 | }; |
388 | 382 | ||
389 | struct arm_smmu_cfg { | 383 | struct arm_smmu_cfg { |
390 | struct arm_smmu_device *smmu; | ||
391 | u8 cbndx; | 384 | u8 cbndx; |
392 | u8 irptndx; | 385 | u8 irptndx; |
393 | u32 cbar; | 386 | u32 cbar; |
@@ -399,15 +392,8 @@ struct arm_smmu_cfg { | |||
399 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) | 392 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) |
400 | 393 | ||
401 | struct arm_smmu_domain { | 394 | struct arm_smmu_domain { |
402 | /* | 395 | struct arm_smmu_device *smmu; |
403 | * A domain can span across multiple, chained SMMUs and requires | 396 | struct arm_smmu_cfg cfg; |
404 | * all devices within the domain to follow the same translation | ||
405 | * path. | ||
406 | */ | ||
407 | struct arm_smmu_device *leaf_smmu; | ||
408 | struct arm_smmu_cfg root_cfg; | ||
409 | phys_addr_t output_mask; | ||
410 | |||
411 | spinlock_t lock; | 397 | spinlock_t lock; |
412 | }; | 398 | }; |
413 | 399 | ||
@@ -419,7 +405,7 @@ struct arm_smmu_option_prop { | |||
419 | const char *prop; | 405 | const char *prop; |
420 | }; | 406 | }; |
421 | 407 | ||
422 | static struct arm_smmu_option_prop arm_smmu_options [] = { | 408 | static struct arm_smmu_option_prop arm_smmu_options[] = { |
423 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, | 409 | { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, |
424 | { 0, NULL}, | 410 | { 0, NULL}, |
425 | }; | 411 | }; |
@@ -427,6 +413,7 @@ static struct arm_smmu_option_prop arm_smmu_options [] = { | |||
427 | static void parse_driver_options(struct arm_smmu_device *smmu) | 413 | static void parse_driver_options(struct arm_smmu_device *smmu) |
428 | { | 414 | { |
429 | int i = 0; | 415 | int i = 0; |
416 | |||
430 | do { | 417 | do { |
431 | if (of_property_read_bool(smmu->dev->of_node, | 418 | if (of_property_read_bool(smmu->dev->of_node, |
432 | arm_smmu_options[i].prop)) { | 419 | arm_smmu_options[i].prop)) { |
@@ -437,6 +424,19 @@ static void parse_driver_options(struct arm_smmu_device *smmu) | |||
437 | } while (arm_smmu_options[++i].opt); | 424 | } while (arm_smmu_options[++i].opt); |
438 | } | 425 | } |
439 | 426 | ||
427 | static struct device *dev_get_master_dev(struct device *dev) | ||
428 | { | ||
429 | if (dev_is_pci(dev)) { | ||
430 | struct pci_bus *bus = to_pci_dev(dev)->bus; | ||
431 | |||
432 | while (!pci_is_root_bus(bus)) | ||
433 | bus = bus->parent; | ||
434 | return bus->bridge->parent; | ||
435 | } | ||
436 | |||
437 | return dev; | ||
438 | } | ||
439 | |||
440 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | 440 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, |
441 | struct device_node *dev_node) | 441 | struct device_node *dev_node) |
442 | { | 442 | { |
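The hunk above carries the core idea of the new PCI support: a PCI function has no of_node that the SMMU's device-tree master list could match, so the driver first resolves the "master" to the device behind the PCI host bridge and runs the lookup on that node instead. A commented sketch of the same walk (function name illustrative, only standard PCI helpers assumed):

	#include <linux/pci.h>

	static struct device *example_get_master_dev(struct device *dev)
	{
		if (dev_is_pci(dev)) {
			struct pci_bus *bus = to_pci_dev(dev)->bus;

			/* Climb over any intermediate bridges to the root bus... */
			while (!pci_is_root_bus(bus))
				bus = bus->parent;

			/* ...whose bridge device hangs off the host controller. */
			return bus->bridge->parent;
		}

		/* Platform/AMBA masters already carry a usable of_node. */
		return dev;
	}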
@@ -444,6 +444,7 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | |||
444 | 444 | ||
445 | while (node) { | 445 | while (node) { |
446 | struct arm_smmu_master *master; | 446 | struct arm_smmu_master *master; |
447 | |||
447 | master = container_of(node, struct arm_smmu_master, node); | 448 | master = container_of(node, struct arm_smmu_master, node); |
448 | 449 | ||
449 | if (dev_node < master->of_node) | 450 | if (dev_node < master->of_node) |
@@ -457,6 +458,18 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | |||
457 | return NULL; | 458 | return NULL; |
458 | } | 459 | } |
459 | 460 | ||
461 | static struct arm_smmu_master_cfg * | ||
462 | find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev) | ||
463 | { | ||
464 | struct arm_smmu_master *master; | ||
465 | |||
466 | if (dev_is_pci(dev)) | ||
467 | return dev->archdata.iommu; | ||
468 | |||
469 | master = find_smmu_master(smmu, dev->of_node); | ||
470 | return master ? &master->cfg : NULL; | ||
471 | } | ||
472 | |||
460 | static int insert_smmu_master(struct arm_smmu_device *smmu, | 473 | static int insert_smmu_master(struct arm_smmu_device *smmu, |
461 | struct arm_smmu_master *master) | 474 | struct arm_smmu_master *master) |
462 | { | 475 | { |
@@ -465,8 +478,8 @@ static int insert_smmu_master(struct arm_smmu_device *smmu, | |||
465 | new = &smmu->masters.rb_node; | 478 | new = &smmu->masters.rb_node; |
466 | parent = NULL; | 479 | parent = NULL; |
467 | while (*new) { | 480 | while (*new) { |
468 | struct arm_smmu_master *this; | 481 | struct arm_smmu_master *this |
469 | this = container_of(*new, struct arm_smmu_master, node); | 482 | = container_of(*new, struct arm_smmu_master, node); |
470 | 483 | ||
471 | parent = *new; | 484 | parent = *new; |
472 | if (master->of_node < this->of_node) | 485 | if (master->of_node < this->of_node) |
@@ -508,33 +521,30 @@ static int register_smmu_master(struct arm_smmu_device *smmu, | |||
508 | if (!master) | 521 | if (!master) |
509 | return -ENOMEM; | 522 | return -ENOMEM; |
510 | 523 | ||
511 | master->of_node = masterspec->np; | 524 | master->of_node = masterspec->np; |
512 | master->num_streamids = masterspec->args_count; | 525 | master->cfg.num_streamids = masterspec->args_count; |
513 | 526 | ||
514 | for (i = 0; i < master->num_streamids; ++i) | 527 | for (i = 0; i < master->cfg.num_streamids; ++i) |
515 | master->streamids[i] = masterspec->args[i]; | 528 | master->cfg.streamids[i] = masterspec->args[i]; |
516 | 529 | ||
517 | return insert_smmu_master(smmu, master); | 530 | return insert_smmu_master(smmu, master); |
518 | } | 531 | } |
519 | 532 | ||
520 | static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu) | 533 | static struct arm_smmu_device *find_smmu_for_device(struct device *dev) |
521 | { | 534 | { |
522 | struct arm_smmu_device *parent; | 535 | struct arm_smmu_device *smmu; |
523 | 536 | struct arm_smmu_master *master = NULL; | |
524 | if (!smmu->parent_of_node) | 537 | struct device_node *dev_node = dev_get_master_dev(dev)->of_node; |
525 | return NULL; | ||
526 | 538 | ||
527 | spin_lock(&arm_smmu_devices_lock); | 539 | spin_lock(&arm_smmu_devices_lock); |
528 | list_for_each_entry(parent, &arm_smmu_devices, list) | 540 | list_for_each_entry(smmu, &arm_smmu_devices, list) { |
529 | if (parent->dev->of_node == smmu->parent_of_node) | 541 | master = find_smmu_master(smmu, dev_node); |
530 | goto out_unlock; | 542 | if (master) |
531 | 543 | break; | |
532 | parent = NULL; | 544 | } |
533 | dev_warn(smmu->dev, | ||
534 | "Failed to find SMMU parent despite parent in DT\n"); | ||
535 | out_unlock: | ||
536 | spin_unlock(&arm_smmu_devices_lock); | 545 | spin_unlock(&arm_smmu_devices_lock); |
537 | return parent; | 546 | |
547 | return master ? smmu : NULL; | ||
538 | } | 548 | } |
539 | 549 | ||
540 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) | 550 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) |
@@ -574,9 +584,10 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | |||
574 | } | 584 | } |
575 | } | 585 | } |
576 | 586 | ||
577 | static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg) | 587 | static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) |
578 | { | 588 | { |
579 | struct arm_smmu_device *smmu = cfg->smmu; | 589 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
590 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
580 | void __iomem *base = ARM_SMMU_GR0(smmu); | 591 | void __iomem *base = ARM_SMMU_GR0(smmu); |
581 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | 592 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
582 | 593 | ||
@@ -600,11 +611,11 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
600 | unsigned long iova; | 611 | unsigned long iova; |
601 | struct iommu_domain *domain = dev; | 612 | struct iommu_domain *domain = dev; |
602 | struct arm_smmu_domain *smmu_domain = domain->priv; | 613 | struct arm_smmu_domain *smmu_domain = domain->priv; |
603 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 614 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
604 | struct arm_smmu_device *smmu = root_cfg->smmu; | 615 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
605 | void __iomem *cb_base; | 616 | void __iomem *cb_base; |
606 | 617 | ||
607 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | 618 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
608 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); | 619 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); |
609 | 620 | ||
610 | if (!(fsr & FSR_FAULT)) | 621 | if (!(fsr & FSR_FAULT)) |
@@ -631,7 +642,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
631 | } else { | 642 | } else { |
632 | dev_err_ratelimited(smmu->dev, | 643 | dev_err_ratelimited(smmu->dev, |
633 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", | 644 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", |
634 | iova, fsynr, root_cfg->cbndx); | 645 | iova, fsynr, cfg->cbndx); |
635 | ret = IRQ_NONE; | 646 | ret = IRQ_NONE; |
636 | resume = RESUME_TERMINATE; | 647 | resume = RESUME_TERMINATE; |
637 | } | 648 | } |
@@ -696,19 +707,19 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
696 | { | 707 | { |
697 | u32 reg; | 708 | u32 reg; |
698 | bool stage1; | 709 | bool stage1; |
699 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 710 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
700 | struct arm_smmu_device *smmu = root_cfg->smmu; | 711 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
701 | void __iomem *cb_base, *gr0_base, *gr1_base; | 712 | void __iomem *cb_base, *gr0_base, *gr1_base; |
702 | 713 | ||
703 | gr0_base = ARM_SMMU_GR0(smmu); | 714 | gr0_base = ARM_SMMU_GR0(smmu); |
704 | gr1_base = ARM_SMMU_GR1(smmu); | 715 | gr1_base = ARM_SMMU_GR1(smmu); |
705 | stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS; | 716 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
706 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | 717 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
707 | 718 | ||
708 | /* CBAR */ | 719 | /* CBAR */ |
709 | reg = root_cfg->cbar; | 720 | reg = cfg->cbar; |
710 | if (smmu->version == 1) | 721 | if (smmu->version == 1) |
711 | reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; | 722 | reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; |
712 | 723 | ||
713 | /* | 724 | /* |
714 | * Use the weakest shareability/memory types, so they are | 725 | * Use the weakest shareability/memory types, so they are |
@@ -718,9 +729,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
718 | reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | | 729 | reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | |
719 | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); | 730 | (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); |
720 | } else { | 731 | } else { |
721 | reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; | 732 | reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT; |
722 | } | 733 | } |
723 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); | 734 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); |
724 | 735 | ||
725 | if (smmu->version > 1) { | 736 | if (smmu->version > 1) { |
726 | /* CBA2R */ | 737 | /* CBA2R */ |
@@ -730,7 +741,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
730 | reg = CBA2R_RW64_32BIT; | 741 | reg = CBA2R_RW64_32BIT; |
731 | #endif | 742 | #endif |
732 | writel_relaxed(reg, | 743 | writel_relaxed(reg, |
733 | gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx)); | 744 | gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); |
734 | 745 | ||
735 | /* TTBCR2 */ | 746 | /* TTBCR2 */ |
736 | switch (smmu->input_size) { | 747 | switch (smmu->input_size) { |
@@ -780,13 +791,13 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
780 | } | 791 | } |
781 | 792 | ||
782 | /* TTBR0 */ | 793 | /* TTBR0 */ |
783 | arm_smmu_flush_pgtable(smmu, root_cfg->pgd, | 794 | arm_smmu_flush_pgtable(smmu, cfg->pgd, |
784 | PTRS_PER_PGD * sizeof(pgd_t)); | 795 | PTRS_PER_PGD * sizeof(pgd_t)); |
785 | reg = __pa(root_cfg->pgd); | 796 | reg = __pa(cfg->pgd); |
786 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | 797 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); |
787 | reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; | 798 | reg = (phys_addr_t)__pa(cfg->pgd) >> 32; |
788 | if (stage1) | 799 | if (stage1) |
789 | reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT; | 800 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; |
790 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | 801 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); |
791 | 802 | ||
792 | /* | 803 | /* |
@@ -800,6 +811,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
800 | reg = TTBCR_TG0_64K; | 811 | reg = TTBCR_TG0_64K; |
801 | 812 | ||
802 | if (!stage1) { | 813 | if (!stage1) { |
814 | reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; | ||
815 | |||
803 | switch (smmu->s2_output_size) { | 816 | switch (smmu->s2_output_size) { |
804 | case 32: | 817 | case 32: |
805 | reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); | 818 | reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); |
@@ -821,7 +834,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
821 | break; | 834 | break; |
822 | } | 835 | } |
823 | } else { | 836 | } else { |
824 | reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; | 837 | reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT; |
825 | } | 838 | } |
826 | } else { | 839 | } else { |
827 | reg = 0; | 840 | reg = 0; |
@@ -853,44 +866,25 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
853 | } | 866 | } |
854 | 867 | ||
855 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, | 868 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, |
856 | struct device *dev) | 869 | struct arm_smmu_device *smmu) |
857 | { | 870 | { |
858 | int irq, ret, start; | 871 | int irq, ret, start; |
859 | struct arm_smmu_domain *smmu_domain = domain->priv; | 872 | struct arm_smmu_domain *smmu_domain = domain->priv; |
860 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 873 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
861 | struct arm_smmu_device *smmu, *parent; | ||
862 | |||
863 | /* | ||
864 | * Walk the SMMU chain to find the root device for this chain. | ||
865 | * We assume that no masters have translations which terminate | ||
866 | * early, and therefore check that the root SMMU does indeed have | ||
867 | * a StreamID for the master in question. | ||
868 | */ | ||
869 | parent = dev->archdata.iommu; | ||
870 | smmu_domain->output_mask = -1; | ||
871 | do { | ||
872 | smmu = parent; | ||
873 | smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1; | ||
874 | } while ((parent = find_parent_smmu(smmu))); | ||
875 | |||
876 | if (!find_smmu_master(smmu, dev->of_node)) { | ||
877 | dev_err(dev, "unable to find root SMMU for device\n"); | ||
878 | return -ENODEV; | ||
879 | } | ||
880 | 874 | ||
881 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | 875 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { |
882 | /* | 876 | /* |
883 | * We will likely want to change this if/when KVM gets | 877 | * We will likely want to change this if/when KVM gets |
884 | * involved. | 878 | * involved. |
885 | */ | 879 | */ |
886 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | 880 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; |
887 | start = smmu->num_s2_context_banks; | 881 | start = smmu->num_s2_context_banks; |
888 | } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) { | 882 | } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { |
889 | root_cfg->cbar = CBAR_TYPE_S2_TRANS; | 883 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; |
890 | start = 0; | ||
891 | } else { | ||
892 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
893 | start = smmu->num_s2_context_banks; | 884 | start = smmu->num_s2_context_banks; |
885 | } else { | ||
886 | cfg->cbar = CBAR_TYPE_S2_TRANS; | ||
887 | start = 0; | ||
894 | } | 888 | } |
895 | 889 | ||
896 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | 890 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, |
@@ -898,38 +892,38 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
898 | if (IS_ERR_VALUE(ret)) | 892 | if (IS_ERR_VALUE(ret)) |
899 | return ret; | 893 | return ret; |
900 | 894 | ||
901 | root_cfg->cbndx = ret; | 895 | cfg->cbndx = ret; |
902 | if (smmu->version == 1) { | 896 | if (smmu->version == 1) { |
903 | root_cfg->irptndx = atomic_inc_return(&smmu->irptndx); | 897 | cfg->irptndx = atomic_inc_return(&smmu->irptndx); |
904 | root_cfg->irptndx %= smmu->num_context_irqs; | 898 | cfg->irptndx %= smmu->num_context_irqs; |
905 | } else { | 899 | } else { |
906 | root_cfg->irptndx = root_cfg->cbndx; | 900 | cfg->irptndx = cfg->cbndx; |
907 | } | 901 | } |
908 | 902 | ||
909 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | 903 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
910 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | 904 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
911 | "arm-smmu-context-fault", domain); | 905 | "arm-smmu-context-fault", domain); |
912 | if (IS_ERR_VALUE(ret)) { | 906 | if (IS_ERR_VALUE(ret)) { |
913 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | 907 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", |
914 | root_cfg->irptndx, irq); | 908 | cfg->irptndx, irq); |
915 | root_cfg->irptndx = INVALID_IRPTNDX; | 909 | cfg->irptndx = INVALID_IRPTNDX; |
916 | goto out_free_context; | 910 | goto out_free_context; |
917 | } | 911 | } |
918 | 912 | ||
919 | root_cfg->smmu = smmu; | 913 | smmu_domain->smmu = smmu; |
920 | arm_smmu_init_context_bank(smmu_domain); | 914 | arm_smmu_init_context_bank(smmu_domain); |
921 | return ret; | 915 | return 0; |
922 | 916 | ||
923 | out_free_context: | 917 | out_free_context: |
924 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | 918 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); |
925 | return ret; | 919 | return ret; |
926 | } | 920 | } |
927 | 921 | ||
928 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | 922 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) |
929 | { | 923 | { |
930 | struct arm_smmu_domain *smmu_domain = domain->priv; | 924 | struct arm_smmu_domain *smmu_domain = domain->priv; |
931 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 925 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
932 | struct arm_smmu_device *smmu = root_cfg->smmu; | 926 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
933 | void __iomem *cb_base; | 927 | void __iomem *cb_base; |
934 | int irq; | 928 | int irq; |
935 | 929 | ||
@@ -937,16 +931,16 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
937 | return; | 931 | return; |
938 | 932 | ||
939 | /* Disable the context bank and nuke the TLB before freeing it. */ | 933 | /* Disable the context bank and nuke the TLB before freeing it. */ |
940 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | 934 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
941 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | 935 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
942 | arm_smmu_tlb_inv_context(root_cfg); | 936 | arm_smmu_tlb_inv_context(smmu_domain); |
943 | 937 | ||
944 | if (root_cfg->irptndx != INVALID_IRPTNDX) { | 938 | if (cfg->irptndx != INVALID_IRPTNDX) { |
945 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | 939 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
946 | free_irq(irq, domain); | 940 | free_irq(irq, domain); |
947 | } | 941 | } |
948 | 942 | ||
949 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | 943 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); |
950 | } | 944 | } |
951 | 945 | ||
952 | static int arm_smmu_domain_init(struct iommu_domain *domain) | 946 | static int arm_smmu_domain_init(struct iommu_domain *domain) |
@@ -963,10 +957,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) | |||
963 | if (!smmu_domain) | 957 | if (!smmu_domain) |
964 | return -ENOMEM; | 958 | return -ENOMEM; |
965 | 959 | ||
966 | pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | 960 | pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); |
967 | if (!pgd) | 961 | if (!pgd) |
968 | goto out_free_domain; | 962 | goto out_free_domain; |
969 | smmu_domain->root_cfg.pgd = pgd; | 963 | smmu_domain->cfg.pgd = pgd; |
970 | 964 | ||
971 | spin_lock_init(&smmu_domain->lock); | 965 | spin_lock_init(&smmu_domain->lock); |
972 | domain->priv = smmu_domain; | 966 | domain->priv = smmu_domain; |
@@ -980,6 +974,7 @@ out_free_domain: | |||
980 | static void arm_smmu_free_ptes(pmd_t *pmd) | 974 | static void arm_smmu_free_ptes(pmd_t *pmd) |
981 | { | 975 | { |
982 | pgtable_t table = pmd_pgtable(*pmd); | 976 | pgtable_t table = pmd_pgtable(*pmd); |
977 | |||
983 | pgtable_page_dtor(table); | 978 | pgtable_page_dtor(table); |
984 | __free_page(table); | 979 | __free_page(table); |
985 | } | 980 | } |
@@ -1021,8 +1016,8 @@ static void arm_smmu_free_puds(pgd_t *pgd) | |||
1021 | static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) | 1016 | static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) |
1022 | { | 1017 | { |
1023 | int i; | 1018 | int i; |
1024 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 1019 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
1025 | pgd_t *pgd, *pgd_base = root_cfg->pgd; | 1020 | pgd_t *pgd, *pgd_base = cfg->pgd; |
1026 | 1021 | ||
1027 | /* | 1022 | /* |
1028 | * Recursively free the page tables for this domain. We don't | 1023 | * Recursively free the page tables for this domain. We don't |
@@ -1054,7 +1049,7 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain) | |||
1054 | } | 1049 | } |
1055 | 1050 | ||
1056 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | 1051 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, |
1057 | struct arm_smmu_master *master) | 1052 | struct arm_smmu_master_cfg *cfg) |
1058 | { | 1053 | { |
1059 | int i; | 1054 | int i; |
1060 | struct arm_smmu_smr *smrs; | 1055 | struct arm_smmu_smr *smrs; |
@@ -1063,18 +1058,18 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | |||
1063 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) | 1058 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) |
1064 | return 0; | 1059 | return 0; |
1065 | 1060 | ||
1066 | if (master->smrs) | 1061 | if (cfg->smrs) |
1067 | return -EEXIST; | 1062 | return -EEXIST; |
1068 | 1063 | ||
1069 | smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL); | 1064 | smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL); |
1070 | if (!smrs) { | 1065 | if (!smrs) { |
1071 | dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n", | 1066 | dev_err(smmu->dev, "failed to allocate %d SMRs\n", |
1072 | master->num_streamids, master->of_node->name); | 1067 | cfg->num_streamids); |
1073 | return -ENOMEM; | 1068 | return -ENOMEM; |
1074 | } | 1069 | } |
1075 | 1070 | ||
1076 | /* Allocate the SMRs on the root SMMU */ | 1071 | /* Allocate the SMRs on the SMMU */ |
1077 | for (i = 0; i < master->num_streamids; ++i) { | 1072 | for (i = 0; i < cfg->num_streamids; ++i) { |
1078 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, | 1073 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, |
1079 | smmu->num_mapping_groups); | 1074 | smmu->num_mapping_groups); |
1080 | if (IS_ERR_VALUE(idx)) { | 1075 | if (IS_ERR_VALUE(idx)) { |
@@ -1085,18 +1080,18 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | |||
1085 | smrs[i] = (struct arm_smmu_smr) { | 1080 | smrs[i] = (struct arm_smmu_smr) { |
1086 | .idx = idx, | 1081 | .idx = idx, |
1087 | .mask = 0, /* We don't currently share SMRs */ | 1082 | .mask = 0, /* We don't currently share SMRs */ |
1088 | .id = master->streamids[i], | 1083 | .id = cfg->streamids[i], |
1089 | }; | 1084 | }; |
1090 | } | 1085 | } |
1091 | 1086 | ||
1092 | /* It worked! Now, poke the actual hardware */ | 1087 | /* It worked! Now, poke the actual hardware */ |
1093 | for (i = 0; i < master->num_streamids; ++i) { | 1088 | for (i = 0; i < cfg->num_streamids; ++i) { |
1094 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | | 1089 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | |
1095 | smrs[i].mask << SMR_MASK_SHIFT; | 1090 | smrs[i].mask << SMR_MASK_SHIFT; |
1096 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); | 1091 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); |
1097 | } | 1092 | } |
1098 | 1093 | ||
1099 | master->smrs = smrs; | 1094 | cfg->smrs = smrs; |
1100 | return 0; | 1095 | return 0; |
1101 | 1096 | ||
1102 | err_free_smrs: | 1097 | err_free_smrs: |
@@ -1107,68 +1102,55 @@ err_free_smrs: | |||
1107 | } | 1102 | } |
1108 | 1103 | ||
1109 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | 1104 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, |
1110 | struct arm_smmu_master *master) | 1105 | struct arm_smmu_master_cfg *cfg) |
1111 | { | 1106 | { |
1112 | int i; | 1107 | int i; |
1113 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1108 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1114 | struct arm_smmu_smr *smrs = master->smrs; | 1109 | struct arm_smmu_smr *smrs = cfg->smrs; |
1115 | 1110 | ||
1116 | /* Invalidate the SMRs before freeing back to the allocator */ | 1111 | /* Invalidate the SMRs before freeing back to the allocator */ |
1117 | for (i = 0; i < master->num_streamids; ++i) { | 1112 | for (i = 0; i < cfg->num_streamids; ++i) { |
1118 | u8 idx = smrs[i].idx; | 1113 | u8 idx = smrs[i].idx; |
1114 | |||
1119 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); | 1115 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); |
1120 | __arm_smmu_free_bitmap(smmu->smr_map, idx); | 1116 | __arm_smmu_free_bitmap(smmu->smr_map, idx); |
1121 | } | 1117 | } |
1122 | 1118 | ||
1123 | master->smrs = NULL; | 1119 | cfg->smrs = NULL; |
1124 | kfree(smrs); | 1120 | kfree(smrs); |
1125 | } | 1121 | } |
1126 | 1122 | ||
1127 | static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, | 1123 | static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, |
1128 | struct arm_smmu_master *master) | 1124 | struct arm_smmu_master_cfg *cfg) |
1129 | { | 1125 | { |
1130 | int i; | 1126 | int i; |
1131 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1127 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1132 | 1128 | ||
1133 | for (i = 0; i < master->num_streamids; ++i) { | 1129 | for (i = 0; i < cfg->num_streamids; ++i) { |
1134 | u16 sid = master->streamids[i]; | 1130 | u16 sid = cfg->streamids[i]; |
1131 | |||
1135 | writel_relaxed(S2CR_TYPE_BYPASS, | 1132 | writel_relaxed(S2CR_TYPE_BYPASS, |
1136 | gr0_base + ARM_SMMU_GR0_S2CR(sid)); | 1133 | gr0_base + ARM_SMMU_GR0_S2CR(sid)); |
1137 | } | 1134 | } |
1138 | } | 1135 | } |
1139 | 1136 | ||
1140 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | 1137 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, |
1141 | struct arm_smmu_master *master) | 1138 | struct arm_smmu_master_cfg *cfg) |
1142 | { | 1139 | { |
1143 | int i, ret; | 1140 | int i, ret; |
1144 | struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu; | 1141 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1145 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1142 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1146 | 1143 | ||
1147 | ret = arm_smmu_master_configure_smrs(smmu, master); | 1144 | ret = arm_smmu_master_configure_smrs(smmu, cfg); |
1148 | if (ret) | 1145 | if (ret) |
1149 | return ret; | 1146 | return ret; |
1150 | 1147 | ||
1151 | /* Bypass the leaves */ | 1148 | for (i = 0; i < cfg->num_streamids; ++i) { |
1152 | smmu = smmu_domain->leaf_smmu; | ||
1153 | while ((parent = find_parent_smmu(smmu))) { | ||
1154 | /* | ||
1155 | * We won't have a StreamID match for anything but the root | ||
1156 | * smmu, so we only need to worry about StreamID indexing, | ||
1157 | * where we must install bypass entries in the S2CRs. | ||
1158 | */ | ||
1159 | if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) | ||
1160 | continue; | ||
1161 | |||
1162 | arm_smmu_bypass_stream_mapping(smmu, master); | ||
1163 | smmu = parent; | ||
1164 | } | ||
1165 | |||
1166 | /* Now we're at the root, time to point at our context bank */ | ||
1167 | for (i = 0; i < master->num_streamids; ++i) { | ||
1168 | u32 idx, s2cr; | 1149 | u32 idx, s2cr; |
1169 | idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; | 1150 | |
1151 | idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; | ||
1170 | s2cr = S2CR_TYPE_TRANS | | 1152 | s2cr = S2CR_TYPE_TRANS | |
1171 | (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); | 1153 | (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT); |
1172 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); | 1154 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); |
1173 | } | 1155 | } |
1174 | 1156 | ||
@@ -1176,58 +1158,57 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | |||
1176 | } | 1158 | } |
1177 | 1159 | ||
1178 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | 1160 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, |
1179 | struct arm_smmu_master *master) | 1161 | struct arm_smmu_master_cfg *cfg) |
1180 | { | 1162 | { |
1181 | struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu; | 1163 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1182 | 1164 | ||
1183 | /* | 1165 | /* |
1184 | * We *must* clear the S2CR first, because freeing the SMR means | 1166 | * We *must* clear the S2CR first, because freeing the SMR means |
1185 | * that it can be re-allocated immediately. | 1167 | * that it can be re-allocated immediately. |
1186 | */ | 1168 | */ |
1187 | arm_smmu_bypass_stream_mapping(smmu, master); | 1169 | arm_smmu_bypass_stream_mapping(smmu, cfg); |
1188 | arm_smmu_master_free_smrs(smmu, master); | 1170 | arm_smmu_master_free_smrs(smmu, cfg); |
1189 | } | 1171 | } |
1190 | 1172 | ||
1191 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1173 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
1192 | { | 1174 | { |
1193 | int ret = -EINVAL; | 1175 | int ret = -EINVAL; |
1194 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1176 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1195 | struct arm_smmu_device *device_smmu = dev->archdata.iommu; | 1177 | struct arm_smmu_device *smmu; |
1196 | struct arm_smmu_master *master; | 1178 | struct arm_smmu_master_cfg *cfg; |
1197 | unsigned long flags; | 1179 | unsigned long flags; |
1198 | 1180 | ||
1199 | if (!device_smmu) { | 1181 | smmu = dev_get_master_dev(dev)->archdata.iommu; |
1182 | if (!smmu) { | ||
1200 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); | 1183 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); |
1201 | return -ENXIO; | 1184 | return -ENXIO; |
1202 | } | 1185 | } |
1203 | 1186 | ||
1204 | /* | 1187 | /* |
1205 | * Sanity check the domain. We don't currently support domains | 1188 | * Sanity check the domain. We don't support domains across |
1206 | * that cross between different SMMU chains. | 1189 | * different SMMUs. |
1207 | */ | 1190 | */ |
1208 | spin_lock_irqsave(&smmu_domain->lock, flags); | 1191 | spin_lock_irqsave(&smmu_domain->lock, flags); |
1209 | if (!smmu_domain->leaf_smmu) { | 1192 | if (!smmu_domain->smmu) { |
1210 | /* Now that we have a master, we can finalise the domain */ | 1193 | /* Now that we have a master, we can finalise the domain */ |
1211 | ret = arm_smmu_init_domain_context(domain, dev); | 1194 | ret = arm_smmu_init_domain_context(domain, smmu); |
1212 | if (IS_ERR_VALUE(ret)) | 1195 | if (IS_ERR_VALUE(ret)) |
1213 | goto err_unlock; | 1196 | goto err_unlock; |
1214 | 1197 | } else if (smmu_domain->smmu != smmu) { | |
1215 | smmu_domain->leaf_smmu = device_smmu; | ||
1216 | } else if (smmu_domain->leaf_smmu != device_smmu) { | ||
1217 | dev_err(dev, | 1198 | dev_err(dev, |
1218 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | 1199 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", |
1219 | dev_name(smmu_domain->leaf_smmu->dev), | 1200 | dev_name(smmu_domain->smmu->dev), |
1220 | dev_name(device_smmu->dev)); | 1201 | dev_name(smmu->dev)); |
1221 | goto err_unlock; | 1202 | goto err_unlock; |
1222 | } | 1203 | } |
1223 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | 1204 | spin_unlock_irqrestore(&smmu_domain->lock, flags); |
1224 | 1205 | ||
1225 | /* Looks ok, so add the device to the domain */ | 1206 | /* Looks ok, so add the device to the domain */ |
1226 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | 1207 | cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); |
1227 | if (!master) | 1208 | if (!cfg) |
1228 | return -ENODEV; | 1209 | return -ENODEV; |
1229 | 1210 | ||
1230 | return arm_smmu_domain_add_master(smmu_domain, master); | 1211 | return arm_smmu_domain_add_master(smmu_domain, cfg); |
1231 | 1212 | ||
1232 | err_unlock: | 1213 | err_unlock: |
1233 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | 1214 | spin_unlock_irqrestore(&smmu_domain->lock, flags); |
@@ -1237,11 +1218,11 @@ err_unlock: | |||
1237 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | 1218 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) |
1238 | { | 1219 | { |
1239 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1220 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1240 | struct arm_smmu_master *master; | 1221 | struct arm_smmu_master_cfg *cfg; |
1241 | 1222 | ||
1242 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | 1223 | cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); |
1243 | if (master) | 1224 | if (cfg) |
1244 | arm_smmu_domain_remove_master(smmu_domain, master); | 1225 | arm_smmu_domain_remove_master(smmu_domain, cfg); |
1245 | } | 1226 | } |
1246 | 1227 | ||
1247 | static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, | 1228 | static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, |
@@ -1261,6 +1242,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1261 | if (pmd_none(*pmd)) { | 1242 | if (pmd_none(*pmd)) { |
1262 | /* Allocate a new set of tables */ | 1243 | /* Allocate a new set of tables */ |
1263 | pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); | 1244 | pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); |
1245 | |||
1264 | if (!table) | 1246 | if (!table) |
1265 | return -ENOMEM; | 1247 | return -ENOMEM; |
1266 | 1248 | ||
@@ -1326,6 +1308,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1326 | */ | 1308 | */ |
1327 | do { | 1309 | do { |
1328 | int i = 1; | 1310 | int i = 1; |
1311 | |||
1329 | pteval &= ~ARM_SMMU_PTE_CONT; | 1312 | pteval &= ~ARM_SMMU_PTE_CONT; |
1330 | 1313 | ||
1331 | if (arm_smmu_pte_is_contiguous_range(addr, end)) { | 1314 | if (arm_smmu_pte_is_contiguous_range(addr, end)) { |
@@ -1340,7 +1323,8 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1340 | idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); | 1323 | idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); |
1341 | cont_start = pmd_page_vaddr(*pmd) + idx; | 1324 | cont_start = pmd_page_vaddr(*pmd) + idx; |
1342 | for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) | 1325 | for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) |
1343 | pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT; | 1326 | pte_val(*(cont_start + j)) &= |
1327 | ~ARM_SMMU_PTE_CONT; | ||
1344 | 1328 | ||
1345 | arm_smmu_flush_pgtable(smmu, cont_start, | 1329 | arm_smmu_flush_pgtable(smmu, cont_start, |
1346 | sizeof(*pte) * | 1330 | sizeof(*pte) * |
@@ -1429,12 +1413,12 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | |||
1429 | int ret, stage; | 1413 | int ret, stage; |
1430 | unsigned long end; | 1414 | unsigned long end; |
1431 | phys_addr_t input_mask, output_mask; | 1415 | phys_addr_t input_mask, output_mask; |
1432 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 1416 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1433 | pgd_t *pgd = root_cfg->pgd; | 1417 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
1434 | struct arm_smmu_device *smmu = root_cfg->smmu; | 1418 | pgd_t *pgd = cfg->pgd; |
1435 | unsigned long flags; | 1419 | unsigned long flags; |
1436 | 1420 | ||
1437 | if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { | 1421 | if (cfg->cbar == CBAR_TYPE_S2_TRANS) { |
1438 | stage = 2; | 1422 | stage = 2; |
1439 | output_mask = (1ULL << smmu->s2_output_size) - 1; | 1423 | output_mask = (1ULL << smmu->s2_output_size) - 1; |
1440 | } else { | 1424 | } else { |
@@ -1484,10 +1468,6 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
1484 | if (!smmu_domain) | 1468 | if (!smmu_domain) |
1485 | return -ENODEV; | 1469 | return -ENODEV; |
1486 | 1470 | ||
1487 | /* Check for silent address truncation up the SMMU chain. */ | ||
1488 | if ((phys_addr_t)iova & ~smmu_domain->output_mask) | ||
1489 | return -ERANGE; | ||
1490 | |||
1491 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); | 1471 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); |
1492 | } | 1472 | } |
1493 | 1473 | ||
@@ -1498,7 +1478,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
1498 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1478 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1499 | 1479 | ||
1500 | ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); | 1480 | ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); |
1501 | arm_smmu_tlb_inv_context(&smmu_domain->root_cfg); | 1481 | arm_smmu_tlb_inv_context(smmu_domain); |
1502 | return ret ? 0 : size; | 1482 | return ret ? 0 : size; |
1503 | } | 1483 | } |
1504 | 1484 | ||
@@ -1510,9 +1490,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
1510 | pmd_t pmd; | 1490 | pmd_t pmd; |
1511 | pte_t pte; | 1491 | pte_t pte; |
1512 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1492 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1513 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | 1493 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
1514 | 1494 | ||
1515 | pgdp = root_cfg->pgd; | 1495 | pgdp = cfg->pgd; |
1516 | if (!pgdp) | 1496 | if (!pgdp) |
1517 | return 0; | 1497 | return 0; |
1518 | 1498 | ||
@@ -1538,19 +1518,29 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
1538 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, | 1518 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, |
1539 | unsigned long cap) | 1519 | unsigned long cap) |
1540 | { | 1520 | { |
1541 | unsigned long caps = 0; | ||
1542 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1521 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1522 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
1523 | u32 features = smmu ? smmu->features : 0; | ||
1524 | |||
1525 | switch (cap) { | ||
1526 | case IOMMU_CAP_CACHE_COHERENCY: | ||
1527 | return features & ARM_SMMU_FEAT_COHERENT_WALK; | ||
1528 | case IOMMU_CAP_INTR_REMAP: | ||
1529 | return 1; /* MSIs are just memory writes */ | ||
1530 | default: | ||
1531 | return 0; | ||
1532 | } | ||
1533 | } | ||
1543 | 1534 | ||
1544 | if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | 1535 | static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data) |
1545 | caps |= IOMMU_CAP_CACHE_COHERENCY; | 1536 | { |
1546 | 1537 | *((u16 *)data) = alias; | |
1547 | return !!(cap & caps); | 1538 | return 0; /* Continue walking */ |
1548 | } | 1539 | } |
1549 | 1540 | ||
1550 | static int arm_smmu_add_device(struct device *dev) | 1541 | static int arm_smmu_add_device(struct device *dev) |
1551 | { | 1542 | { |
1552 | struct arm_smmu_device *child, *parent, *smmu; | 1543 | struct arm_smmu_device *smmu; |
1553 | struct arm_smmu_master *master = NULL; | ||
1554 | struct iommu_group *group; | 1544 | struct iommu_group *group; |
1555 | int ret; | 1545 | int ret; |
1556 | 1546 | ||
@@ -1559,35 +1549,8 @@ static int arm_smmu_add_device(struct device *dev) | |||
1559 | return -EINVAL; | 1549 | return -EINVAL; |
1560 | } | 1550 | } |
1561 | 1551 | ||
1562 | spin_lock(&arm_smmu_devices_lock); | 1552 | smmu = find_smmu_for_device(dev); |
1563 | list_for_each_entry(parent, &arm_smmu_devices, list) { | 1553 | if (!smmu) |
1564 | smmu = parent; | ||
1565 | |||
1566 | /* Try to find a child of the current SMMU. */ | ||
1567 | list_for_each_entry(child, &arm_smmu_devices, list) { | ||
1568 | if (child->parent_of_node == parent->dev->of_node) { | ||
1569 | /* Does the child sit above our master? */ | ||
1570 | master = find_smmu_master(child, dev->of_node); | ||
1571 | if (master) { | ||
1572 | smmu = NULL; | ||
1573 | break; | ||
1574 | } | ||
1575 | } | ||
1576 | } | ||
1577 | |||
1578 | /* We found some children, so keep searching. */ | ||
1579 | if (!smmu) { | ||
1580 | master = NULL; | ||
1581 | continue; | ||
1582 | } | ||
1583 | |||
1584 | master = find_smmu_master(smmu, dev->of_node); | ||
1585 | if (master) | ||
1586 | break; | ||
1587 | } | ||
1588 | spin_unlock(&arm_smmu_devices_lock); | ||
1589 | |||
1590 | if (!master) | ||
1591 | return -ENODEV; | 1554 | return -ENODEV; |
1592 | 1555 | ||
1593 | group = iommu_group_alloc(); | 1556 | group = iommu_group_alloc(); |
@@ -1596,20 +1559,45 @@ static int arm_smmu_add_device(struct device *dev) | |||
1596 | return PTR_ERR(group); | 1559 | return PTR_ERR(group); |
1597 | } | 1560 | } |
1598 | 1561 | ||
1562 | if (dev_is_pci(dev)) { | ||
1563 | struct arm_smmu_master_cfg *cfg; | ||
1564 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1565 | |||
1566 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | ||
1567 | if (!cfg) { | ||
1568 | ret = -ENOMEM; | ||
1569 | goto out_put_group; | ||
1570 | } | ||
1571 | |||
1572 | cfg->num_streamids = 1; | ||
1573 | /* | ||
1574 | * Assume Stream ID == Requester ID for now. | ||
1575 | * We need a way to describe the ID mappings in FDT. | ||
1576 | */ | ||
1577 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, | ||
1578 | &cfg->streamids[0]); | ||
1579 | dev->archdata.iommu = cfg; | ||
1580 | } else { | ||
1581 | dev->archdata.iommu = smmu; | ||
1582 | } | ||
1583 | |||
1599 | ret = iommu_group_add_device(group, dev); | 1584 | ret = iommu_group_add_device(group, dev); |
1600 | iommu_group_put(group); | ||
1601 | dev->archdata.iommu = smmu; | ||
1602 | 1585 | ||
1586 | out_put_group: | ||
1587 | iommu_group_put(group); | ||
1603 | return ret; | 1588 | return ret; |
1604 | } | 1589 | } |
1605 | 1590 | ||
1606 | static void arm_smmu_remove_device(struct device *dev) | 1591 | static void arm_smmu_remove_device(struct device *dev) |
1607 | { | 1592 | { |
1593 | if (dev_is_pci(dev)) | ||
1594 | kfree(dev->archdata.iommu); | ||
1595 | |||
1608 | dev->archdata.iommu = NULL; | 1596 | dev->archdata.iommu = NULL; |
1609 | iommu_group_remove_device(dev); | 1597 | iommu_group_remove_device(dev); |
1610 | } | 1598 | } |
1611 | 1599 | ||
1612 | static struct iommu_ops arm_smmu_ops = { | 1600 | static const struct iommu_ops arm_smmu_ops = { |
1613 | .domain_init = arm_smmu_domain_init, | 1601 | .domain_init = arm_smmu_domain_init, |
1614 | .domain_destroy = arm_smmu_domain_destroy, | 1602 | .domain_destroy = arm_smmu_domain_destroy, |
1615 | .attach_dev = arm_smmu_attach_dev, | 1603 | .attach_dev = arm_smmu_attach_dev, |
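For PCI masters the add_device path above cannot read stream IDs out of the device tree, so the patch assumes StreamID == PCI Requester ID and lets the generic DMA-alias walk report it, aliases included. A sketch of that walk under the same assumption (names illustrative):

	#include <linux/pci.h>

	/*
	 * pci_for_each_dma_alias() invokes the callback for the device's own
	 * Requester ID and for every alias the fabric may use on its behalf
	 * (e.g. a PCIe-to-PCI bridge). The u16 it passes is the bus number in
	 * the upper byte and devfn in the lower byte.
	 */
	static int example_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
	{
		*(u16 *)data = alias;	/* keep the last alias reported */
		return 0;		/* non-zero would stop the walk */
	}

	/* Usage: u16 sid; pci_for_each_dma_alias(pdev, example_get_pci_sid, &sid); */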
@@ -1639,7 +1627,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1639 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | 1627 | /* Mark all SMRn as invalid and all S2CRn as bypass */ |
1640 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | 1628 | for (i = 0; i < smmu->num_mapping_groups; ++i) { |
1641 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); | 1629 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); |
1642 | writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); | 1630 | writel_relaxed(S2CR_TYPE_BYPASS, |
1631 | gr0_base + ARM_SMMU_GR0_S2CR(i)); | ||
1643 | } | 1632 | } |
1644 | 1633 | ||
1645 | /* Make sure all context banks are disabled and clear CB_FSR */ | 1634 | /* Make sure all context banks are disabled and clear CB_FSR */ |
@@ -1779,11 +1768,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1779 | smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; | 1768 | smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; |
1780 | 1769 | ||
1781 | /* Check for size mismatch of SMMU address space from mapped region */ | 1770 | /* Check for size mismatch of SMMU address space from mapped region */ |
1782 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | 1771 | size = 1 << |
1772 | (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | ||
1783 | size *= (smmu->pagesize << 1); | 1773 | size *= (smmu->pagesize << 1); |
1784 | if (smmu->size != size) | 1774 | if (smmu->size != size) |
1785 | dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs " | 1775 | dev_warn(smmu->dev, |
1786 | "from mapped region size (0x%lx)!\n", size, smmu->size); | 1776 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", |
1777 | size, smmu->size); | ||
1787 | 1778 | ||
1788 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & | 1779 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & |
1789 | ID1_NUMS2CB_MASK; | 1780 | ID1_NUMS2CB_MASK; |
@@ -1804,14 +1795,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1804 | * allocation (PTRS_PER_PGD). | 1795 | * allocation (PTRS_PER_PGD). |
1805 | */ | 1796 | */ |
1806 | #ifdef CONFIG_64BIT | 1797 | #ifdef CONFIG_64BIT |
1807 | smmu->s1_output_size = min((unsigned long)VA_BITS, size); | 1798 | smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); |
1808 | #else | 1799 | #else |
1809 | smmu->s1_output_size = min(32UL, size); | 1800 | smmu->s1_output_size = min(32UL, size); |
1810 | #endif | 1801 | #endif |
1811 | 1802 | ||
1812 | /* The stage-2 output mask is also applied for bypass */ | 1803 | /* The stage-2 output mask is also applied for bypass */ |
1813 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | 1804 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
1814 | smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size); | 1805 | smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); |
1815 | 1806 | ||
1816 | if (smmu->version == 1) { | 1807 | if (smmu->version == 1) { |
1817 | smmu->input_size = 32; | 1808 | smmu->input_size = 32; |
@@ -1835,7 +1826,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1835 | 1826 | ||
1836 | dev_notice(smmu->dev, | 1827 | dev_notice(smmu->dev, |
1837 | "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", | 1828 | "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", |
1838 | smmu->input_size, smmu->s1_output_size, smmu->s2_output_size); | 1829 | smmu->input_size, smmu->s1_output_size, |
1830 | smmu->s2_output_size); | ||
1839 | return 0; | 1831 | return 0; |
1840 | } | 1832 | } |
1841 | 1833 | ||
@@ -1843,7 +1835,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1843 | { | 1835 | { |
1844 | struct resource *res; | 1836 | struct resource *res; |
1845 | struct arm_smmu_device *smmu; | 1837 | struct arm_smmu_device *smmu; |
1846 | struct device_node *dev_node; | ||
1847 | struct device *dev = &pdev->dev; | 1838 | struct device *dev = &pdev->dev; |
1848 | struct rb_node *node; | 1839 | struct rb_node *node; |
1849 | struct of_phandle_args masterspec; | 1840 | struct of_phandle_args masterspec; |
@@ -1890,6 +1881,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1890 | 1881 | ||
1891 | for (i = 0; i < num_irqs; ++i) { | 1882 | for (i = 0; i < num_irqs; ++i) { |
1892 | int irq = platform_get_irq(pdev, i); | 1883 | int irq = platform_get_irq(pdev, i); |
1884 | |||
1893 | if (irq < 0) { | 1885 | if (irq < 0) { |
1894 | dev_err(dev, "failed to get irq index %d\n", i); | 1886 | dev_err(dev, "failed to get irq index %d\n", i); |
1895 | return -ENODEV; | 1887 | return -ENODEV; |
@@ -1913,12 +1905,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1913 | } | 1905 | } |
1914 | dev_notice(dev, "registered %d master devices\n", i); | 1906 | dev_notice(dev, "registered %d master devices\n", i); |
1915 | 1907 | ||
1916 | if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0))) | ||
1917 | smmu->parent_of_node = dev_node; | ||
1918 | |||
1919 | err = arm_smmu_device_cfg_probe(smmu); | 1908 | err = arm_smmu_device_cfg_probe(smmu); |
1920 | if (err) | 1909 | if (err) |
1921 | goto out_put_parent; | 1910 | goto out_put_masters; |
1922 | 1911 | ||
1923 | parse_driver_options(smmu); | 1912 | parse_driver_options(smmu); |
1924 | 1913 | ||
@@ -1928,7 +1917,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1928 | "found only %d context interrupt(s) but %d required\n", | 1917 | "found only %d context interrupt(s) but %d required\n", |
1929 | smmu->num_context_irqs, smmu->num_context_banks); | 1918 | smmu->num_context_irqs, smmu->num_context_banks); |
1930 | err = -ENODEV; | 1919 | err = -ENODEV; |
1931 | goto out_put_parent; | 1920 | goto out_put_masters; |
1932 | } | 1921 | } |
1933 | 1922 | ||
1934 | for (i = 0; i < smmu->num_global_irqs; ++i) { | 1923 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
@@ -1956,14 +1945,10 @@ out_free_irqs: | |||
1956 | while (i--) | 1945 | while (i--) |
1957 | free_irq(smmu->irqs[i], smmu); | 1946 | free_irq(smmu->irqs[i], smmu); |
1958 | 1947 | ||
1959 | out_put_parent: | ||
1960 | if (smmu->parent_of_node) | ||
1961 | of_node_put(smmu->parent_of_node); | ||
1962 | |||
1963 | out_put_masters: | 1948 | out_put_masters: |
1964 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | 1949 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { |
1965 | struct arm_smmu_master *master; | 1950 | struct arm_smmu_master *master |
1966 | master = container_of(node, struct arm_smmu_master, node); | 1951 | = container_of(node, struct arm_smmu_master, node); |
1967 | of_node_put(master->of_node); | 1952 | of_node_put(master->of_node); |
1968 | } | 1953 | } |
1969 | 1954 | ||
@@ -1990,12 +1975,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev) | |||
1990 | if (!smmu) | 1975 | if (!smmu) |
1991 | return -ENODEV; | 1976 | return -ENODEV; |
1992 | 1977 | ||
1993 | if (smmu->parent_of_node) | ||
1994 | of_node_put(smmu->parent_of_node); | ||
1995 | |||
1996 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | 1978 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { |
1997 | struct arm_smmu_master *master; | 1979 | struct arm_smmu_master *master |
1998 | master = container_of(node, struct arm_smmu_master, node); | 1980 | = container_of(node, struct arm_smmu_master, node); |
1999 | of_node_put(master->of_node); | 1981 | of_node_put(master->of_node); |
2000 | } | 1982 | } |
2001 | 1983 | ||
@@ -2006,7 +1988,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) | |||
2006 | free_irq(smmu->irqs[i], smmu); | 1988 | free_irq(smmu->irqs[i], smmu); |
2007 | 1989 | ||
2008 | /* Turn the thing off */ | 1990 | /* Turn the thing off */ |
2009 | writel(sCR0_CLIENTPD,ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); | 1991 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
2010 | return 0; | 1992 | return 0; |
2011 | } | 1993 | } |
2012 | 1994 | ||
@@ -2048,6 +2030,11 @@ static int __init arm_smmu_init(void) | |||
2048 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | 2030 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
2049 | #endif | 2031 | #endif |
2050 | 2032 | ||
2033 | #ifdef CONFIG_PCI | ||
2034 | if (!iommu_present(&pci_bus_type)) | ||
2035 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | ||
2036 | #endif | ||
2037 | |||
2051 | return 0; | 2038 | return 0; |
2052 | } | 2039 | } |
2053 | 2040 | ||
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 9a4f05e5b23f..4306885f48b1 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/tboot.h> | 38 | #include <linux/tboot.h> |
39 | #include <linux/dmi.h> | 39 | #include <linux/dmi.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/iommu.h> | ||
41 | #include <asm/irq_remapping.h> | 42 | #include <asm/irq_remapping.h> |
42 | #include <asm/iommu_table.h> | 43 | #include <asm/iommu_table.h> |
43 | 44 | ||
@@ -980,6 +981,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
980 | raw_spin_lock_init(&iommu->register_lock); | 981 | raw_spin_lock_init(&iommu->register_lock); |
981 | 982 | ||
982 | drhd->iommu = iommu; | 983 | drhd->iommu = iommu; |
984 | |||
985 | if (intel_iommu_enabled) | ||
986 | iommu->iommu_dev = iommu_device_create(NULL, iommu, | ||
987 | intel_iommu_groups, | ||
988 | iommu->name); | ||
989 | |||
983 | return 0; | 990 | return 0; |
984 | 991 | ||
985 | err_unmap: | 992 | err_unmap: |
@@ -991,6 +998,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
991 | 998 | ||
992 | static void free_iommu(struct intel_iommu *iommu) | 999 | static void free_iommu(struct intel_iommu *iommu) |
993 | { | 1000 | { |
1001 | iommu_device_destroy(iommu->iommu_dev); | ||
1002 | |||
994 | if (iommu->irq) { | 1003 | if (iommu->irq) { |
995 | free_irq(iommu->irq, iommu); | 1004 | free_irq(iommu->irq, iommu); |
996 | irq_set_handler_data(iommu->irq, NULL); | 1005 | irq_set_handler_data(iommu->irq, NULL); |
@@ -1339,9 +1348,6 @@ int dmar_enable_qi(struct intel_iommu *iommu) | |||
1339 | return -ENOMEM; | 1348 | return -ENOMEM; |
1340 | } | 1349 | } |
1341 | 1350 | ||
1342 | qi->free_head = qi->free_tail = 0; | ||
1343 | qi->free_cnt = QI_LENGTH; | ||
1344 | |||
1345 | raw_spin_lock_init(&qi->q_lock); | 1351 | raw_spin_lock_init(&qi->q_lock); |
1346 | 1352 | ||
1347 | __dmar_enable_qi(iommu); | 1353 | __dmar_enable_qi(iommu); |
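The dmar.c hunks hook the Intel driver into the new /sys/class/iommu interface: each DRHD unit is published when it is allocated (provided the IOMMU is enabled) and removed again in free_iommu(). A sketch of that create/destroy pairing, with the wrapper names assumed rather than taken from the patch:

	#include <linux/iommu.h>

	/* Publish one hardware unit under /sys/class/iommu/<name>. */
	static struct device *example_publish_iommu(void *hw,
						    const struct attribute_group **groups,
						    const char *name)
	{
		return iommu_device_create(NULL, hw, groups, "%s", name);
	}

	/* Undo the registration when the unit is torn down. */
	static void example_unpublish_iommu(struct device *iommu_dev)
	{
		iommu_device_destroy(iommu_dev);
	}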
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 99054d2c040d..d037e87a1fe5 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1170,7 +1170,7 @@ static void exynos_iommu_remove_device(struct device *dev) | |||
1170 | iommu_group_remove_device(dev); | 1170 | iommu_group_remove_device(dev); |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | static struct iommu_ops exynos_iommu_ops = { | 1173 | static const struct iommu_ops exynos_iommu_ops = { |
1174 | .domain_init = exynos_iommu_domain_init, | 1174 | .domain_init = exynos_iommu_domain_init, |
1175 | .domain_destroy = exynos_iommu_domain_destroy, | 1175 | .domain_destroy = exynos_iommu_domain_destroy, |
1176 | .attach_dev = exynos_iommu_attach_device, | 1176 | .attach_dev = exynos_iommu_attach_device, |
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index bb446d742a2d..2b6ce9387af1 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c | |||
@@ -92,7 +92,7 @@ struct gen_pool *spaace_pool; | |||
92 | * subwindow count per liodn. | 92 | * subwindow count per liodn. |
93 | * | 93 | * |
94 | */ | 94 | */ |
95 | u32 pamu_get_max_subwin_cnt() | 95 | u32 pamu_get_max_subwin_cnt(void) |
96 | { | 96 | { |
97 | return max_subwindow_count; | 97 | return max_subwindow_count; |
98 | } | 98 | } |
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index af47648301a9..61d1dafa242d 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <sysdev/fsl_pci.h> | 38 | #include <sysdev/fsl_pci.h> |
39 | 39 | ||
40 | #include "fsl_pamu_domain.h" | 40 | #include "fsl_pamu_domain.h" |
41 | #include "pci.h" | ||
42 | 41 | ||
43 | /* | 42 | /* |
44 | * Global spinlock that needs to be held while | 43 | * Global spinlock that needs to be held while |
@@ -887,8 +886,6 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | |||
887 | return ret; | 886 | return ret; |
888 | } | 887 | } |
889 | 888 | ||
890 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) | ||
891 | |||
892 | static struct iommu_group *get_device_iommu_group(struct device *dev) | 889 | static struct iommu_group *get_device_iommu_group(struct device *dev) |
893 | { | 890 | { |
894 | struct iommu_group *group; | 891 | struct iommu_group *group; |
@@ -945,74 +942,13 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev) | |||
945 | struct pci_controller *pci_ctl; | 942 | struct pci_controller *pci_ctl; |
946 | bool pci_endpt_partioning; | 943 | bool pci_endpt_partioning; |
947 | struct iommu_group *group = NULL; | 944 | struct iommu_group *group = NULL; |
948 | struct pci_dev *bridge, *dma_pdev = NULL; | ||
949 | 945 | ||
950 | pci_ctl = pci_bus_to_host(pdev->bus); | 946 | pci_ctl = pci_bus_to_host(pdev->bus); |
951 | pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl); | 947 | pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl); |
952 | /* We can partition PCIe devices so assign device group to the device */ | 948 | /* We can partition PCIe devices so assign device group to the device */ |
953 | if (pci_endpt_partioning) { | 949 | if (pci_endpt_partioning) { |
954 | bridge = pci_find_upstream_pcie_bridge(pdev); | 950 | group = iommu_group_get_for_dev(&pdev->dev); |
955 | if (bridge) { | ||
956 | if (pci_is_pcie(bridge)) | ||
957 | dma_pdev = pci_get_domain_bus_and_slot( | ||
958 | pci_domain_nr(pdev->bus), | ||
959 | bridge->subordinate->number, 0); | ||
960 | if (!dma_pdev) | ||
961 | dma_pdev = pci_dev_get(bridge); | ||
962 | } else | ||
963 | dma_pdev = pci_dev_get(pdev); | ||
964 | |||
965 | /* Account for quirked devices */ | ||
966 | swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); | ||
967 | |||
968 | /* | ||
969 | * If it's a multifunction device that does not support our | ||
970 | * required ACS flags, add to the same group as lowest numbered | ||
971 | * function that also does not suport the required ACS flags. | ||
972 | */ | ||
973 | if (dma_pdev->multifunction && | ||
974 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { | ||
975 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); | ||
976 | |||
977 | for (i = 0; i < 8; i++) { | ||
978 | struct pci_dev *tmp; | ||
979 | |||
980 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
981 | if (!tmp) | ||
982 | continue; | ||
983 | |||
984 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
985 | swap_pci_ref(&dma_pdev, tmp); | ||
986 | break; | ||
987 | } | ||
988 | pci_dev_put(tmp); | ||
989 | } | ||
990 | } | ||
991 | |||
992 | /* | ||
993 | * Devices on the root bus go through the iommu. If that's not us, | ||
994 | * find the next upstream device and test ACS up to the root bus. | ||
995 | * Finding the next device may require skipping virtual buses. | ||
996 | */ | ||
997 | while (!pci_is_root_bus(dma_pdev->bus)) { | ||
998 | struct pci_bus *bus = dma_pdev->bus; | ||
999 | |||
1000 | while (!bus->self) { | ||
1001 | if (!pci_is_root_bus(bus)) | ||
1002 | bus = bus->parent; | ||
1003 | else | ||
1004 | goto root_bus; | ||
1005 | } | ||
1006 | |||
1007 | if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) | ||
1008 | break; | ||
1009 | |||
1010 | swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); | ||
1011 | } | ||
1012 | 951 | ||
1013 | root_bus: | ||
1014 | group = get_device_iommu_group(&dma_pdev->dev); | ||
1015 | pci_dev_put(dma_pdev); | ||
1016 | /* | 952 | /* |
1017 | * PCIe controller is not a partitionable entity | 953 | * PCIe controller is not a partitionable entity |
1018 | * free the controller device iommu_group. | 954 | * free the controller device iommu_group. |
@@ -1116,8 +1052,7 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) | |||
1116 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, | 1052 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, |
1117 | ((w_count > 1) ? w_count : 0)); | 1053 | ((w_count > 1) ? w_count : 0)); |
1118 | if (!ret) { | 1054 | if (!ret) { |
1119 | if (dma_domain->win_arr) | 1055 | kfree(dma_domain->win_arr); |
1120 | kfree(dma_domain->win_arr); | ||
1121 | dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * | 1056 | dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * |
1122 | w_count, GFP_ATOMIC); | 1057 | w_count, GFP_ATOMIC); |
1123 | if (!dma_domain->win_arr) { | 1058 | if (!dma_domain->win_arr) { |
@@ -1138,7 +1073,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain) | |||
1138 | return dma_domain->win_cnt; | 1073 | return dma_domain->win_cnt; |
1139 | } | 1074 | } |
1140 | 1075 | ||
1141 | static struct iommu_ops fsl_pamu_ops = { | 1076 | static const struct iommu_ops fsl_pamu_ops = { |
1142 | .domain_init = fsl_pamu_domain_init, | 1077 | .domain_init = fsl_pamu_domain_init, |
1143 | .domain_destroy = fsl_pamu_domain_destroy, | 1078 | .domain_destroy = fsl_pamu_domain_destroy, |
1144 | .attach_dev = fsl_pamu_attach_device, | 1079 | .attach_dev = fsl_pamu_attach_device, |
@@ -1155,7 +1090,7 @@ static struct iommu_ops fsl_pamu_ops = { | |||
1155 | .remove_device = fsl_pamu_remove_device, | 1090 | .remove_device = fsl_pamu_remove_device, |
1156 | }; | 1091 | }; |
1157 | 1092 | ||
1158 | int pamu_domain_init() | 1093 | int pamu_domain_init(void) |
1159 | { | 1094 | { |
1160 | int ret = 0; | 1095 | int ret = 0; |
1161 | 1096 | ||
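Besides the (void) prototypes, fsl_pamu_ops is now declared const, matching the series-wide "make struct iommu_ops const" change also visible for exynos_iommu_ops above and intel_iommu_ops below. A standalone sketch, with made-up names, of what const buys for a table of function pointers:

/* Standalone sketch with hypothetical names: a const table of function
 * pointers can be placed in read-only data, and neither the driver nor its
 * consumers can patch it by accident.
 */
#include <stdio.h>

struct demo_ops {
        int  (*attach)(int dev);
        void (*detach)(int dev);
};

static int  demo_attach(int dev) { printf("attach %d\n", dev); return 0; }
static void demo_detach(int dev) { printf("detach %d\n", dev); }

static const struct demo_ops demo_iommu_ops = {
        .attach = demo_attach,
        .detach = demo_detach,
};

/* Consumers take pointer-to-const, so they cannot rewrite the table either. */
static int bus_set_ops(const struct demo_ops *ops, int dev)
{
        return ops->attach(dev);
}

int main(void)
{
        bus_set_ops(&demo_iommu_ops, 3);
        /* demo_iommu_ops.attach = NULL;  would now fail to compile */
        demo_iommu_ops.detach(3);
        return 0;
}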
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 51b6b77dc3e5..d1f5caad04f9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <asm/iommu.h> | 45 | #include <asm/iommu.h> |
46 | 46 | ||
47 | #include "irq_remapping.h" | 47 | #include "irq_remapping.h" |
48 | #include "pci.h" | ||
49 | 48 | ||
50 | #define ROOT_SIZE VTD_PAGE_SIZE | 49 | #define ROOT_SIZE VTD_PAGE_SIZE |
51 | #define CONTEXT_SIZE VTD_PAGE_SIZE | 50 | #define CONTEXT_SIZE VTD_PAGE_SIZE |
@@ -304,7 +303,7 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
304 | 303 | ||
305 | static inline bool dma_pte_superpage(struct dma_pte *pte) | 304 | static inline bool dma_pte_superpage(struct dma_pte *pte) |
306 | { | 305 | { |
307 | return (pte->val & (1 << 7)); | 306 | return (pte->val & DMA_PTE_LARGE_PAGE); |
308 | } | 307 | } |
309 | 308 | ||
310 | static inline int first_pte_in_page(struct dma_pte *pte) | 309 | static inline int first_pte_in_page(struct dma_pte *pte) |
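dma_pte_superpage() now tests the named DMA_PTE_LARGE_PAGE bit rather than the literal (1 << 7), and a later hunk makes dma_pfn_level_pte() call this helper instead of open-coding the test. A tiny standalone sketch of the same pattern; only the bit position is taken from VT-d, the rest is simplified:

/* Standalone sketch: name the bit once, test it through one helper, and the
 * literal "1 << 7" disappears from every caller.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DMA_PTE_LARGE_PAGE (1ULL << 7)

struct dma_pte { uint64_t val; };

static inline bool dma_pte_superpage(const struct dma_pte *pte)
{
        return pte->val & DMA_PTE_LARGE_PAGE;
}

int main(void)
{
        struct dma_pte pte = { .val = DMA_PTE_LARGE_PAGE | 0x1000 };

        printf("superpage: %d\n", dma_pte_superpage(&pte));
        return 0;
}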
@@ -321,16 +320,13 @@ static inline int first_pte_in_page(struct dma_pte *pte) | |||
321 | static struct dmar_domain *si_domain; | 320 | static struct dmar_domain *si_domain; |
322 | static int hw_pass_through = 1; | 321 | static int hw_pass_through = 1; |
323 | 322 | ||
324 | /* devices under the same p2p bridge are owned in one domain */ | ||
325 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) | ||
326 | |||
327 | /* domain represents a virtual machine, more than one device | 323 | /* domain represents a virtual machine, more than one device |
328 | * across iommus may be owned in one domain, e.g. kvm guest. | 324 | * across iommus may be owned in one domain, e.g. kvm guest. |
329 | */ | 325 | */ |
330 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) | 326 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0) |
331 | 327 | ||
332 | /* si_domain contains multiple devices */ | 328 | /* si_domain contains multiple devices */ |
333 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) | 329 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) |
334 | 330 | ||
335 | /* define the limit of IOMMUs supported in each domain */ | 331 | /* define the limit of IOMMUs supported in each domain */ |
336 | #ifdef CONFIG_X86 | 332 | #ifdef CONFIG_X86 |
@@ -429,6 +425,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
429 | struct device *dev); | 425 | struct device *dev); |
430 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 426 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
431 | struct device *dev); | 427 | struct device *dev); |
428 | static int domain_detach_iommu(struct dmar_domain *domain, | ||
429 | struct intel_iommu *iommu); | ||
432 | 430 | ||
433 | #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON | 431 | #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON |
434 | int dmar_disabled = 0; | 432 | int dmar_disabled = 0; |
@@ -451,7 +449,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | |||
451 | static DEFINE_SPINLOCK(device_domain_lock); | 449 | static DEFINE_SPINLOCK(device_domain_lock); |
452 | static LIST_HEAD(device_domain_list); | 450 | static LIST_HEAD(device_domain_list); |
453 | 451 | ||
454 | static struct iommu_ops intel_iommu_ops; | 452 | static const struct iommu_ops intel_iommu_ops; |
455 | 453 | ||
456 | static int __init intel_iommu_setup(char *str) | 454 | static int __init intel_iommu_setup(char *str) |
457 | { | 455 | { |
@@ -540,6 +538,24 @@ void free_iova_mem(struct iova *iova) | |||
540 | kmem_cache_free(iommu_iova_cache, iova); | 538 | kmem_cache_free(iommu_iova_cache, iova); |
541 | } | 539 | } |
542 | 540 | ||
541 | static inline int domain_type_is_vm(struct dmar_domain *domain) | ||
542 | { | ||
543 | return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
544 | } | ||
545 | |||
546 | static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) | ||
547 | { | ||
548 | return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | | ||
549 | DOMAIN_FLAG_STATIC_IDENTITY); | ||
550 | } | ||
551 | |||
552 | static inline int domain_pfn_supported(struct dmar_domain *domain, | ||
553 | unsigned long pfn) | ||
554 | { | ||
555 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
556 | |||
557 | return !(addr_width < BITS_PER_LONG && pfn >> addr_width); | ||
558 | } | ||
543 | 559 | ||
544 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) | 560 | static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) |
545 | { | 561 | { |
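The new domain_pfn_supported() helper encapsulates the "does this pfn fit in the domain's address width" check; later hunks replace the open-coded addr_width/BITS_PER_LONG tests in pfn_to_dma_pte(), dma_pte_clear_range(), dma_pte_free_pagetable(), domain_unmap() and __domain_mapping() with it. A compilable sketch of the check itself (the agaw_width field name is invented):

/* Standalone sketch: a domain with an N-bit I/O address width supports page
 * frame numbers below 1 << (N - PAGE_SHIFT).  Shifting by >= the word size
 * is undefined in C, hence the BITS_PER_LONG guard the helper hides.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)
#define VTD_PAGE_SHIFT 12

struct demo_domain { int agaw_width; };     /* address width in bits (hypothetical field) */

static int domain_pfn_supported(const struct demo_domain *d, unsigned long pfn)
{
        int addr_width = d->agaw_width - VTD_PAGE_SHIFT;

        return !(addr_width < (int)BITS_PER_LONG && pfn >> addr_width);
}

int main(void)
{
        struct demo_domain d = { .agaw_width = 39 };    /* 39-bit, 3-level table */

        printf("%d %d\n",
               domain_pfn_supported(&d, 1UL << 20),     /* in range  -> 1 */
               domain_pfn_supported(&d, 1UL << 30));    /* too large -> 0 */
        return 0;
}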
@@ -580,9 +596,7 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | |||
580 | int iommu_id; | 596 | int iommu_id; |
581 | 597 | ||
582 | /* si_domain and vm domain should not get here. */ | 598 | /* si_domain and vm domain should not get here. */ |
583 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); | 599 | BUG_ON(domain_type_is_vm_or_si(domain)); |
584 | BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); | ||
585 | |||
586 | iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus); | 600 | iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus); |
587 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) | 601 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) |
588 | return NULL; | 602 | return NULL; |
@@ -619,50 +633,56 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) | |||
619 | rcu_read_unlock(); | 633 | rcu_read_unlock(); |
620 | } | 634 | } |
621 | 635 | ||
622 | static void domain_update_iommu_snooping(struct dmar_domain *domain) | 636 | static int domain_update_iommu_snooping(struct intel_iommu *skip) |
623 | { | 637 | { |
624 | int i; | 638 | struct dmar_drhd_unit *drhd; |
625 | 639 | struct intel_iommu *iommu; | |
626 | domain->iommu_snooping = 1; | 640 | int ret = 1; |
627 | 641 | ||
628 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { | 642 | rcu_read_lock(); |
629 | if (!ecap_sc_support(g_iommus[i]->ecap)) { | 643 | for_each_active_iommu(iommu, drhd) { |
630 | domain->iommu_snooping = 0; | 644 | if (iommu != skip) { |
631 | break; | 645 | if (!ecap_sc_support(iommu->ecap)) { |
646 | ret = 0; | ||
647 | break; | ||
648 | } | ||
632 | } | 649 | } |
633 | } | 650 | } |
651 | rcu_read_unlock(); | ||
652 | |||
653 | return ret; | ||
634 | } | 654 | } |
635 | 655 | ||
636 | static void domain_update_iommu_superpage(struct dmar_domain *domain) | 656 | static int domain_update_iommu_superpage(struct intel_iommu *skip) |
637 | { | 657 | { |
638 | struct dmar_drhd_unit *drhd; | 658 | struct dmar_drhd_unit *drhd; |
639 | struct intel_iommu *iommu = NULL; | 659 | struct intel_iommu *iommu; |
640 | int mask = 0xf; | 660 | int mask = 0xf; |
641 | 661 | ||
642 | if (!intel_iommu_superpage) { | 662 | if (!intel_iommu_superpage) { |
643 | domain->iommu_superpage = 0; | 663 | return 0; |
644 | return; | ||
645 | } | 664 | } |
646 | 665 | ||
647 | /* set iommu_superpage to the smallest common denominator */ | 666 | /* set iommu_superpage to the smallest common denominator */ |
648 | rcu_read_lock(); | 667 | rcu_read_lock(); |
649 | for_each_active_iommu(iommu, drhd) { | 668 | for_each_active_iommu(iommu, drhd) { |
650 | mask &= cap_super_page_val(iommu->cap); | 669 | if (iommu != skip) { |
651 | if (!mask) { | 670 | mask &= cap_super_page_val(iommu->cap); |
652 | break; | 671 | if (!mask) |
672 | break; | ||
653 | } | 673 | } |
654 | } | 674 | } |
655 | rcu_read_unlock(); | 675 | rcu_read_unlock(); |
656 | 676 | ||
657 | domain->iommu_superpage = fls(mask); | 677 | return fls(mask); |
658 | } | 678 | } |
659 | 679 | ||
660 | /* Some capabilities may be different across iommus */ | 680 | /* Some capabilities may be different across iommus */ |
661 | static void domain_update_iommu_cap(struct dmar_domain *domain) | 681 | static void domain_update_iommu_cap(struct dmar_domain *domain) |
662 | { | 682 | { |
663 | domain_update_iommu_coherency(domain); | 683 | domain_update_iommu_coherency(domain); |
664 | domain_update_iommu_snooping(domain); | 684 | domain->iommu_snooping = domain_update_iommu_snooping(NULL); |
665 | domain_update_iommu_superpage(domain); | 685 | domain->iommu_superpage = domain_update_iommu_superpage(NULL); |
666 | } | 686 | } |
667 | 687 | ||
668 | static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) | 688 | static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) |
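domain_update_iommu_snooping() and domain_update_iommu_superpage() now walk every active IOMMU, can skip one unit (preparation for hot-removal), and return the computed value; domain_update_iommu_cap() simply assigns the results. A reduced standalone sketch of the recompute-with-skip idea, with invented unit data:

/* Standalone sketch: capabilities are re-derived over all remaining units,
 * and the unit being removed can be passed as "skip" so it no longer
 * influences the result.
 */
#include <stdio.h>
#include <stdbool.h>

struct unit { bool snoop_ctl; unsigned int super_page_mask; };

static struct unit units[] = {
        { true,  0x3 },     /* 2M and 1G pages */
        { true,  0x1 },     /* 2M only */
        { false, 0x1 },
};
#define NUNITS (sizeof(units) / sizeof(units[0]))

static int update_snooping(const struct unit *skip)
{
        for (unsigned int i = 0; i < NUNITS; i++)
                if (&units[i] != skip && !units[i].snoop_ctl)
                        return 0;
        return 1;
}

static unsigned int update_superpage(const struct unit *skip)
{
        unsigned int mask = 0xf;

        for (unsigned int i = 0; i < NUNITS; i++)
                if (&units[i] != skip)
                        mask &= units[i].super_page_mask;
        return mask;
}

int main(void)
{
        printf("all units:       snoop=%d superpage=0x%x\n",
               update_snooping(NULL), update_superpage(NULL));
        printf("skipping unit 2: snoop=%d superpage=0x%x\n",
               update_snooping(&units[2]), update_superpage(&units[2]));
        return 0;
}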
@@ -671,7 +691,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf | |||
671 | struct intel_iommu *iommu; | 691 | struct intel_iommu *iommu; |
672 | struct device *tmp; | 692 | struct device *tmp; |
673 | struct pci_dev *ptmp, *pdev = NULL; | 693 | struct pci_dev *ptmp, *pdev = NULL; |
674 | u16 segment; | 694 | u16 segment = 0; |
675 | int i; | 695 | int i; |
676 | 696 | ||
677 | if (dev_is_pci(dev)) { | 697 | if (dev_is_pci(dev)) { |
@@ -816,14 +836,13 @@ out: | |||
816 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 836 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
817 | unsigned long pfn, int *target_level) | 837 | unsigned long pfn, int *target_level) |
818 | { | 838 | { |
819 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
820 | struct dma_pte *parent, *pte = NULL; | 839 | struct dma_pte *parent, *pte = NULL; |
821 | int level = agaw_to_level(domain->agaw); | 840 | int level = agaw_to_level(domain->agaw); |
822 | int offset; | 841 | int offset; |
823 | 842 | ||
824 | BUG_ON(!domain->pgd); | 843 | BUG_ON(!domain->pgd); |
825 | 844 | ||
826 | if (addr_width < BITS_PER_LONG && pfn >> addr_width) | 845 | if (!domain_pfn_supported(domain, pfn)) |
827 | /* Address beyond IOMMU's addressing capabilities. */ | 846 | /* Address beyond IOMMU's addressing capabilities. */ |
828 | return NULL; | 847 | return NULL; |
829 | 848 | ||
@@ -849,13 +868,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | |||
849 | 868 | ||
850 | domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); | 869 | domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); |
851 | pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; | 870 | pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; |
852 | if (cmpxchg64(&pte->val, 0ULL, pteval)) { | 871 | if (cmpxchg64(&pte->val, 0ULL, pteval)) |
853 | /* Someone else set it while we were thinking; use theirs. */ | 872 | /* Someone else set it while we were thinking; use theirs. */ |
854 | free_pgtable_page(tmp_page); | 873 | free_pgtable_page(tmp_page); |
855 | } else { | 874 | else |
856 | dma_pte_addr(pte); | ||
857 | domain_flush_cache(domain, pte, sizeof(*pte)); | 875 | domain_flush_cache(domain, pte, sizeof(*pte)); |
858 | } | ||
859 | } | 876 | } |
860 | if (level == 1) | 877 | if (level == 1) |
861 | break; | 878 | break; |
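The hunk above tidies the page-table population race: whichever CPU loses the cmpxchg64() frees its candidate page and keeps walking with the winner's entry, and the stray dma_pte_addr() call is dropped. A standalone sketch of the lose-the-race-and-free pattern, using C11 atomics in place of the kernel's cmpxchg64():

/* Standalone sketch: allocate a candidate, try to publish it with
 * compare-exchange, and if someone else already published, free ours and
 * use theirs.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uintptr_t slot;      /* 0 means "not populated yet" */

static void *get_or_create_page(void)
{
        uintptr_t expected = 0;
        void *tmp_page = calloc(1, 4096);

        if (!tmp_page)
                return NULL;

        if (!atomic_compare_exchange_strong(&slot, &expected,
                                            (uintptr_t)tmp_page)) {
                /* Someone else set it while we were thinking; use theirs. */
                free(tmp_page);
                return (void *)expected;
        }
        return tmp_page;
}

int main(void)
{
        void *a = get_or_create_page();
        void *b = get_or_create_page();     /* second call "loses the race" */

        printf("same page: %s\n", a == b ? "yes" : "no");
        free(a);
        return 0;
}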
@@ -892,7 +909,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, | |||
892 | break; | 909 | break; |
893 | } | 910 | } |
894 | 911 | ||
895 | if (pte->val & DMA_PTE_LARGE_PAGE) { | 912 | if (dma_pte_superpage(pte)) { |
896 | *large_page = total; | 913 | *large_page = total; |
897 | return pte; | 914 | return pte; |
898 | } | 915 | } |
@@ -908,12 +925,11 @@ static void dma_pte_clear_range(struct dmar_domain *domain, | |||
908 | unsigned long start_pfn, | 925 | unsigned long start_pfn, |
909 | unsigned long last_pfn) | 926 | unsigned long last_pfn) |
910 | { | 927 | { |
911 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
912 | unsigned int large_page = 1; | 928 | unsigned int large_page = 1; |
913 | struct dma_pte *first_pte, *pte; | 929 | struct dma_pte *first_pte, *pte; |
914 | 930 | ||
915 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | 931 | BUG_ON(!domain_pfn_supported(domain, start_pfn)); |
916 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | 932 | BUG_ON(!domain_pfn_supported(domain, last_pfn)); |
917 | BUG_ON(start_pfn > last_pfn); | 933 | BUG_ON(start_pfn > last_pfn); |
918 | 934 | ||
919 | /* we don't need lock here; nobody else touches the iova range */ | 935 | /* we don't need lock here; nobody else touches the iova range */ |
@@ -974,12 +990,12 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, | |||
974 | unsigned long start_pfn, | 990 | unsigned long start_pfn, |
975 | unsigned long last_pfn) | 991 | unsigned long last_pfn) |
976 | { | 992 | { |
977 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 993 | BUG_ON(!domain_pfn_supported(domain, start_pfn)); |
978 | 994 | BUG_ON(!domain_pfn_supported(domain, last_pfn)); | |
979 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | ||
980 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | ||
981 | BUG_ON(start_pfn > last_pfn); | 995 | BUG_ON(start_pfn > last_pfn); |
982 | 996 | ||
997 | dma_pte_clear_range(domain, start_pfn, last_pfn); | ||
998 | |||
983 | /* We don't need lock here; nobody else touches the iova range */ | 999 | /* We don't need lock here; nobody else touches the iova range */ |
984 | dma_pte_free_level(domain, agaw_to_level(domain->agaw), | 1000 | dma_pte_free_level(domain, agaw_to_level(domain->agaw), |
985 | domain->pgd, 0, start_pfn, last_pfn); | 1001 | domain->pgd, 0, start_pfn, last_pfn); |
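dma_pte_free_pagetable() now clears the leaf PTEs itself before freeing the intermediate tables, which is why later hunks in __domain_mapping() and intel_map_sg() can drop their separate dma_pte_clear_range() calls. A trivial standalone sketch of folding the mandatory first step into the callee:

/* Standalone sketch: the free path performs the clear step itself, so every
 * caller gets the ordering right with a single call.
 */
#include <stdio.h>

static void clear_range(int start, int end)
{
        printf("clear ptes %d..%d\n", start, end);
}

static void free_pagetable(int start, int end)
{
        clear_range(start, end);            /* was the caller's job before */
        printf("free page tables %d..%d\n", start, end);
}

int main(void)
{
        /* e.g. the error path of a mapping function: one call does both */
        free_pagetable(0x100, 0x1ff);
        return 0;
}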
@@ -1077,11 +1093,10 @@ struct page *domain_unmap(struct dmar_domain *domain, | |||
1077 | unsigned long start_pfn, | 1093 | unsigned long start_pfn, |
1078 | unsigned long last_pfn) | 1094 | unsigned long last_pfn) |
1079 | { | 1095 | { |
1080 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
1081 | struct page *freelist = NULL; | 1096 | struct page *freelist = NULL; |
1082 | 1097 | ||
1083 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | 1098 | BUG_ON(!domain_pfn_supported(domain, start_pfn)); |
1084 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | 1099 | BUG_ON(!domain_pfn_supported(domain, last_pfn)); |
1085 | BUG_ON(start_pfn > last_pfn); | 1100 | BUG_ON(start_pfn > last_pfn); |
1086 | 1101 | ||
1087 | /* we don't need lock here; nobody else touches the iova range */ | 1102 | /* we don't need lock here; nobody else touches the iova range */ |
@@ -1275,7 +1290,8 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, | |||
1275 | 1290 | ||
1276 | spin_lock_irqsave(&device_domain_lock, flags); | 1291 | spin_lock_irqsave(&device_domain_lock, flags); |
1277 | list_for_each_entry(info, &domain->devices, link) | 1292 | list_for_each_entry(info, &domain->devices, link) |
1278 | if (info->bus == bus && info->devfn == devfn) { | 1293 | if (info->iommu == iommu && info->bus == bus && |
1294 | info->devfn == devfn) { | ||
1279 | found = 1; | 1295 | found = 1; |
1280 | break; | 1296 | break; |
1281 | } | 1297 | } |
@@ -1384,7 +1400,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | |||
1384 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); | 1400 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
1385 | } | 1401 | } |
1386 | 1402 | ||
1387 | static int iommu_enable_translation(struct intel_iommu *iommu) | 1403 | static void iommu_enable_translation(struct intel_iommu *iommu) |
1388 | { | 1404 | { |
1389 | u32 sts; | 1405 | u32 sts; |
1390 | unsigned long flags; | 1406 | unsigned long flags; |
@@ -1398,10 +1414,9 @@ static int iommu_enable_translation(struct intel_iommu *iommu) | |||
1398 | readl, (sts & DMA_GSTS_TES), sts); | 1414 | readl, (sts & DMA_GSTS_TES), sts); |
1399 | 1415 | ||
1400 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); | 1416 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
1401 | return 0; | ||
1402 | } | 1417 | } |
1403 | 1418 | ||
1404 | static int iommu_disable_translation(struct intel_iommu *iommu) | 1419 | static void iommu_disable_translation(struct intel_iommu *iommu) |
1405 | { | 1420 | { |
1406 | u32 sts; | 1421 | u32 sts; |
1407 | unsigned long flag; | 1422 | unsigned long flag; |
@@ -1415,7 +1430,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu) | |||
1415 | readl, (!(sts & DMA_GSTS_TES)), sts); | 1430 | readl, (!(sts & DMA_GSTS_TES)), sts); |
1416 | 1431 | ||
1417 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 1432 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
1418 | return 0; | ||
1419 | } | 1433 | } |
1420 | 1434 | ||
1421 | 1435 | ||
@@ -1462,8 +1476,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
1462 | static void free_dmar_iommu(struct intel_iommu *iommu) | 1476 | static void free_dmar_iommu(struct intel_iommu *iommu) |
1463 | { | 1477 | { |
1464 | struct dmar_domain *domain; | 1478 | struct dmar_domain *domain; |
1465 | int i, count; | 1479 | int i; |
1466 | unsigned long flags; | ||
1467 | 1480 | ||
1468 | if ((iommu->domains) && (iommu->domain_ids)) { | 1481 | if ((iommu->domains) && (iommu->domain_ids)) { |
1469 | for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { | 1482 | for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { |
@@ -1476,11 +1489,8 @@ static void free_dmar_iommu(struct intel_iommu *iommu) | |||
1476 | 1489 | ||
1477 | domain = iommu->domains[i]; | 1490 | domain = iommu->domains[i]; |
1478 | clear_bit(i, iommu->domain_ids); | 1491 | clear_bit(i, iommu->domain_ids); |
1479 | 1492 | if (domain_detach_iommu(domain, iommu) == 0 && | |
1480 | spin_lock_irqsave(&domain->iommu_lock, flags); | 1493 | !domain_type_is_vm(domain)) |
1481 | count = --domain->iommu_count; | ||
1482 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1483 | if (count == 0) | ||
1484 | domain_exit(domain); | 1494 | domain_exit(domain); |
1485 | } | 1495 | } |
1486 | } | 1496 | } |
@@ -1499,7 +1509,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu) | |||
1499 | free_context_table(iommu); | 1509 | free_context_table(iommu); |
1500 | } | 1510 | } |
1501 | 1511 | ||
1502 | static struct dmar_domain *alloc_domain(bool vm) | 1512 | static struct dmar_domain *alloc_domain(int flags) |
1503 | { | 1513 | { |
1504 | /* domain id for virtual machine, it won't be set in context */ | 1514 | /* domain id for virtual machine, it won't be set in context */ |
1505 | static atomic_t vm_domid = ATOMIC_INIT(0); | 1515 | static atomic_t vm_domid = ATOMIC_INIT(0); |
@@ -1509,46 +1519,62 @@ static struct dmar_domain *alloc_domain(bool vm) | |||
1509 | if (!domain) | 1519 | if (!domain) |
1510 | return NULL; | 1520 | return NULL; |
1511 | 1521 | ||
1522 | memset(domain, 0, sizeof(*domain)); | ||
1512 | domain->nid = -1; | 1523 | domain->nid = -1; |
1513 | domain->iommu_count = 0; | 1524 | domain->flags = flags; |
1514 | memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); | ||
1515 | domain->flags = 0; | ||
1516 | spin_lock_init(&domain->iommu_lock); | 1525 | spin_lock_init(&domain->iommu_lock); |
1517 | INIT_LIST_HEAD(&domain->devices); | 1526 | INIT_LIST_HEAD(&domain->devices); |
1518 | if (vm) { | 1527 | if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE) |
1519 | domain->id = atomic_inc_return(&vm_domid); | 1528 | domain->id = atomic_inc_return(&vm_domid); |
1520 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
1521 | } | ||
1522 | 1529 | ||
1523 | return domain; | 1530 | return domain; |
1524 | } | 1531 | } |
1525 | 1532 | ||
1526 | static int iommu_attach_domain(struct dmar_domain *domain, | 1533 | static int __iommu_attach_domain(struct dmar_domain *domain, |
1527 | struct intel_iommu *iommu) | 1534 | struct intel_iommu *iommu) |
1528 | { | 1535 | { |
1529 | int num; | 1536 | int num; |
1530 | unsigned long ndomains; | 1537 | unsigned long ndomains; |
1531 | unsigned long flags; | ||
1532 | 1538 | ||
1533 | ndomains = cap_ndoms(iommu->cap); | 1539 | ndomains = cap_ndoms(iommu->cap); |
1534 | |||
1535 | spin_lock_irqsave(&iommu->lock, flags); | ||
1536 | |||
1537 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | 1540 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
1538 | if (num >= ndomains) { | 1541 | if (num < ndomains) { |
1539 | spin_unlock_irqrestore(&iommu->lock, flags); | 1542 | set_bit(num, iommu->domain_ids); |
1540 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | 1543 | iommu->domains[num] = domain; |
1541 | return -ENOMEM; | 1544 | } else { |
1545 | num = -ENOSPC; | ||
1542 | } | 1546 | } |
1543 | 1547 | ||
1544 | domain->id = num; | 1548 | return num; |
1545 | domain->iommu_count++; | 1549 | } |
1546 | set_bit(num, iommu->domain_ids); | 1550 | |
1547 | set_bit(iommu->seq_id, domain->iommu_bmp); | 1551 | static int iommu_attach_domain(struct dmar_domain *domain, |
1548 | iommu->domains[num] = domain; | 1552 | struct intel_iommu *iommu) |
1553 | { | ||
1554 | int num; | ||
1555 | unsigned long flags; | ||
1556 | |||
1557 | spin_lock_irqsave(&iommu->lock, flags); | ||
1558 | num = __iommu_attach_domain(domain, iommu); | ||
1549 | spin_unlock_irqrestore(&iommu->lock, flags); | 1559 | spin_unlock_irqrestore(&iommu->lock, flags); |
1560 | if (num < 0) | ||
1561 | pr_err("IOMMU: no free domain ids\n"); | ||
1550 | 1562 | ||
1551 | return 0; | 1563 | return num; |
1564 | } | ||
1565 | |||
1566 | static int iommu_attach_vm_domain(struct dmar_domain *domain, | ||
1567 | struct intel_iommu *iommu) | ||
1568 | { | ||
1569 | int num; | ||
1570 | unsigned long ndomains; | ||
1571 | |||
1572 | ndomains = cap_ndoms(iommu->cap); | ||
1573 | for_each_set_bit(num, iommu->domain_ids, ndomains) | ||
1574 | if (iommu->domains[num] == domain) | ||
1575 | return num; | ||
1576 | |||
1577 | return __iommu_attach_domain(domain, iommu); | ||
1552 | } | 1578 | } |
1553 | 1579 | ||
1554 | static void iommu_detach_domain(struct dmar_domain *domain, | 1580 | static void iommu_detach_domain(struct dmar_domain *domain, |
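Domain-id allocation is reorganised here: alloc_domain() takes a flags argument, __iommu_attach_domain() does the bitmap allocation under the caller-held lock and returns the id (or -ENOSPC), iommu_attach_domain() is the locking wrapper, and iommu_attach_vm_domain() first reuses an id the domain already holds on that IOMMU. A standalone sketch of the allocate-or-reuse logic; a plain 64-bit bitmap stands in for iommu->domain_ids and locking is omitted:

/* Standalone sketch: attach_domain_locked() parallels __iommu_attach_domain
 * (caller holds the lock, returns an id or a negative error); the VM variant
 * looks for a slot this domain already owns before allocating a new one.
 */
#include <stdio.h>
#include <string.h>

#define NDOMAINS 64

struct demo_iommu {
        unsigned long long domain_ids;      /* bit n set => id n in use */
        void *domains[NDOMAINS];
};

static int attach_domain_locked(void *domain, struct demo_iommu *iommu)
{
        for (int num = 0; num < NDOMAINS; num++) {
                if (!(iommu->domain_ids & (1ULL << num))) {
                        iommu->domain_ids |= 1ULL << num;
                        iommu->domains[num] = domain;
                        return num;
                }
        }
        return -1;                          /* stands in for -ENOSPC */
}

static int attach_vm_domain(void *domain, struct demo_iommu *iommu)
{
        for (int num = 0; num < NDOMAINS; num++)
                if (iommu->domains[num] == domain)
                        return num;         /* already attached: reuse the id */
        return attach_domain_locked(domain, iommu);
}

int main(void)
{
        struct demo_iommu iommu;
        int vm = 42;                        /* any object works as a cookie */

        memset(&iommu, 0, sizeof(iommu));
        printf("first attach:  id %d\n", attach_vm_domain(&vm, &iommu));
        printf("second attach: id %d\n", attach_vm_domain(&vm, &iommu));
        return 0;
}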
@@ -1558,17 +1584,53 @@ static void iommu_detach_domain(struct dmar_domain *domain, | |||
1558 | int num, ndomains; | 1584 | int num, ndomains; |
1559 | 1585 | ||
1560 | spin_lock_irqsave(&iommu->lock, flags); | 1586 | spin_lock_irqsave(&iommu->lock, flags); |
1561 | ndomains = cap_ndoms(iommu->cap); | 1587 | if (domain_type_is_vm_or_si(domain)) { |
1562 | for_each_set_bit(num, iommu->domain_ids, ndomains) { | 1588 | ndomains = cap_ndoms(iommu->cap); |
1563 | if (iommu->domains[num] == domain) { | 1589 | for_each_set_bit(num, iommu->domain_ids, ndomains) { |
1564 | clear_bit(num, iommu->domain_ids); | 1590 | if (iommu->domains[num] == domain) { |
1565 | iommu->domains[num] = NULL; | 1591 | clear_bit(num, iommu->domain_ids); |
1566 | break; | 1592 | iommu->domains[num] = NULL; |
1593 | break; | ||
1594 | } | ||
1567 | } | 1595 | } |
1596 | } else { | ||
1597 | clear_bit(domain->id, iommu->domain_ids); | ||
1598 | iommu->domains[domain->id] = NULL; | ||
1568 | } | 1599 | } |
1569 | spin_unlock_irqrestore(&iommu->lock, flags); | 1600 | spin_unlock_irqrestore(&iommu->lock, flags); |
1570 | } | 1601 | } |
1571 | 1602 | ||
1603 | static void domain_attach_iommu(struct dmar_domain *domain, | ||
1604 | struct intel_iommu *iommu) | ||
1605 | { | ||
1606 | unsigned long flags; | ||
1607 | |||
1608 | spin_lock_irqsave(&domain->iommu_lock, flags); | ||
1609 | if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { | ||
1610 | domain->iommu_count++; | ||
1611 | if (domain->iommu_count == 1) | ||
1612 | domain->nid = iommu->node; | ||
1613 | domain_update_iommu_cap(domain); | ||
1614 | } | ||
1615 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1616 | } | ||
1617 | |||
1618 | static int domain_detach_iommu(struct dmar_domain *domain, | ||
1619 | struct intel_iommu *iommu) | ||
1620 | { | ||
1621 | unsigned long flags; | ||
1622 | int count = INT_MAX; | ||
1623 | |||
1624 | spin_lock_irqsave(&domain->iommu_lock, flags); | ||
1625 | if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) { | ||
1626 | count = --domain->iommu_count; | ||
1627 | domain_update_iommu_cap(domain); | ||
1628 | } | ||
1629 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1630 | |||
1631 | return count; | ||
1632 | } | ||
1633 | |||
1572 | static struct iova_domain reserved_iova_list; | 1634 | static struct iova_domain reserved_iova_list; |
1573 | static struct lock_class_key reserved_rbtree_key; | 1635 | static struct lock_class_key reserved_rbtree_key; |
1574 | 1636 | ||
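domain_attach_iommu()/domain_detach_iommu() centralise bookkeeping that used to be open-coded at several call sites: set or clear the per-IOMMU bit under domain->iommu_lock, adjust iommu_count, recompute capabilities, and have detach return the remaining count so free_dmar_iommu() can tell when the last reference is gone. A minimal sketch of that bookkeeping, with locking omitted:

/* Standalone sketch: test-and-set / test-and-clear a per-unit bit, keep a
 * reference count, and have detach return the remaining count so the caller
 * can free the domain when it reaches zero.
 */
#include <stdio.h>
#include <limits.h>

struct demo_domain {
        unsigned long iommu_bmp;    /* one bit per IOMMU unit */
        int iommu_count;
};

static void domain_attach(struct demo_domain *d, int seq_id)
{
        if (!(d->iommu_bmp & (1UL << seq_id))) {
                d->iommu_bmp |= 1UL << seq_id;
                d->iommu_count++;
                /* the first attach would also pick the domain's NUMA node
                 * and recompute the snooping/superpage capabilities */
        }
}

static int domain_detach(struct demo_domain *d, int seq_id)
{
        int count = INT_MAX;        /* "was never attached" sentinel */

        if (d->iommu_bmp & (1UL << seq_id)) {
                d->iommu_bmp &= ~(1UL << seq_id);
                count = --d->iommu_count;
        }
        return count;
}

int main(void)
{
        struct demo_domain d = { 0, 0 };

        domain_attach(&d, 0);
        domain_attach(&d, 1);
        printf("after detach 0: %d left\n", domain_detach(&d, 0));
        if (domain_detach(&d, 1) == 0)
                printf("last reference gone, domain can be freed\n");
        return 0;
}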
@@ -1706,9 +1768,7 @@ static void domain_exit(struct dmar_domain *domain) | |||
1706 | /* clear attached or cached domains */ | 1768 | /* clear attached or cached domains */ |
1707 | rcu_read_lock(); | 1769 | rcu_read_lock(); |
1708 | for_each_active_iommu(iommu, drhd) | 1770 | for_each_active_iommu(iommu, drhd) |
1709 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || | 1771 | iommu_detach_domain(domain, iommu); |
1710 | test_bit(iommu->seq_id, domain->iommu_bmp)) | ||
1711 | iommu_detach_domain(domain, iommu); | ||
1712 | rcu_read_unlock(); | 1772 | rcu_read_unlock(); |
1713 | 1773 | ||
1714 | dma_free_pagelist(freelist); | 1774 | dma_free_pagelist(freelist); |
@@ -1723,8 +1783,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1723 | struct context_entry *context; | 1783 | struct context_entry *context; |
1724 | unsigned long flags; | 1784 | unsigned long flags; |
1725 | struct dma_pte *pgd; | 1785 | struct dma_pte *pgd; |
1726 | unsigned long num; | ||
1727 | unsigned long ndomains; | ||
1728 | int id; | 1786 | int id; |
1729 | int agaw; | 1787 | int agaw; |
1730 | struct device_domain_info *info = NULL; | 1788 | struct device_domain_info *info = NULL; |
@@ -1748,31 +1806,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1748 | id = domain->id; | 1806 | id = domain->id; |
1749 | pgd = domain->pgd; | 1807 | pgd = domain->pgd; |
1750 | 1808 | ||
1751 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || | 1809 | if (domain_type_is_vm_or_si(domain)) { |
1752 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { | 1810 | if (domain_type_is_vm(domain)) { |
1753 | int found = 0; | 1811 | id = iommu_attach_vm_domain(domain, iommu); |
1754 | 1812 | if (id < 0) { | |
1755 | /* find an available domain id for this device in iommu */ | ||
1756 | ndomains = cap_ndoms(iommu->cap); | ||
1757 | for_each_set_bit(num, iommu->domain_ids, ndomains) { | ||
1758 | if (iommu->domains[num] == domain) { | ||
1759 | id = num; | ||
1760 | found = 1; | ||
1761 | break; | ||
1762 | } | ||
1763 | } | ||
1764 | |||
1765 | if (found == 0) { | ||
1766 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | ||
1767 | if (num >= ndomains) { | ||
1768 | spin_unlock_irqrestore(&iommu->lock, flags); | 1813 | spin_unlock_irqrestore(&iommu->lock, flags); |
1769 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | 1814 | pr_err("IOMMU: no free domain ids\n"); |
1770 | return -EFAULT; | 1815 | return -EFAULT; |
1771 | } | 1816 | } |
1772 | |||
1773 | set_bit(num, iommu->domain_ids); | ||
1774 | iommu->domains[num] = domain; | ||
1775 | id = num; | ||
1776 | } | 1817 | } |
1777 | 1818 | ||
1778 | /* Skip top levels of page tables for | 1819 | /* Skip top levels of page tables for |
@@ -1824,72 +1865,68 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1824 | (((u16)bus) << 8) | devfn, | 1865 | (((u16)bus) << 8) | devfn, |
1825 | DMA_CCMD_MASK_NOBIT, | 1866 | DMA_CCMD_MASK_NOBIT, |
1826 | DMA_CCMD_DEVICE_INVL); | 1867 | DMA_CCMD_DEVICE_INVL); |
1827 | iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); | 1868 | iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH); |
1828 | } else { | 1869 | } else { |
1829 | iommu_flush_write_buffer(iommu); | 1870 | iommu_flush_write_buffer(iommu); |
1830 | } | 1871 | } |
1831 | iommu_enable_dev_iotlb(info); | 1872 | iommu_enable_dev_iotlb(info); |
1832 | spin_unlock_irqrestore(&iommu->lock, flags); | 1873 | spin_unlock_irqrestore(&iommu->lock, flags); |
1833 | 1874 | ||
1834 | spin_lock_irqsave(&domain->iommu_lock, flags); | 1875 | domain_attach_iommu(domain, iommu); |
1835 | if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) { | 1876 | |
1836 | domain->iommu_count++; | ||
1837 | if (domain->iommu_count == 1) | ||
1838 | domain->nid = iommu->node; | ||
1839 | domain_update_iommu_cap(domain); | ||
1840 | } | ||
1841 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1842 | return 0; | 1877 | return 0; |
1843 | } | 1878 | } |
1844 | 1879 | ||
1880 | struct domain_context_mapping_data { | ||
1881 | struct dmar_domain *domain; | ||
1882 | struct intel_iommu *iommu; | ||
1883 | int translation; | ||
1884 | }; | ||
1885 | |||
1886 | static int domain_context_mapping_cb(struct pci_dev *pdev, | ||
1887 | u16 alias, void *opaque) | ||
1888 | { | ||
1889 | struct domain_context_mapping_data *data = opaque; | ||
1890 | |||
1891 | return domain_context_mapping_one(data->domain, data->iommu, | ||
1892 | PCI_BUS_NUM(alias), alias & 0xff, | ||
1893 | data->translation); | ||
1894 | } | ||
1895 | |||
1845 | static int | 1896 | static int |
1846 | domain_context_mapping(struct dmar_domain *domain, struct device *dev, | 1897 | domain_context_mapping(struct dmar_domain *domain, struct device *dev, |
1847 | int translation) | 1898 | int translation) |
1848 | { | 1899 | { |
1849 | int ret; | ||
1850 | struct pci_dev *pdev, *tmp, *parent; | ||
1851 | struct intel_iommu *iommu; | 1900 | struct intel_iommu *iommu; |
1852 | u8 bus, devfn; | 1901 | u8 bus, devfn; |
1902 | struct domain_context_mapping_data data; | ||
1853 | 1903 | ||
1854 | iommu = device_to_iommu(dev, &bus, &devfn); | 1904 | iommu = device_to_iommu(dev, &bus, &devfn); |
1855 | if (!iommu) | 1905 | if (!iommu) |
1856 | return -ENODEV; | 1906 | return -ENODEV; |
1857 | 1907 | ||
1858 | ret = domain_context_mapping_one(domain, iommu, bus, devfn, | 1908 | if (!dev_is_pci(dev)) |
1859 | translation); | 1909 | return domain_context_mapping_one(domain, iommu, bus, devfn, |
1860 | if (ret || !dev_is_pci(dev)) | ||
1861 | return ret; | ||
1862 | |||
1863 | /* dependent device mapping */ | ||
1864 | pdev = to_pci_dev(dev); | ||
1865 | tmp = pci_find_upstream_pcie_bridge(pdev); | ||
1866 | if (!tmp) | ||
1867 | return 0; | ||
1868 | /* Secondary interface's bus number and devfn 0 */ | ||
1869 | parent = pdev->bus->self; | ||
1870 | while (parent != tmp) { | ||
1871 | ret = domain_context_mapping_one(domain, iommu, | ||
1872 | parent->bus->number, | ||
1873 | parent->devfn, translation); | ||
1874 | if (ret) | ||
1875 | return ret; | ||
1876 | parent = parent->bus->self; | ||
1877 | } | ||
1878 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ | ||
1879 | return domain_context_mapping_one(domain, iommu, | ||
1880 | tmp->subordinate->number, 0, | ||
1881 | translation); | ||
1882 | else /* this is a legacy PCI bridge */ | ||
1883 | return domain_context_mapping_one(domain, iommu, | ||
1884 | tmp->bus->number, | ||
1885 | tmp->devfn, | ||
1886 | translation); | 1910 | translation); |
1911 | |||
1912 | data.domain = domain; | ||
1913 | data.iommu = iommu; | ||
1914 | data.translation = translation; | ||
1915 | |||
1916 | return pci_for_each_dma_alias(to_pci_dev(dev), | ||
1917 | &domain_context_mapping_cb, &data); | ||
1918 | } | ||
1919 | |||
1920 | static int domain_context_mapped_cb(struct pci_dev *pdev, | ||
1921 | u16 alias, void *opaque) | ||
1922 | { | ||
1923 | struct intel_iommu *iommu = opaque; | ||
1924 | |||
1925 | return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); | ||
1887 | } | 1926 | } |
1888 | 1927 | ||
1889 | static int domain_context_mapped(struct device *dev) | 1928 | static int domain_context_mapped(struct device *dev) |
1890 | { | 1929 | { |
1891 | int ret; | ||
1892 | struct pci_dev *pdev, *tmp, *parent; | ||
1893 | struct intel_iommu *iommu; | 1930 | struct intel_iommu *iommu; |
1894 | u8 bus, devfn; | 1931 | u8 bus, devfn; |
1895 | 1932 | ||
@@ -1897,30 +1934,11 @@ static int domain_context_mapped(struct device *dev) | |||
1897 | if (!iommu) | 1934 | if (!iommu) |
1898 | return -ENODEV; | 1935 | return -ENODEV; |
1899 | 1936 | ||
1900 | ret = device_context_mapped(iommu, bus, devfn); | 1937 | if (!dev_is_pci(dev)) |
1901 | if (!ret || !dev_is_pci(dev)) | 1938 | return device_context_mapped(iommu, bus, devfn); |
1902 | return ret; | ||
1903 | 1939 | ||
1904 | /* dependent device mapping */ | 1940 | return !pci_for_each_dma_alias(to_pci_dev(dev), |
1905 | pdev = to_pci_dev(dev); | 1941 | domain_context_mapped_cb, iommu); |
1906 | tmp = pci_find_upstream_pcie_bridge(pdev); | ||
1907 | if (!tmp) | ||
1908 | return ret; | ||
1909 | /* Secondary interface's bus number and devfn 0 */ | ||
1910 | parent = pdev->bus->self; | ||
1911 | while (parent != tmp) { | ||
1912 | ret = device_context_mapped(iommu, parent->bus->number, | ||
1913 | parent->devfn); | ||
1914 | if (!ret) | ||
1915 | return ret; | ||
1916 | parent = parent->bus->self; | ||
1917 | } | ||
1918 | if (pci_is_pcie(tmp)) | ||
1919 | return device_context_mapped(iommu, tmp->subordinate->number, | ||
1920 | 0); | ||
1921 | else | ||
1922 | return device_context_mapped(iommu, tmp->bus->number, | ||
1923 | tmp->devfn); | ||
1924 | } | 1942 | } |
1925 | 1943 | ||
1926 | /* Returns a number of VTD pages, but aligned to MM page size */ | 1944 | /* Returns a number of VTD pages, but aligned to MM page size */ |
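Context mapping no longer hand-walks upstream PCIe-to-PCI bridges; it hands a callback plus a struct domain_context_mapping_data cookie to pci_for_each_dma_alias(), which visits every bus/devfn alias the device may use for DMA and stops at the first non-zero return. domain_context_mapped() reuses the same iterator with a negated result. A standalone sketch of that iterate-with-callback shape; the alias values and helper names are made up, and only PCI_BUS_NUM mirrors the kernel macro:

/* Standalone sketch of the calling convention: visit every alias, pass an
 * opaque context to the callback, stop early on a non-zero return.
 */
#include <stdio.h>

#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)

struct mapping_data { int domain_id; };

static int for_each_dma_alias(const unsigned short *aliases, int n,
                              int (*fn)(unsigned short alias, void *opaque),
                              void *opaque)
{
        for (int i = 0; i < n; i++) {
                int ret = fn(aliases[i], opaque);

                if (ret)
                        return ret;         /* propagate the first error */
        }
        return 0;
}

static int map_one_alias(unsigned short alias, void *opaque)
{
        struct mapping_data *data = opaque;

        printf("map bus %02x devfn %02x into domain %d\n",
               PCI_BUS_NUM(alias), alias & 0xff, data->domain_id);
        return 0;
}

int main(void)
{
        /* device 03:00.0 plus the bridge alias 02:1f.0 it sits behind */
        unsigned short aliases[] = { 0x0300, 0x02f8 };
        struct mapping_data data = { .domain_id = 7 };

        return for_each_dma_alias(aliases, 2, map_one_alias, &data);
}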
@@ -1965,12 +1983,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
1965 | { | 1983 | { |
1966 | struct dma_pte *first_pte = NULL, *pte = NULL; | 1984 | struct dma_pte *first_pte = NULL, *pte = NULL; |
1967 | phys_addr_t uninitialized_var(pteval); | 1985 | phys_addr_t uninitialized_var(pteval); |
1968 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | ||
1969 | unsigned long sg_res; | 1986 | unsigned long sg_res; |
1970 | unsigned int largepage_lvl = 0; | 1987 | unsigned int largepage_lvl = 0; |
1971 | unsigned long lvl_pages = 0; | 1988 | unsigned long lvl_pages = 0; |
1972 | 1989 | ||
1973 | BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); | 1990 | BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); |
1974 | 1991 | ||
1975 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) | 1992 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
1976 | return -EINVAL; | 1993 | return -EINVAL; |
@@ -2004,12 +2021,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
2004 | /* It is large page*/ | 2021 | /* It is large page*/ |
2005 | if (largepage_lvl > 1) { | 2022 | if (largepage_lvl > 1) { |
2006 | pteval |= DMA_PTE_LARGE_PAGE; | 2023 | pteval |= DMA_PTE_LARGE_PAGE; |
2007 | /* Ensure that old small page tables are removed to make room | 2024 | lvl_pages = lvl_to_nr_pages(largepage_lvl); |
2008 | for superpage, if they exist. */ | 2025 | /* |
2009 | dma_pte_clear_range(domain, iov_pfn, | 2026 | * Ensure that old small page tables are |
2010 | iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); | 2027 | * removed to make room for superpage, |
2028 | * if they exist. | ||
2029 | */ | ||
2011 | dma_pte_free_pagetable(domain, iov_pfn, | 2030 | dma_pte_free_pagetable(domain, iov_pfn, |
2012 | iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); | 2031 | iov_pfn + lvl_pages - 1); |
2013 | } else { | 2032 | } else { |
2014 | pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; | 2033 | pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; |
2015 | } | 2034 | } |
@@ -2102,31 +2121,20 @@ static inline void unlink_domain_info(struct device_domain_info *info) | |||
2102 | 2121 | ||
2103 | static void domain_remove_dev_info(struct dmar_domain *domain) | 2122 | static void domain_remove_dev_info(struct dmar_domain *domain) |
2104 | { | 2123 | { |
2105 | struct device_domain_info *info; | 2124 | struct device_domain_info *info, *tmp; |
2106 | unsigned long flags, flags2; | 2125 | unsigned long flags; |
2107 | 2126 | ||
2108 | spin_lock_irqsave(&device_domain_lock, flags); | 2127 | spin_lock_irqsave(&device_domain_lock, flags); |
2109 | while (!list_empty(&domain->devices)) { | 2128 | list_for_each_entry_safe(info, tmp, &domain->devices, link) { |
2110 | info = list_entry(domain->devices.next, | ||
2111 | struct device_domain_info, link); | ||
2112 | unlink_domain_info(info); | 2129 | unlink_domain_info(info); |
2113 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2130 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2114 | 2131 | ||
2115 | iommu_disable_dev_iotlb(info); | 2132 | iommu_disable_dev_iotlb(info); |
2116 | iommu_detach_dev(info->iommu, info->bus, info->devfn); | 2133 | iommu_detach_dev(info->iommu, info->bus, info->devfn); |
2117 | 2134 | ||
2118 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { | 2135 | if (domain_type_is_vm(domain)) { |
2119 | iommu_detach_dependent_devices(info->iommu, info->dev); | 2136 | iommu_detach_dependent_devices(info->iommu, info->dev); |
2120 | /* clear this iommu in iommu_bmp, update iommu count | 2137 | domain_detach_iommu(domain, info->iommu); |
2121 | * and capabilities | ||
2122 | */ | ||
2123 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
2124 | if (test_and_clear_bit(info->iommu->seq_id, | ||
2125 | domain->iommu_bmp)) { | ||
2126 | domain->iommu_count--; | ||
2127 | domain_update_iommu_cap(domain); | ||
2128 | } | ||
2129 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
2130 | } | 2138 | } |
2131 | 2139 | ||
2132 | free_devinfo_mem(info); | 2140 | free_devinfo_mem(info); |
@@ -2181,8 +2189,6 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, | |||
2181 | info->dev = dev; | 2189 | info->dev = dev; |
2182 | info->domain = domain; | 2190 | info->domain = domain; |
2183 | info->iommu = iommu; | 2191 | info->iommu = iommu; |
2184 | if (!dev) | ||
2185 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; | ||
2186 | 2192 | ||
2187 | spin_lock_irqsave(&device_domain_lock, flags); | 2193 | spin_lock_irqsave(&device_domain_lock, flags); |
2188 | if (dev) | 2194 | if (dev) |
@@ -2209,79 +2215,86 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, | |||
2209 | return domain; | 2215 | return domain; |
2210 | } | 2216 | } |
2211 | 2217 | ||
2218 | static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) | ||
2219 | { | ||
2220 | *(u16 *)opaque = alias; | ||
2221 | return 0; | ||
2222 | } | ||
2223 | |||
2212 | /* domain is initialized */ | 2224 | /* domain is initialized */ |
2213 | static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) | 2225 | static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) |
2214 | { | 2226 | { |
2215 | struct dmar_domain *domain, *free = NULL; | 2227 | struct dmar_domain *domain, *tmp; |
2216 | struct intel_iommu *iommu = NULL; | 2228 | struct intel_iommu *iommu; |
2217 | struct device_domain_info *info; | 2229 | struct device_domain_info *info; |
2218 | struct pci_dev *dev_tmp = NULL; | 2230 | u16 dma_alias; |
2219 | unsigned long flags; | 2231 | unsigned long flags; |
2220 | u8 bus, devfn, bridge_bus, bridge_devfn; | 2232 | u8 bus, devfn; |
2221 | 2233 | ||
2222 | domain = find_domain(dev); | 2234 | domain = find_domain(dev); |
2223 | if (domain) | 2235 | if (domain) |
2224 | return domain; | 2236 | return domain; |
2225 | 2237 | ||
2238 | iommu = device_to_iommu(dev, &bus, &devfn); | ||
2239 | if (!iommu) | ||
2240 | return NULL; | ||
2241 | |||
2226 | if (dev_is_pci(dev)) { | 2242 | if (dev_is_pci(dev)) { |
2227 | struct pci_dev *pdev = to_pci_dev(dev); | 2243 | struct pci_dev *pdev = to_pci_dev(dev); |
2228 | u16 segment; | ||
2229 | 2244 | ||
2230 | segment = pci_domain_nr(pdev->bus); | 2245 | pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); |
2231 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); | 2246 | |
2232 | if (dev_tmp) { | 2247 | spin_lock_irqsave(&device_domain_lock, flags); |
2233 | if (pci_is_pcie(dev_tmp)) { | 2248 | info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus), |
2234 | bridge_bus = dev_tmp->subordinate->number; | 2249 | PCI_BUS_NUM(dma_alias), |
2235 | bridge_devfn = 0; | 2250 | dma_alias & 0xff); |
2236 | } else { | 2251 | if (info) { |
2237 | bridge_bus = dev_tmp->bus->number; | 2252 | iommu = info->iommu; |
2238 | bridge_devfn = dev_tmp->devfn; | 2253 | domain = info->domain; |
2239 | } | ||
2240 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2241 | info = dmar_search_domain_by_dev_info(segment, | ||
2242 | bridge_bus, | ||
2243 | bridge_devfn); | ||
2244 | if (info) { | ||
2245 | iommu = info->iommu; | ||
2246 | domain = info->domain; | ||
2247 | } | ||
2248 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2249 | /* pcie-pci bridge already has a domain, uses it */ | ||
2250 | if (info) | ||
2251 | goto found_domain; | ||
2252 | } | 2254 | } |
2253 | } | 2255 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2254 | 2256 | ||
2255 | iommu = device_to_iommu(dev, &bus, &devfn); | 2257 | /* DMA alias already has a domain, uses it */ |
2256 | if (!iommu) | 2258 | if (info) |
2257 | goto error; | 2259 | goto found_domain; |
2260 | } | ||
2258 | 2261 | ||
2259 | /* Allocate and initialize new domain for the device */ | 2262 | /* Allocate and initialize new domain for the device */ |
2260 | domain = alloc_domain(false); | 2263 | domain = alloc_domain(0); |
2261 | if (!domain) | 2264 | if (!domain) |
2262 | goto error; | 2265 | return NULL; |
2263 | if (iommu_attach_domain(domain, iommu)) { | 2266 | domain->id = iommu_attach_domain(domain, iommu); |
2267 | if (domain->id < 0) { | ||
2264 | free_domain_mem(domain); | 2268 | free_domain_mem(domain); |
2265 | domain = NULL; | 2269 | return NULL; |
2266 | goto error; | ||
2267 | } | 2270 | } |
2268 | free = domain; | 2271 | domain_attach_iommu(domain, iommu); |
2269 | if (domain_init(domain, gaw)) | 2272 | if (domain_init(domain, gaw)) { |
2270 | goto error; | 2273 | domain_exit(domain); |
2274 | return NULL; | ||
2275 | } | ||
2276 | |||
2277 | /* register PCI DMA alias device */ | ||
2278 | if (dev_is_pci(dev)) { | ||
2279 | tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias), | ||
2280 | dma_alias & 0xff, NULL, domain); | ||
2281 | |||
2282 | if (!tmp || tmp != domain) { | ||
2283 | domain_exit(domain); | ||
2284 | domain = tmp; | ||
2285 | } | ||
2271 | 2286 | ||
2272 | /* register pcie-to-pci device */ | ||
2273 | if (dev_tmp) { | ||
2274 | domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn, | ||
2275 | NULL, domain); | ||
2276 | if (!domain) | 2287 | if (!domain) |
2277 | goto error; | 2288 | return NULL; |
2278 | } | 2289 | } |
2279 | 2290 | ||
2280 | found_domain: | 2291 | found_domain: |
2281 | domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); | 2292 | tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); |
2282 | error: | 2293 | |
2283 | if (free != domain) | 2294 | if (!tmp || tmp != domain) { |
2284 | domain_exit(free); | 2295 | domain_exit(domain); |
2296 | domain = tmp; | ||
2297 | } | ||
2285 | 2298 | ||
2286 | return domain; | 2299 | return domain; |
2287 | } | 2300 | } |
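get_domain_for_dev() now captures the device's topmost DMA alias through the get_last_alias() callback, reuses any domain already registered under that alias, and otherwise allocates one; because dmar_insert_dev_info() returns the existing domain when another thread got there first, the caller tears down its own copy and adopts the returned one. A standalone sketch of the insert-or-adopt step; a fixed-size table keyed by devfn stands in for the device list, with no locking:

/* Standalone sketch: the insert helper returns the existing object when the
 * key is already present, and the caller frees its own copy and uses that
 * one instead.
 */
#include <stdio.h>
#include <stdlib.h>

struct domain { int id; };

static struct domain *table[256];           /* index: devfn */

static struct domain *insert_dev_info(int devfn, struct domain *domain)
{
        if (table[devfn])
                return table[devfn];         /* someone beat us to it */
        table[devfn] = domain;
        return domain;
}

static struct domain *get_domain_for_dev(int devfn, int next_id)
{
        struct domain *domain = malloc(sizeof(*domain));
        struct domain *tmp;

        if (!domain)
                return NULL;
        domain->id = next_id;

        tmp = insert_dev_info(devfn, domain);
        if (tmp != domain) {
                free(domain);                /* lost the race: adopt theirs */
                domain = tmp;
        }
        return domain;
}

int main(void)
{
        struct domain *a = get_domain_for_dev(0x10, 1);
        struct domain *b = get_domain_for_dev(0x10, 2);

        printf("same domain: %s (id %d)\n", a == b ? "yes" : "no", b->id);
        free(a);
        return 0;
}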
@@ -2405,6 +2418,7 @@ static inline void iommu_prepare_isa(void) | |||
2405 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " | 2418 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " |
2406 | "floppy might not work\n"); | 2419 | "floppy might not work\n"); |
2407 | 2420 | ||
2421 | pci_dev_put(pdev); | ||
2408 | } | 2422 | } |
2409 | #else | 2423 | #else |
2410 | static inline void iommu_prepare_isa(void) | 2424 | static inline void iommu_prepare_isa(void) |
@@ -2420,19 +2434,25 @@ static int __init si_domain_init(int hw) | |||
2420 | struct dmar_drhd_unit *drhd; | 2434 | struct dmar_drhd_unit *drhd; |
2421 | struct intel_iommu *iommu; | 2435 | struct intel_iommu *iommu; |
2422 | int nid, ret = 0; | 2436 | int nid, ret = 0; |
2437 | bool first = true; | ||
2423 | 2438 | ||
2424 | si_domain = alloc_domain(false); | 2439 | si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY); |
2425 | if (!si_domain) | 2440 | if (!si_domain) |
2426 | return -EFAULT; | 2441 | return -EFAULT; |
2427 | 2442 | ||
2428 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
2429 | |||
2430 | for_each_active_iommu(iommu, drhd) { | 2443 | for_each_active_iommu(iommu, drhd) { |
2431 | ret = iommu_attach_domain(si_domain, iommu); | 2444 | ret = iommu_attach_domain(si_domain, iommu); |
2432 | if (ret) { | 2445 | if (ret < 0) { |
2446 | domain_exit(si_domain); | ||
2447 | return -EFAULT; | ||
2448 | } else if (first) { | ||
2449 | si_domain->id = ret; | ||
2450 | first = false; | ||
2451 | } else if (si_domain->id != ret) { | ||
2433 | domain_exit(si_domain); | 2452 | domain_exit(si_domain); |
2434 | return -EFAULT; | 2453 | return -EFAULT; |
2435 | } | 2454 | } |
2455 | domain_attach_iommu(si_domain, iommu); | ||
2436 | } | 2456 | } |
2437 | 2457 | ||
2438 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 2458 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
@@ -2523,22 +2543,46 @@ static bool device_has_rmrr(struct device *dev) | |||
2523 | return false; | 2543 | return false; |
2524 | } | 2544 | } |
2525 | 2545 | ||
2546 | /* | ||
2547 | * There are a couple cases where we need to restrict the functionality of | ||
2548 | * devices associated with RMRRs. The first is when evaluating a device for | ||
2549 | * identity mapping because problems exist when devices are moved in and out | ||
2550 | * of domains and their respective RMRR information is lost. This means that | ||
2551 | * a device with associated RMRRs will never be in a "passthrough" domain. | ||
2552 | * The second is use of the device through the IOMMU API. This interface | ||
2553 | * expects to have full control of the IOVA space for the device. We cannot | ||
2554 | * satisfy both the requirement that RMRR access is maintained and have an | ||
2555 | * unencumbered IOVA space. We also have no ability to quiesce the device's | ||
2556 | * use of the RMRR space or even inform the IOMMU API user of the restriction. | ||
2557 | * We therefore prevent devices associated with an RMRR from participating in | ||
2558 | * the IOMMU API, which eliminates them from device assignment. | ||
2559 | * | ||
2560 | * In both cases we assume that PCI USB devices with RMRRs have them largely | ||
2561 | * for historical reasons and that the RMRR space is not actively used post | ||
2562 | * boot. This exclusion may change if vendors begin to abuse it. | ||
2563 | */ | ||
2564 | static bool device_is_rmrr_locked(struct device *dev) | ||
2565 | { | ||
2566 | if (!device_has_rmrr(dev)) | ||
2567 | return false; | ||
2568 | |||
2569 | if (dev_is_pci(dev)) { | ||
2570 | struct pci_dev *pdev = to_pci_dev(dev); | ||
2571 | |||
2572 | if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) | ||
2573 | return false; | ||
2574 | } | ||
2575 | |||
2576 | return true; | ||
2577 | } | ||
2578 | |||
2526 | static int iommu_should_identity_map(struct device *dev, int startup) | 2579 | static int iommu_should_identity_map(struct device *dev, int startup) |
2527 | { | 2580 | { |
2528 | 2581 | ||
2529 | if (dev_is_pci(dev)) { | 2582 | if (dev_is_pci(dev)) { |
2530 | struct pci_dev *pdev = to_pci_dev(dev); | 2583 | struct pci_dev *pdev = to_pci_dev(dev); |
2531 | 2584 | ||
2532 | /* | 2585 | if (device_is_rmrr_locked(dev)) |
2533 | * We want to prevent any device associated with an RMRR from | ||
2534 | * getting placed into the SI Domain. This is done because | ||
2535 | * problems exist when devices are moved in and out of domains | ||
2536 | * and their respective RMRR info is lost. We exempt USB devices | ||
2537 | * from this process due to their usage of RMRRs that are known | ||
2538 | * to not be needed after BIOS hand-off to OS. | ||
2539 | */ | ||
2540 | if (device_has_rmrr(dev) && | ||
2541 | (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) | ||
2542 | return 0; | 2586 | return 0; |
2543 | 2587 | ||
2544 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) | 2588 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) |
@@ -2850,11 +2894,7 @@ static int __init init_dmars(void) | |||
2850 | 2894 | ||
2851 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | 2895 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); |
2852 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | 2896 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
2853 | 2897 | iommu_enable_translation(iommu); | |
2854 | ret = iommu_enable_translation(iommu); | ||
2855 | if (ret) | ||
2856 | goto free_iommu; | ||
2857 | |||
2858 | iommu_disable_protect_mem_regions(iommu); | 2898 | iommu_disable_protect_mem_regions(iommu); |
2859 | } | 2899 | } |
2860 | 2900 | ||
@@ -3091,10 +3131,10 @@ static void flush_unmaps(void) | |||
3091 | /* On real hardware multiple invalidations are expensive */ | 3131 | /* On real hardware multiple invalidations are expensive */ |
3092 | if (cap_caching_mode(iommu->cap)) | 3132 | if (cap_caching_mode(iommu->cap)) |
3093 | iommu_flush_iotlb_psi(iommu, domain->id, | 3133 | iommu_flush_iotlb_psi(iommu, domain->id, |
3094 | iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, | 3134 | iova->pfn_lo, iova_size(iova), |
3095 | !deferred_flush[i].freelist[j], 0); | 3135 | !deferred_flush[i].freelist[j], 0); |
3096 | else { | 3136 | else { |
3097 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); | 3137 | mask = ilog2(mm_to_dma_pfn(iova_size(iova))); |
3098 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | 3138 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], |
3099 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | 3139 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); |
3100 | } | 3140 | } |
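Both this flush path and the memory-hotplug notifier below now use the new iova_size() helper instead of repeating pfn_hi - pfn_lo + 1. As a tiny sketch:

/* Standalone sketch: iova_size() is just the inclusive-range length
 * pfn_hi - pfn_lo + 1; naming it avoids the off-by-one-prone expression at
 * every flush site.
 */
#include <stdio.h>

struct iova { unsigned long pfn_lo, pfn_hi; };

static unsigned long iova_size(const struct iova *iova)
{
        return iova->pfn_hi - iova->pfn_lo + 1;
}

int main(void)
{
        struct iova iova = { .pfn_lo = 0x100, .pfn_hi = 0x1ff };

        printf("pages to flush: %lu\n", iova_size(&iova));   /* 256 */
        return 0;
}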
@@ -3144,9 +3184,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f | |||
3144 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 3184 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
3145 | } | 3185 | } |
3146 | 3186 | ||
3147 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | 3187 | static void intel_unmap(struct device *dev, dma_addr_t dev_addr) |
3148 | size_t size, enum dma_data_direction dir, | ||
3149 | struct dma_attrs *attrs) | ||
3150 | { | 3188 | { |
3151 | struct dmar_domain *domain; | 3189 | struct dmar_domain *domain; |
3152 | unsigned long start_pfn, last_pfn; | 3190 | unsigned long start_pfn, last_pfn; |
@@ -3190,6 +3228,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
3190 | } | 3228 | } |
3191 | } | 3229 | } |
3192 | 3230 | ||
3231 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | ||
3232 | size_t size, enum dma_data_direction dir, | ||
3233 | struct dma_attrs *attrs) | ||
3234 | { | ||
3235 | intel_unmap(dev, dev_addr); | ||
3236 | } | ||
3237 | |||
3193 | static void *intel_alloc_coherent(struct device *dev, size_t size, | 3238 | static void *intel_alloc_coherent(struct device *dev, size_t size, |
3194 | dma_addr_t *dma_handle, gfp_t flags, | 3239 | dma_addr_t *dma_handle, gfp_t flags, |
3195 | struct dma_attrs *attrs) | 3240 | struct dma_attrs *attrs) |
@@ -3246,7 +3291,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
3246 | size = PAGE_ALIGN(size); | 3291 | size = PAGE_ALIGN(size); |
3247 | order = get_order(size); | 3292 | order = get_order(size); |
3248 | 3293 | ||
3249 | intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | 3294 | intel_unmap(dev, dma_handle); |
3250 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) | 3295 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
3251 | __free_pages(page, order); | 3296 | __free_pages(page, order); |
3252 | } | 3297 | } |
@@ -3255,43 +3300,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
3255 | int nelems, enum dma_data_direction dir, | 3300 | int nelems, enum dma_data_direction dir, |
3256 | struct dma_attrs *attrs) | 3301 | struct dma_attrs *attrs) |
3257 | { | 3302 | { |
3258 | struct dmar_domain *domain; | 3303 | intel_unmap(dev, sglist[0].dma_address); |
3259 | unsigned long start_pfn, last_pfn; | ||
3260 | struct iova *iova; | ||
3261 | struct intel_iommu *iommu; | ||
3262 | struct page *freelist; | ||
3263 | |||
3264 | if (iommu_no_mapping(dev)) | ||
3265 | return; | ||
3266 | |||
3267 | domain = find_domain(dev); | ||
3268 | BUG_ON(!domain); | ||
3269 | |||
3270 | iommu = domain_get_iommu(domain); | ||
3271 | |||
3272 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); | ||
3273 | if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", | ||
3274 | (unsigned long long)sglist[0].dma_address)) | ||
3275 | return; | ||
3276 | |||
3277 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); | ||
3278 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | ||
3279 | |||
3280 | freelist = domain_unmap(domain, start_pfn, last_pfn); | ||
3281 | |||
3282 | if (intel_iommu_strict) { | ||
3283 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, | ||
3284 | last_pfn - start_pfn + 1, !freelist, 0); | ||
3285 | /* free iova */ | ||
3286 | __free_iova(&domain->iovad, iova); | ||
3287 | dma_free_pagelist(freelist); | ||
3288 | } else { | ||
3289 | add_unmap(domain, iova, freelist); | ||
3290 | /* | ||
3291 | * queue up the release of the unmap to save the 1/6th of the | ||
3292 | * cpu used up by the iotlb flush operation... | ||
3293 | */ | ||
3294 | } | ||
3295 | } | 3304 | } |
3296 | 3305 | ||
3297 | static int intel_nontranslate_map_sg(struct device *hddev, | 3306 | static int intel_nontranslate_map_sg(struct device *hddev, |
@@ -3355,13 +3364,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele | |||
3355 | 3364 | ||
3356 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); | 3365 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); |
3357 | if (unlikely(ret)) { | 3366 | if (unlikely(ret)) { |
3358 | /* clear the page */ | ||
3359 | dma_pte_clear_range(domain, start_vpfn, | ||
3360 | start_vpfn + size - 1); | ||
3361 | /* free page tables */ | ||
3362 | dma_pte_free_pagetable(domain, start_vpfn, | 3367 | dma_pte_free_pagetable(domain, start_vpfn, |
3363 | start_vpfn + size - 1); | 3368 | start_vpfn + size - 1); |
3364 | /* free iova */ | ||
3365 | __free_iova(&domain->iovad, iova); | 3369 | __free_iova(&domain->iovad, iova); |
3366 | return 0; | 3370 | return 0; |
3367 | } | 3371 | } |
@@ -3568,10 +3572,8 @@ static int init_iommu_hw(void) | |||
3568 | 3572 | ||
3569 | iommu->flush.flush_context(iommu, 0, 0, 0, | 3573 | iommu->flush.flush_context(iommu, 0, 0, 0, |
3570 | DMA_CCMD_GLOBAL_INVL); | 3574 | DMA_CCMD_GLOBAL_INVL); |
3571 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | 3575 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
3572 | DMA_TLB_GLOBAL_FLUSH); | 3576 | iommu_enable_translation(iommu); |
3573 | if (iommu_enable_translation(iommu)) | ||
3574 | return 1; | ||
3575 | iommu_disable_protect_mem_regions(iommu); | 3577 | iommu_disable_protect_mem_regions(iommu); |
3576 | } | 3578 | } |
3577 | 3579 | ||
@@ -3873,9 +3875,7 @@ static int device_notifier(struct notifier_block *nb, | |||
3873 | 3875 | ||
3874 | down_read(&dmar_global_lock); | 3876 | down_read(&dmar_global_lock); |
3875 | domain_remove_one_dev_info(domain, dev); | 3877 | domain_remove_one_dev_info(domain, dev); |
3876 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | 3878 | if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) |
3877 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && | ||
3878 | list_empty(&domain->devices)) | ||
3879 | domain_exit(domain); | 3879 | domain_exit(domain); |
3880 | up_read(&dmar_global_lock); | 3880 | up_read(&dmar_global_lock); |
3881 | 3881 | ||
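Note: the open-coded DOMAIN_FLAG_VIRTUAL_MACHINE / DOMAIN_FLAG_STATIC_IDENTITY tests here and in the hunks below are folded into a domain_type_is_vm_or_si() helper. Its body is not part of this excerpt; a minimal sketch, assuming it simply wraps the condition it replaces:

	static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
	{
		return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
					DOMAIN_FLAG_STATIC_IDENTITY);
	}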
@@ -3935,8 +3935,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb, | |||
3935 | rcu_read_lock(); | 3935 | rcu_read_lock(); |
3936 | for_each_active_iommu(iommu, drhd) | 3936 | for_each_active_iommu(iommu, drhd) |
3937 | iommu_flush_iotlb_psi(iommu, si_domain->id, | 3937 | iommu_flush_iotlb_psi(iommu, si_domain->id, |
3938 | iova->pfn_lo, | 3938 | iova->pfn_lo, iova_size(iova), |
3939 | iova->pfn_hi - iova->pfn_lo + 1, | ||
3940 | !freelist, 0); | 3939 | !freelist, 0); |
3941 | rcu_read_unlock(); | 3940 | rcu_read_unlock(); |
3942 | dma_free_pagelist(freelist); | 3941 | dma_free_pagelist(freelist); |
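Note: the explicit pfn_hi - pfn_lo + 1 computation is replaced by an iova_size() helper whose definition is not shown in this hunk; a sketch, assuming it just names the expression it replaces:

	/* sketch only: number of pages covered by an iova range */
	static inline unsigned long iova_size(struct iova *iova)
	{
		return iova->pfn_hi - iova->pfn_lo + 1;
	}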
@@ -3955,6 +3954,63 @@ static struct notifier_block intel_iommu_memory_nb = { | |||
3955 | .priority = 0 | 3954 | .priority = 0 |
3956 | }; | 3955 | }; |
3957 | 3956 | ||
3957 | |||
3958 | static ssize_t intel_iommu_show_version(struct device *dev, | ||
3959 | struct device_attribute *attr, | ||
3960 | char *buf) | ||
3961 | { | ||
3962 | struct intel_iommu *iommu = dev_get_drvdata(dev); | ||
3963 | u32 ver = readl(iommu->reg + DMAR_VER_REG); | ||
3964 | return sprintf(buf, "%d:%d\n", | ||
3965 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver)); | ||
3966 | } | ||
3967 | static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL); | ||
3968 | |||
3969 | static ssize_t intel_iommu_show_address(struct device *dev, | ||
3970 | struct device_attribute *attr, | ||
3971 | char *buf) | ||
3972 | { | ||
3973 | struct intel_iommu *iommu = dev_get_drvdata(dev); | ||
3974 | return sprintf(buf, "%llx\n", iommu->reg_phys); | ||
3975 | } | ||
3976 | static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL); | ||
3977 | |||
3978 | static ssize_t intel_iommu_show_cap(struct device *dev, | ||
3979 | struct device_attribute *attr, | ||
3980 | char *buf) | ||
3981 | { | ||
3982 | struct intel_iommu *iommu = dev_get_drvdata(dev); | ||
3983 | return sprintf(buf, "%llx\n", iommu->cap); | ||
3984 | } | ||
3985 | static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL); | ||
3986 | |||
3987 | static ssize_t intel_iommu_show_ecap(struct device *dev, | ||
3988 | struct device_attribute *attr, | ||
3989 | char *buf) | ||
3990 | { | ||
3991 | struct intel_iommu *iommu = dev_get_drvdata(dev); | ||
3992 | return sprintf(buf, "%llx\n", iommu->ecap); | ||
3993 | } | ||
3994 | static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL); | ||
3995 | |||
3996 | static struct attribute *intel_iommu_attrs[] = { | ||
3997 | &dev_attr_version.attr, | ||
3998 | &dev_attr_address.attr, | ||
3999 | &dev_attr_cap.attr, | ||
4000 | &dev_attr_ecap.attr, | ||
4001 | NULL, | ||
4002 | }; | ||
4003 | |||
4004 | static struct attribute_group intel_iommu_group = { | ||
4005 | .name = "intel-iommu", | ||
4006 | .attrs = intel_iommu_attrs, | ||
4007 | }; | ||
4008 | |||
4009 | const struct attribute_group *intel_iommu_groups[] = { | ||
4010 | &intel_iommu_group, | ||
4011 | NULL, | ||
4012 | }; | ||
4013 | |||
3958 | int __init intel_iommu_init(void) | 4014 | int __init intel_iommu_init(void) |
3959 | { | 4015 | { |
3960 | int ret = -ENODEV; | 4016 | int ret = -ENODEV; |
@@ -4026,6 +4082,11 @@ int __init intel_iommu_init(void) | |||
4026 | 4082 | ||
4027 | init_iommu_pm_ops(); | 4083 | init_iommu_pm_ops(); |
4028 | 4084 | ||
4085 | for_each_active_iommu(iommu, drhd) | ||
4086 | iommu->iommu_dev = iommu_device_create(NULL, iommu, | ||
4087 | intel_iommu_groups, | ||
4088 | iommu->name); | ||
4089 | |||
4029 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); | 4090 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); |
4030 | bus_register_notifier(&pci_bus_type, &device_nb); | 4091 | bus_register_notifier(&pci_bus_type, &device_nb); |
4031 | if (si_domain && !hw_pass_through) | 4092 | if (si_domain && !hw_pass_through) |
@@ -4044,33 +4105,27 @@ out_free_dmar: | |||
4044 | return ret; | 4105 | return ret; |
4045 | } | 4106 | } |
4046 | 4107 | ||
4108 | static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque) | ||
4109 | { | ||
4110 | struct intel_iommu *iommu = opaque; | ||
4111 | |||
4112 | iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff); | ||
4113 | return 0; | ||
4114 | } | ||
4115 | |||
4116 | /* | ||
4117 | * NB - intel-iommu lacks any sort of reference counting for the users of | ||
4118 | * dependent devices. If multiple endpoints have intersecting dependent | ||
4119 | * devices, unbinding the driver from any one of them will possibly leave | ||
4120 | * the others unable to operate. | ||
4121 | */ | ||
4047 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 4122 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
4048 | struct device *dev) | 4123 | struct device *dev) |
4049 | { | 4124 | { |
4050 | struct pci_dev *tmp, *parent, *pdev; | ||
4051 | |||
4052 | if (!iommu || !dev || !dev_is_pci(dev)) | 4125 | if (!iommu || !dev || !dev_is_pci(dev)) |
4053 | return; | 4126 | return; |
4054 | 4127 | ||
4055 | pdev = to_pci_dev(dev); | 4128 | pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu); |
4056 | |||
4057 | /* dependent device detach */ | ||
4058 | tmp = pci_find_upstream_pcie_bridge(pdev); | ||
4059 | /* Secondary interface's bus number and devfn 0 */ | ||
4060 | if (tmp) { | ||
4061 | parent = pdev->bus->self; | ||
4062 | while (parent != tmp) { | ||
4063 | iommu_detach_dev(iommu, parent->bus->number, | ||
4064 | parent->devfn); | ||
4065 | parent = parent->bus->self; | ||
4066 | } | ||
4067 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ | ||
4068 | iommu_detach_dev(iommu, | ||
4069 | tmp->subordinate->number, 0); | ||
4070 | else /* this is a legacy PCI bridge */ | ||
4071 | iommu_detach_dev(iommu, tmp->bus->number, | ||
4072 | tmp->devfn); | ||
4073 | } | ||
4074 | } | 4129 | } |
4075 | 4130 | ||
4076 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | 4131 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
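Note: the hand-rolled walk over pci_find_upstream_pcie_bridge() is replaced by pci_for_each_dma_alias(), which invokes a callback for the device itself and for every requester ID it may present upstream (quirked devfns, bridge aliases), stopping as soon as the callback returns non-zero. A throwaway sketch of the pattern, with a hypothetical callback name:

	/* hypothetical: log every requester ID this device can present */
	static int dump_dma_alias_cb(struct pci_dev *pdev, u16 alias, void *opaque)
	{
		dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
			 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
			 PCI_FUNC(alias & 0xff));
		return 0;	/* keep walking; non-zero stops the iteration */
	}

	/* usage: pci_for_each_dma_alias(to_pci_dev(dev), dump_dma_alias_cb, NULL); */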
@@ -4117,20 +4172,9 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
4117 | spin_unlock_irqrestore(&device_domain_lock, flags); | 4172 | spin_unlock_irqrestore(&device_domain_lock, flags); |
4118 | 4173 | ||
4119 | if (found == 0) { | 4174 | if (found == 0) { |
4120 | unsigned long tmp_flags; | 4175 | domain_detach_iommu(domain, iommu); |
4121 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); | 4176 | if (!domain_type_is_vm_or_si(domain)) |
4122 | clear_bit(iommu->seq_id, domain->iommu_bmp); | 4177 | iommu_detach_domain(domain, iommu); |
4123 | domain->iommu_count--; | ||
4124 | domain_update_iommu_cap(domain); | ||
4125 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); | ||
4126 | |||
4127 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | ||
4128 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { | ||
4129 | spin_lock_irqsave(&iommu->lock, tmp_flags); | ||
4130 | clear_bit(domain->id, iommu->domain_ids); | ||
4131 | iommu->domains[domain->id] = NULL; | ||
4132 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); | ||
4133 | } | ||
4134 | } | 4178 | } |
4135 | } | 4179 | } |
4136 | 4180 | ||
@@ -4150,7 +4194,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
4150 | domain->iommu_snooping = 0; | 4194 | domain->iommu_snooping = 0; |
4151 | domain->iommu_superpage = 0; | 4195 | domain->iommu_superpage = 0; |
4152 | domain->max_addr = 0; | 4196 | domain->max_addr = 0; |
4153 | domain->nid = -1; | ||
4154 | 4197 | ||
4155 | /* always allocate the top pgd */ | 4198 | /* always allocate the top pgd */ |
4156 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); | 4199 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); |
@@ -4164,7 +4207,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
4164 | { | 4207 | { |
4165 | struct dmar_domain *dmar_domain; | 4208 | struct dmar_domain *dmar_domain; |
4166 | 4209 | ||
4167 | dmar_domain = alloc_domain(true); | 4210 | dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); |
4168 | if (!dmar_domain) { | 4211 | if (!dmar_domain) { |
4169 | printk(KERN_ERR | 4212 | printk(KERN_ERR |
4170 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 4213 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
@@ -4202,14 +4245,18 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
4202 | int addr_width; | 4245 | int addr_width; |
4203 | u8 bus, devfn; | 4246 | u8 bus, devfn; |
4204 | 4247 | ||
4248 | if (device_is_rmrr_locked(dev)) { | ||
4249 | dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n"); | ||
4250 | return -EPERM; | ||
4251 | } | ||
4252 | |||
4205 | /* normally dev is not mapped */ | 4253 | /* normally dev is not mapped */ |
4206 | if (unlikely(domain_context_mapped(dev))) { | 4254 | if (unlikely(domain_context_mapped(dev))) { |
4207 | struct dmar_domain *old_domain; | 4255 | struct dmar_domain *old_domain; |
4208 | 4256 | ||
4209 | old_domain = find_domain(dev); | 4257 | old_domain = find_domain(dev); |
4210 | if (old_domain) { | 4258 | if (old_domain) { |
4211 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || | 4259 | if (domain_type_is_vm_or_si(dmar_domain)) |
4212 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) | ||
4213 | domain_remove_one_dev_info(old_domain, dev); | 4260 | domain_remove_one_dev_info(old_domain, dev); |
4214 | else | 4261 | else |
4215 | domain_remove_dev_info(old_domain); | 4262 | domain_remove_dev_info(old_domain); |
@@ -4373,99 +4420,42 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain, | |||
4373 | return 0; | 4420 | return 0; |
4374 | } | 4421 | } |
4375 | 4422 | ||
4376 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) | ||
4377 | |||
4378 | static int intel_iommu_add_device(struct device *dev) | 4423 | static int intel_iommu_add_device(struct device *dev) |
4379 | { | 4424 | { |
4380 | struct pci_dev *pdev = to_pci_dev(dev); | 4425 | struct intel_iommu *iommu; |
4381 | struct pci_dev *bridge, *dma_pdev = NULL; | ||
4382 | struct iommu_group *group; | 4426 | struct iommu_group *group; |
4383 | int ret; | ||
4384 | u8 bus, devfn; | 4427 | u8 bus, devfn; |
4385 | 4428 | ||
4386 | if (!device_to_iommu(dev, &bus, &devfn)) | 4429 | iommu = device_to_iommu(dev, &bus, &devfn); |
4430 | if (!iommu) | ||
4387 | return -ENODEV; | 4431 | return -ENODEV; |
4388 | 4432 | ||
4389 | bridge = pci_find_upstream_pcie_bridge(pdev); | 4433 | iommu_device_link(iommu->iommu_dev, dev); |
4390 | if (bridge) { | ||
4391 | if (pci_is_pcie(bridge)) | ||
4392 | dma_pdev = pci_get_domain_bus_and_slot( | ||
4393 | pci_domain_nr(pdev->bus), | ||
4394 | bridge->subordinate->number, 0); | ||
4395 | if (!dma_pdev) | ||
4396 | dma_pdev = pci_dev_get(bridge); | ||
4397 | } else | ||
4398 | dma_pdev = pci_dev_get(pdev); | ||
4399 | |||
4400 | /* Account for quirked devices */ | ||
4401 | swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); | ||
4402 | 4434 | ||
4403 | /* | 4435 | group = iommu_group_get_for_dev(dev); |
4404 | * If it's a multifunction device that does not support our | ||
4405 | * required ACS flags, add to the same group as lowest numbered | ||
4406 | * function that also does not suport the required ACS flags. | ||
4407 | */ | ||
4408 | if (dma_pdev->multifunction && | ||
4409 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { | ||
4410 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); | ||
4411 | |||
4412 | for (i = 0; i < 8; i++) { | ||
4413 | struct pci_dev *tmp; | ||
4414 | 4436 | ||
4415 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | 4437 | if (IS_ERR(group)) |
4416 | if (!tmp) | 4438 | return PTR_ERR(group); |
4417 | continue; | ||
4418 | |||
4419 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
4420 | swap_pci_ref(&dma_pdev, tmp); | ||
4421 | break; | ||
4422 | } | ||
4423 | pci_dev_put(tmp); | ||
4424 | } | ||
4425 | } | ||
4426 | |||
4427 | /* | ||
4428 | * Devices on the root bus go through the iommu. If that's not us, | ||
4429 | * find the next upstream device and test ACS up to the root bus. | ||
4430 | * Finding the next device may require skipping virtual buses. | ||
4431 | */ | ||
4432 | while (!pci_is_root_bus(dma_pdev->bus)) { | ||
4433 | struct pci_bus *bus = dma_pdev->bus; | ||
4434 | |||
4435 | while (!bus->self) { | ||
4436 | if (!pci_is_root_bus(bus)) | ||
4437 | bus = bus->parent; | ||
4438 | else | ||
4439 | goto root_bus; | ||
4440 | } | ||
4441 | |||
4442 | if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) | ||
4443 | break; | ||
4444 | |||
4445 | swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); | ||
4446 | } | ||
4447 | |||
4448 | root_bus: | ||
4449 | group = iommu_group_get(&dma_pdev->dev); | ||
4450 | pci_dev_put(dma_pdev); | ||
4451 | if (!group) { | ||
4452 | group = iommu_group_alloc(); | ||
4453 | if (IS_ERR(group)) | ||
4454 | return PTR_ERR(group); | ||
4455 | } | ||
4456 | |||
4457 | ret = iommu_group_add_device(group, dev); | ||
4458 | 4439 | ||
4459 | iommu_group_put(group); | 4440 | iommu_group_put(group); |
4460 | return ret; | 4441 | return 0; |
4461 | } | 4442 | } |
4462 | 4443 | ||
4463 | static void intel_iommu_remove_device(struct device *dev) | 4444 | static void intel_iommu_remove_device(struct device *dev) |
4464 | { | 4445 | { |
4446 | struct intel_iommu *iommu; | ||
4447 | u8 bus, devfn; | ||
4448 | |||
4449 | iommu = device_to_iommu(dev, &bus, &devfn); | ||
4450 | if (!iommu) | ||
4451 | return; | ||
4452 | |||
4465 | iommu_group_remove_device(dev); | 4453 | iommu_group_remove_device(dev); |
4454 | |||
4455 | iommu_device_unlink(iommu->iommu_dev, dev); | ||
4466 | } | 4456 | } |
4467 | 4457 | ||
4468 | static struct iommu_ops intel_iommu_ops = { | 4458 | static const struct iommu_ops intel_iommu_ops = { |
4469 | .domain_init = intel_iommu_domain_init, | 4459 | .domain_init = intel_iommu_domain_init, |
4470 | .domain_destroy = intel_iommu_domain_destroy, | 4460 | .domain_destroy = intel_iommu_domain_destroy, |
4471 | .attach_dev = intel_iommu_attach_device, | 4461 | .attach_dev = intel_iommu_attach_device, |
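Note: the DEVICE_ATTR()s added above give each DMAR unit a read-only "intel-iommu" attribute directory, and intel_iommu_init() hands the group array to iommu_device_create() for every active IOMMU. Another driver could expose its own namespace the same way; a hedged sketch using hypothetical names ('foo', 'rev'):

	static ssize_t foo_iommu_show_rev(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		struct foo_iommu *iommu = dev_get_drvdata(dev);

		return sprintf(buf, "%u\n", iommu->rev);
	}
	static DEVICE_ATTR(revision, S_IRUGO, foo_iommu_show_rev, NULL);

	static struct attribute *foo_iommu_attrs[] = {
		&dev_attr_revision.attr,
		NULL,
	};

	static const struct attribute_group foo_iommu_group = {
		.name = "foo-iommu",
		.attrs = foo_iommu_attrs,
	};

	static const struct attribute_group *foo_iommu_groups[] = {
		&foo_iommu_group,
		NULL,
	};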
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 9b174893f0f5..0df41f6264f5 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -70,6 +70,11 @@ static int get_irte(int irq, struct irte *entry) | |||
70 | 70 | ||
71 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); | 71 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
72 | 72 | ||
73 | if (unlikely(!irq_iommu->iommu)) { | ||
74 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
75 | return -1; | ||
76 | } | ||
77 | |||
73 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 78 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
74 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 79 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
75 | 80 | ||
@@ -369,29 +374,52 @@ static int set_hpet_sid(struct irte *irte, u8 id) | |||
369 | return 0; | 374 | return 0; |
370 | } | 375 | } |
371 | 376 | ||
377 | struct set_msi_sid_data { | ||
378 | struct pci_dev *pdev; | ||
379 | u16 alias; | ||
380 | }; | ||
381 | |||
382 | static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) | ||
383 | { | ||
384 | struct set_msi_sid_data *data = opaque; | ||
385 | |||
386 | data->pdev = pdev; | ||
387 | data->alias = alias; | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
372 | static int set_msi_sid(struct irte *irte, struct pci_dev *dev) | 392 | static int set_msi_sid(struct irte *irte, struct pci_dev *dev) |
373 | { | 393 | { |
374 | struct pci_dev *bridge; | 394 | struct set_msi_sid_data data; |
375 | 395 | ||
376 | if (!irte || !dev) | 396 | if (!irte || !dev) |
377 | return -1; | 397 | return -1; |
378 | 398 | ||
379 | /* PCIe device or Root Complex integrated PCI device */ | 399 | pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); |
380 | if (pci_is_pcie(dev) || !dev->bus->parent) { | ||
381 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
382 | (dev->bus->number << 8) | dev->devfn); | ||
383 | return 0; | ||
384 | } | ||
385 | 400 | ||
386 | bridge = pci_find_upstream_pcie_bridge(dev); | 401 | /* |
387 | if (bridge) { | 402 | * DMA alias provides us with a PCI device and alias. The only case |
388 | if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ | 403 | * where it will return an alias on a different bus than the |
389 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | 404 | * device is the case of a PCIe-to-PCI bridge, where the alias is for |
390 | (bridge->bus->number << 8) | dev->bus->number); | 405 | * the subordinate bus. In this case we can only verify the bus. |
391 | else /* this is a legacy PCI bridge */ | 406 | * |
392 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | 407 | * If the alias device is on a different bus than our source device |
393 | (bridge->bus->number << 8) | bridge->devfn); | 408 | * then we have a topology based alias, use it. |
394 | } | 409 | * |
410 | * Otherwise, the alias is for a device DMA quirk and we cannot | ||
411 | * assume that MSI uses the same requester ID. Therefore use the | ||
412 | * original device. | ||
413 | */ | ||
414 | if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number) | ||
415 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | ||
416 | PCI_DEVID(PCI_BUS_NUM(data.alias), | ||
417 | dev->bus->number)); | ||
418 | else if (data.pdev->bus->number != dev->bus->number) | ||
419 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias); | ||
420 | else | ||
421 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
422 | PCI_DEVID(dev->bus->number, dev->devfn)); | ||
395 | 423 | ||
396 | return 0; | 424 | return 0; |
397 | } | 425 | } |
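Note: the source-id packing used above follows the usual bus/devfn layout: PCI_DEVID(bus, devfn) is (bus << 8) | devfn and PCI_BUS_NUM() recovers the bus. A worked example with made-up numbers:

	u16 sid = PCI_DEVID(0x05, PCI_DEVFN(3, 2));	/* (0x05 << 8) | 0x1a = 0x051a */
	u8  bus = PCI_BUS_NUM(sid);			/* 0x05 */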
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c new file mode 100644 index 000000000000..39b2d9127dbf --- /dev/null +++ b/drivers/iommu/iommu-sysfs.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * IOMMU sysfs class support | ||
3 | * | ||
4 | * Copyright (C) 2014 Red Hat, Inc. All rights reserved. | ||
5 | * Author: Alex Williamson <alex.williamson@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/device.h> | ||
13 | #include <linux/iommu.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | /* | ||
18 | * We provide a common class "devices" group which initially has no attributes. | ||
19 | * As devices are added to the IOMMU, we'll add links to the group. | ||
20 | */ | ||
21 | static struct attribute *devices_attr[] = { | ||
22 | NULL, | ||
23 | }; | ||
24 | |||
25 | static const struct attribute_group iommu_devices_attr_group = { | ||
26 | .name = "devices", | ||
27 | .attrs = devices_attr, | ||
28 | }; | ||
29 | |||
30 | static const struct attribute_group *iommu_dev_groups[] = { | ||
31 | &iommu_devices_attr_group, | ||
32 | NULL, | ||
33 | }; | ||
34 | |||
35 | static void iommu_release_device(struct device *dev) | ||
36 | { | ||
37 | kfree(dev); | ||
38 | } | ||
39 | |||
40 | static struct class iommu_class = { | ||
41 | .name = "iommu", | ||
42 | .dev_release = iommu_release_device, | ||
43 | .dev_groups = iommu_dev_groups, | ||
44 | }; | ||
45 | |||
46 | static int __init iommu_dev_init(void) | ||
47 | { | ||
48 | return class_register(&iommu_class); | ||
49 | } | ||
50 | postcore_initcall(iommu_dev_init); | ||
51 | |||
52 | /* | ||
53 | * Create an IOMMU device and return a pointer to it. IOMMU specific | ||
54 | * attributes can be provided as an attribute group, allowing a unique | ||
55 | * namespace per IOMMU type. | ||
56 | */ | ||
57 | struct device *iommu_device_create(struct device *parent, void *drvdata, | ||
58 | const struct attribute_group **groups, | ||
59 | const char *fmt, ...) | ||
60 | { | ||
61 | struct device *dev; | ||
62 | va_list vargs; | ||
63 | int ret; | ||
64 | |||
65 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
66 | if (!dev) | ||
67 | return ERR_PTR(-ENOMEM); | ||
68 | |||
69 | device_initialize(dev); | ||
70 | |||
71 | dev->class = &iommu_class; | ||
72 | dev->parent = parent; | ||
73 | dev->groups = groups; | ||
74 | dev_set_drvdata(dev, drvdata); | ||
75 | |||
76 | va_start(vargs, fmt); | ||
77 | ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs); | ||
78 | va_end(vargs); | ||
79 | if (ret) | ||
80 | goto error; | ||
81 | |||
82 | ret = device_add(dev); | ||
83 | if (ret) | ||
84 | goto error; | ||
85 | |||
86 | return dev; | ||
87 | |||
88 | error: | ||
89 | put_device(dev); | ||
90 | return ERR_PTR(ret); | ||
91 | } | ||
92 | |||
93 | void iommu_device_destroy(struct device *dev) | ||
94 | { | ||
95 | if (!dev || IS_ERR(dev)) | ||
96 | return; | ||
97 | |||
98 | device_unregister(dev); | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * IOMMU drivers can indicate a device is managed by a given IOMMU using | ||
103 | * this interface. A link to the device will be created in the "devices" | ||
104 | * directory of the IOMMU device in sysfs and an "iommu" link will be | ||
105 | * created under the linked device, pointing back at the IOMMU device. | ||
106 | */ | ||
107 | int iommu_device_link(struct device *dev, struct device *link) | ||
108 | { | ||
109 | int ret; | ||
110 | |||
111 | if (!dev || IS_ERR(dev)) | ||
112 | return -ENODEV; | ||
113 | |||
114 | ret = sysfs_add_link_to_group(&dev->kobj, "devices", | ||
115 | &link->kobj, dev_name(link)); | ||
116 | if (ret) | ||
117 | return ret; | ||
118 | |||
119 | ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu"); | ||
120 | if (ret) | ||
121 | sysfs_remove_link_from_group(&dev->kobj, "devices", | ||
122 | dev_name(link)); | ||
123 | |||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | void iommu_device_unlink(struct device *dev, struct device *link) | ||
128 | { | ||
129 | if (!dev || IS_ERR(dev)) | ||
130 | return; | ||
131 | |||
132 | sysfs_remove_link(&link->kobj, "iommu"); | ||
133 | sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link)); | ||
134 | } | ||
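Note: the interface is symmetric: iommu_device_create()/iommu_device_destroy() bracket the lifetime of the IOMMU's class device, while iommu_device_link()/iommu_device_unlink() bracket each endpoint's membership, which is how the intel-iommu add_device/remove_device callbacks above use it. A minimal lifetime sketch for a hypothetical driver instance 'smmu':

	/* probe */
	smmu->iommu_dev = iommu_device_create(NULL, smmu, NULL, "smmu.%d", id);
	if (IS_ERR(smmu->iommu_dev))
		return PTR_ERR(smmu->iommu_dev);

	/* .add_device */
	iommu_device_link(smmu->iommu_dev, dev);

	/* .remove_device */
	iommu_device_unlink(smmu->iommu_dev, dev);

	/* remove */
	iommu_device_destroy(smmu->iommu_dev);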
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index e5555fcfe703..169836020208 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -29,12 +29,17 @@ | |||
29 | #include <linux/idr.h> | 29 | #include <linux/idr.h> |
30 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
31 | #include <linux/err.h> | 31 | #include <linux/err.h> |
32 | #include <linux/pci.h> | ||
32 | #include <trace/events/iommu.h> | 33 | #include <trace/events/iommu.h> |
33 | 34 | ||
34 | static struct kset *iommu_group_kset; | 35 | static struct kset *iommu_group_kset; |
35 | static struct ida iommu_group_ida; | 36 | static struct ida iommu_group_ida; |
36 | static struct mutex iommu_group_mutex; | 37 | static struct mutex iommu_group_mutex; |
37 | 38 | ||
39 | struct iommu_callback_data { | ||
40 | const struct iommu_ops *ops; | ||
41 | }; | ||
42 | |||
38 | struct iommu_group { | 43 | struct iommu_group { |
39 | struct kobject kobj; | 44 | struct kobject kobj; |
40 | struct kobject *devices_kobj; | 45 | struct kobject *devices_kobj; |
@@ -514,9 +519,191 @@ int iommu_group_id(struct iommu_group *group) | |||
514 | } | 519 | } |
515 | EXPORT_SYMBOL_GPL(iommu_group_id); | 520 | EXPORT_SYMBOL_GPL(iommu_group_id); |
516 | 521 | ||
522 | /* | ||
523 | * To consider a PCI device isolated, we require ACS to support Source | ||
524 | * Validation, Request Redirection, Completer Redirection, and Upstream | ||
525 | * Forwarding. This effectively means that devices cannot spoof their | ||
526 | * requester ID, requests and completions cannot be redirected, and all | ||
527 | * transactions are forwarded upstream, even as they pass through a | ||
528 | * bridge where the target device is downstream. | ||
529 | */ | ||
530 | #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) | ||
531 | |||
532 | struct group_for_pci_data { | ||
533 | struct pci_dev *pdev; | ||
534 | struct iommu_group *group; | ||
535 | }; | ||
536 | |||
537 | /* | ||
538 | * DMA alias iterator callback, return the last seen device. Stop and return | ||
539 | * the IOMMU group if we find one along the way. | ||
540 | */ | ||
541 | static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) | ||
542 | { | ||
543 | struct group_for_pci_data *data = opaque; | ||
544 | |||
545 | data->pdev = pdev; | ||
546 | data->group = iommu_group_get(&pdev->dev); | ||
547 | |||
548 | return data->group != NULL; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * Use standard PCI bus topology, isolation features, and DMA alias quirks | ||
553 | * to find or create an IOMMU group for a device. | ||
554 | */ | ||
555 | static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) | ||
556 | { | ||
557 | struct group_for_pci_data data; | ||
558 | struct pci_bus *bus; | ||
559 | struct iommu_group *group = NULL; | ||
560 | struct pci_dev *tmp; | ||
561 | |||
562 | /* | ||
563 | * Find the upstream DMA alias for the device. A device must not | ||
564 | * be aliased due to topology in order to have its own IOMMU group. | ||
565 | * If we find an alias along the way that already belongs to a | ||
566 | * group, use it. | ||
567 | */ | ||
568 | if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) | ||
569 | return data.group; | ||
570 | |||
571 | pdev = data.pdev; | ||
572 | |||
573 | /* | ||
574 | * Continue upstream from the point of minimum IOMMU granularity | ||
575 | * due to aliases to the point where devices are protected from | ||
576 | * peer-to-peer DMA by PCI ACS. Again, if we find an existing | ||
577 | * group, use it. | ||
578 | */ | ||
579 | for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { | ||
580 | if (!bus->self) | ||
581 | continue; | ||
582 | |||
583 | if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) | ||
584 | break; | ||
585 | |||
586 | pdev = bus->self; | ||
587 | |||
588 | group = iommu_group_get(&pdev->dev); | ||
589 | if (group) | ||
590 | return group; | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Next we need to consider DMA alias quirks. If one device aliases | ||
595 | * to another, they should be grouped together. It's theoretically | ||
596 | * possible that aliases could create chains of devices where each | ||
597 | * device aliases another device. If we then factor in multifunction | ||
598 | * ACS grouping requirements, each alias could incorporate a new slot | ||
599 | * with multiple functions, each with aliases. This is all extremely | ||
600 | * unlikely as DMA alias quirks are typically only used for PCIe | ||
601 | * devices where we usually have a single slot per bus. Furthermore, | ||
602 | * the alias quirk is usually to another function within the slot | ||
603 | * (and ACS multifunction is not supported) or to a different slot | ||
604 | * that doesn't physically exist. The likely scenario is therefore | ||
605 | * that everything on the bus gets grouped together. To reduce the | ||
606 | * problem space, share the IOMMU group for all devices on the bus | ||
607 | * if a DMA alias quirk is present on the bus. | ||
608 | */ | ||
609 | tmp = NULL; | ||
610 | for_each_pci_dev(tmp) { | ||
611 | if (tmp->bus != pdev->bus || | ||
612 | !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) | ||
613 | continue; | ||
614 | |||
615 | pci_dev_put(tmp); | ||
616 | tmp = NULL; | ||
617 | |||
618 | /* We have an alias quirk, search for an existing group */ | ||
619 | for_each_pci_dev(tmp) { | ||
620 | struct iommu_group *group_tmp; | ||
621 | |||
622 | if (tmp->bus != pdev->bus) | ||
623 | continue; | ||
624 | |||
625 | group_tmp = iommu_group_get(&tmp->dev); | ||
626 | if (!group) { | ||
627 | group = group_tmp; | ||
628 | continue; | ||
629 | } | ||
630 | |||
631 | if (group_tmp) { | ||
632 | WARN_ON(group != group_tmp); | ||
633 | iommu_group_put(group_tmp); | ||
634 | } | ||
635 | } | ||
636 | |||
637 | return group ? group : iommu_group_alloc(); | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * Non-multifunction devices or multifunction devices supporting | ||
642 | * ACS get their own group. | ||
643 | */ | ||
644 | if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) | ||
645 | return iommu_group_alloc(); | ||
646 | |||
647 | /* | ||
648 | * Multifunction devices not supporting ACS share a group with other | ||
649 | * similar devices in the same slot. | ||
650 | */ | ||
651 | tmp = NULL; | ||
652 | for_each_pci_dev(tmp) { | ||
653 | if (tmp == pdev || tmp->bus != pdev->bus || | ||
654 | PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || | ||
655 | pci_acs_enabled(tmp, REQ_ACS_FLAGS)) | ||
656 | continue; | ||
657 | |||
658 | group = iommu_group_get(&tmp->dev); | ||
659 | if (group) { | ||
660 | pci_dev_put(tmp); | ||
661 | return group; | ||
662 | } | ||
663 | } | ||
664 | |||
665 | /* No shared group found, allocate new */ | ||
666 | return iommu_group_alloc(); | ||
667 | } | ||
668 | |||
669 | /** | ||
670 | * iommu_group_get_for_dev - Find or create the IOMMU group for a device | ||
671 | * @dev: target device | ||
672 | * | ||
673 | * This function is intended to be called by IOMMU drivers and extended to | ||
674 | * support common, bus-defined algorithms when determining or creating the | ||
675 | * IOMMU group for a device. On success, the caller will hold a reference | ||
676 | * to the returned IOMMU group, which will already include the provided | ||
677 | * device. The reference should be released with iommu_group_put(). | ||
678 | */ | ||
679 | struct iommu_group *iommu_group_get_for_dev(struct device *dev) | ||
680 | { | ||
681 | struct iommu_group *group = ERR_PTR(-EIO); | ||
682 | int ret; | ||
683 | |||
684 | group = iommu_group_get(dev); | ||
685 | if (group) | ||
686 | return group; | ||
687 | |||
688 | if (dev_is_pci(dev)) | ||
689 | group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); | ||
690 | |||
691 | if (IS_ERR(group)) | ||
692 | return group; | ||
693 | |||
694 | ret = iommu_group_add_device(group, dev); | ||
695 | if (ret) { | ||
696 | iommu_group_put(group); | ||
697 | return ERR_PTR(ret); | ||
698 | } | ||
699 | |||
700 | return group; | ||
701 | } | ||
702 | |||
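Note: with the grouping policy moved into the core, a driver's .add_device callback shrinks to a call to iommu_group_get_for_dev() plus dropping the returned reference, much as intel_iommu_add_device() above now does (plus its sysfs link). A hedged sketch with a hypothetical driver name:

	static int foo_add_device(struct device *dev)
	{
		struct iommu_group *group;

		group = iommu_group_get_for_dev(dev);	/* find or create; adds dev */
		if (IS_ERR(group))
			return PTR_ERR(group);

		/* dev is already a group member; just drop our reference */
		iommu_group_put(group);
		return 0;
	}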
517 | static int add_iommu_group(struct device *dev, void *data) | 703 | static int add_iommu_group(struct device *dev, void *data) |
518 | { | 704 | { |
519 | struct iommu_ops *ops = data; | 705 | struct iommu_callback_data *cb = data; |
706 | const struct iommu_ops *ops = cb->ops; | ||
520 | 707 | ||
521 | if (!ops->add_device) | 708 | if (!ops->add_device) |
522 | return -ENODEV; | 709 | return -ENODEV; |
@@ -532,7 +719,7 @@ static int iommu_bus_notifier(struct notifier_block *nb, | |||
532 | unsigned long action, void *data) | 719 | unsigned long action, void *data) |
533 | { | 720 | { |
534 | struct device *dev = data; | 721 | struct device *dev = data; |
535 | struct iommu_ops *ops = dev->bus->iommu_ops; | 722 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
536 | struct iommu_group *group; | 723 | struct iommu_group *group; |
537 | unsigned long group_action = 0; | 724 | unsigned long group_action = 0; |
538 | 725 | ||
@@ -585,10 +772,14 @@ static struct notifier_block iommu_bus_nb = { | |||
585 | .notifier_call = iommu_bus_notifier, | 772 | .notifier_call = iommu_bus_notifier, |
586 | }; | 773 | }; |
587 | 774 | ||
588 | static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) | 775 | static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) |
589 | { | 776 | { |
777 | struct iommu_callback_data cb = { | ||
778 | .ops = ops, | ||
779 | }; | ||
780 | |||
590 | bus_register_notifier(bus, &iommu_bus_nb); | 781 | bus_register_notifier(bus, &iommu_bus_nb); |
591 | bus_for_each_dev(bus, NULL, ops, add_iommu_group); | 782 | bus_for_each_dev(bus, NULL, &cb, add_iommu_group); |
592 | } | 783 | } |
593 | 784 | ||
594 | /** | 785 | /** |
@@ -604,7 +795,7 @@ static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) | |||
604 | * is set up. With this function the iommu-driver can set the iommu-ops | 795 | * is set up. With this function the iommu-driver can set the iommu-ops |
605 | * afterwards. | 796 | * afterwards. |
606 | */ | 797 | */ |
607 | int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops) | 798 | int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) |
608 | { | 799 | { |
609 | if (bus->iommu_ops != NULL) | 800 | if (bus->iommu_ops != NULL) |
610 | return -EBUSY; | 801 | return -EBUSY; |
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 53cde086e83b..7dab5cbcc775 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -1120,7 +1120,7 @@ static void ipmmu_remove_device(struct device *dev) | |||
1120 | dev->archdata.iommu = NULL; | 1120 | dev->archdata.iommu = NULL; |
1121 | } | 1121 | } |
1122 | 1122 | ||
1123 | static struct iommu_ops ipmmu_ops = { | 1123 | static const struct iommu_ops ipmmu_ops = { |
1124 | .domain_init = ipmmu_domain_init, | 1124 | .domain_init = ipmmu_domain_init, |
1125 | .domain_destroy = ipmmu_domain_destroy, | 1125 | .domain_destroy = ipmmu_domain_destroy, |
1126 | .attach_dev = ipmmu_attach_device, | 1126 | .attach_dev = ipmmu_attach_device, |
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f5ff657f49fa..49f41d6e02f1 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c | |||
@@ -674,7 +674,7 @@ fail: | |||
674 | return 0; | 674 | return 0; |
675 | } | 675 | } |
676 | 676 | ||
677 | static struct iommu_ops msm_iommu_ops = { | 677 | static const struct iommu_ops msm_iommu_ops = { |
678 | .domain_init = msm_iommu_domain_init, | 678 | .domain_init = msm_iommu_domain_init, |
679 | .domain_destroy = msm_iommu_domain_destroy, | 679 | .domain_destroy = msm_iommu_domain_destroy, |
680 | .attach_dev = msm_iommu_attach_dev, | 680 | .attach_dev = msm_iommu_attach_dev, |
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 80fffba7f12d..531658d17333 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c | |||
@@ -213,116 +213,6 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, | |||
213 | return bytes; | 213 | return bytes; |
214 | } | 214 | } |
215 | 215 | ||
216 | static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, | ||
217 | size_t count, loff_t *ppos) | ||
218 | { | ||
219 | struct device *dev = file->private_data; | ||
220 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
221 | char *p, *buf; | ||
222 | struct iovm_struct *tmp; | ||
223 | int uninitialized_var(i); | ||
224 | ssize_t bytes; | ||
225 | |||
226 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
227 | if (!buf) | ||
228 | return -ENOMEM; | ||
229 | p = buf; | ||
230 | |||
231 | p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n", | ||
232 | "No", "start", "end", "size", "flags"); | ||
233 | p += sprintf(p, "-------------------------------------------------\n"); | ||
234 | |||
235 | mutex_lock(&iommu_debug_lock); | ||
236 | |||
237 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
238 | size_t len; | ||
239 | const char *str = "%3d %08x-%08x %6x %8x\n"; | ||
240 | const int maxcol = 39; | ||
241 | |||
242 | len = tmp->da_end - tmp->da_start; | ||
243 | p += snprintf(p, maxcol, str, | ||
244 | i, tmp->da_start, tmp->da_end, len, tmp->flags); | ||
245 | |||
246 | if (PAGE_SIZE - (p - buf) < maxcol) | ||
247 | break; | ||
248 | i++; | ||
249 | } | ||
250 | |||
251 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
252 | |||
253 | mutex_unlock(&iommu_debug_lock); | ||
254 | free_page((unsigned long)buf); | ||
255 | |||
256 | return bytes; | ||
257 | } | ||
258 | |||
259 | static ssize_t debug_read_mem(struct file *file, char __user *userbuf, | ||
260 | size_t count, loff_t *ppos) | ||
261 | { | ||
262 | struct device *dev = file->private_data; | ||
263 | char *p, *buf; | ||
264 | struct iovm_struct *area; | ||
265 | ssize_t bytes; | ||
266 | |||
267 | count = min_t(ssize_t, count, PAGE_SIZE); | ||
268 | |||
269 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
270 | if (!buf) | ||
271 | return -ENOMEM; | ||
272 | p = buf; | ||
273 | |||
274 | mutex_lock(&iommu_debug_lock); | ||
275 | |||
276 | area = omap_find_iovm_area(dev, (u32)ppos); | ||
277 | if (!area) { | ||
278 | bytes = -EINVAL; | ||
279 | goto err_out; | ||
280 | } | ||
281 | memcpy(p, area->va, count); | ||
282 | p += count; | ||
283 | |||
284 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
285 | err_out: | ||
286 | mutex_unlock(&iommu_debug_lock); | ||
287 | free_page((unsigned long)buf); | ||
288 | |||
289 | return bytes; | ||
290 | } | ||
291 | |||
292 | static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, | ||
293 | size_t count, loff_t *ppos) | ||
294 | { | ||
295 | struct device *dev = file->private_data; | ||
296 | struct iovm_struct *area; | ||
297 | char *p, *buf; | ||
298 | |||
299 | count = min_t(size_t, count, PAGE_SIZE); | ||
300 | |||
301 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
302 | if (!buf) | ||
303 | return -ENOMEM; | ||
304 | p = buf; | ||
305 | |||
306 | mutex_lock(&iommu_debug_lock); | ||
307 | |||
308 | if (copy_from_user(p, userbuf, count)) { | ||
309 | count = -EFAULT; | ||
310 | goto err_out; | ||
311 | } | ||
312 | |||
313 | area = omap_find_iovm_area(dev, (u32)ppos); | ||
314 | if (!area) { | ||
315 | count = -EINVAL; | ||
316 | goto err_out; | ||
317 | } | ||
318 | memcpy(area->va, p, count); | ||
319 | err_out: | ||
320 | mutex_unlock(&iommu_debug_lock); | ||
321 | free_page((unsigned long)buf); | ||
322 | |||
323 | return count; | ||
324 | } | ||
325 | |||
326 | #define DEBUG_FOPS(name) \ | 216 | #define DEBUG_FOPS(name) \ |
327 | static const struct file_operations debug_##name##_fops = { \ | 217 | static const struct file_operations debug_##name##_fops = { \ |
328 | .open = simple_open, \ | 218 | .open = simple_open, \ |
@@ -342,8 +232,6 @@ DEBUG_FOPS_RO(ver); | |||
342 | DEBUG_FOPS_RO(regs); | 232 | DEBUG_FOPS_RO(regs); |
343 | DEBUG_FOPS_RO(tlb); | 233 | DEBUG_FOPS_RO(tlb); |
344 | DEBUG_FOPS(pagetable); | 234 | DEBUG_FOPS(pagetable); |
345 | DEBUG_FOPS_RO(mmap); | ||
346 | DEBUG_FOPS(mem); | ||
347 | 235 | ||
348 | #define __DEBUG_ADD_FILE(attr, mode) \ | 236 | #define __DEBUG_ADD_FILE(attr, mode) \ |
349 | { \ | 237 | { \ |
@@ -389,8 +277,6 @@ static int iommu_debug_register(struct device *dev, void *data) | |||
389 | DEBUG_ADD_FILE_RO(regs); | 277 | DEBUG_ADD_FILE_RO(regs); |
390 | DEBUG_ADD_FILE_RO(tlb); | 278 | DEBUG_ADD_FILE_RO(tlb); |
391 | DEBUG_ADD_FILE(pagetable); | 279 | DEBUG_ADD_FILE(pagetable); |
392 | DEBUG_ADD_FILE_RO(mmap); | ||
393 | DEBUG_ADD_FILE(mem); | ||
394 | 280 | ||
395 | return 0; | 281 | return 0; |
396 | 282 | ||
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 895af06a667f..e202b0c24120 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -959,31 +959,18 @@ static int omap_iommu_probe(struct platform_device *pdev) | |||
959 | return err; | 959 | return err; |
960 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) | 960 | if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) |
961 | return -EINVAL; | 961 | return -EINVAL; |
962 | /* | ||
963 | * da_start and da_end are needed for omap-iovmm, so hardcode | ||
964 | * these values as used by OMAP3 ISP - the only user for | ||
965 | * omap-iovmm | ||
966 | */ | ||
967 | obj->da_start = 0; | ||
968 | obj->da_end = 0xfffff000; | ||
969 | if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) | 962 | if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) |
970 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; | 963 | obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; |
971 | } else { | 964 | } else { |
972 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | 965 | obj->nr_tlb_entries = pdata->nr_tlb_entries; |
973 | obj->name = pdata->name; | 966 | obj->name = pdata->name; |
974 | obj->da_start = pdata->da_start; | ||
975 | obj->da_end = pdata->da_end; | ||
976 | } | 967 | } |
977 | if (obj->da_end <= obj->da_start) | ||
978 | return -EINVAL; | ||
979 | 968 | ||
980 | obj->dev = &pdev->dev; | 969 | obj->dev = &pdev->dev; |
981 | obj->ctx = (void *)obj + sizeof(*obj); | 970 | obj->ctx = (void *)obj + sizeof(*obj); |
982 | 971 | ||
983 | spin_lock_init(&obj->iommu_lock); | 972 | spin_lock_init(&obj->iommu_lock); |
984 | mutex_init(&obj->mmap_lock); | ||
985 | spin_lock_init(&obj->page_table_lock); | 973 | spin_lock_init(&obj->page_table_lock); |
986 | INIT_LIST_HEAD(&obj->mmap); | ||
987 | 974 | ||
988 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 975 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
989 | obj->regbase = devm_ioremap_resource(obj->dev, res); | 976 | obj->regbase = devm_ioremap_resource(obj->dev, res); |
@@ -1291,7 +1278,7 @@ static void omap_iommu_remove_device(struct device *dev) | |||
1291 | kfree(arch_data); | 1278 | kfree(arch_data); |
1292 | } | 1279 | } |
1293 | 1280 | ||
1294 | static struct iommu_ops omap_iommu_ops = { | 1281 | static const struct iommu_ops omap_iommu_ops = { |
1295 | .domain_init = omap_iommu_domain_init, | 1282 | .domain_init = omap_iommu_domain_init, |
1296 | .domain_destroy = omap_iommu_domain_destroy, | 1283 | .domain_destroy = omap_iommu_domain_destroy, |
1297 | .attach_dev = omap_iommu_attach_dev, | 1284 | .attach_dev = omap_iommu_attach_dev, |
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index ea920c3e94ff..1275a822934b 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h | |||
@@ -46,12 +46,7 @@ struct omap_iommu { | |||
46 | 46 | ||
47 | int nr_tlb_entries; | 47 | int nr_tlb_entries; |
48 | 48 | ||
49 | struct list_head mmap; | ||
50 | struct mutex mmap_lock; /* protect mmap */ | ||
51 | |||
52 | void *ctx; /* iommu context: registers saved area */ | 49 | void *ctx; /* iommu context: registers saved area */ |
53 | u32 da_start; | ||
54 | u32 da_end; | ||
55 | 50 | ||
56 | int has_bus_err_back; | 51 | int has_bus_err_back; |
57 | }; | 52 | }; |
@@ -154,9 +149,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) | |||
154 | #define MMU_RAM_PADDR_MASK \ | 149 | #define MMU_RAM_PADDR_MASK \ |
155 | ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) | 150 | ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) |
156 | 151 | ||
152 | #define MMU_RAM_ENDIAN_SHIFT 9 | ||
157 | #define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT) | 153 | #define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT) |
154 | #define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT) | ||
158 | #define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT) | 155 | #define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT) |
159 | 156 | ||
157 | #define MMU_RAM_ELSZ_SHIFT 7 | ||
160 | #define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT) | 158 | #define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT) |
161 | #define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT) | 159 | #define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT) |
162 | #define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT) | 160 | #define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT) |
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c deleted file mode 100644 index d14725984153..000000000000 --- a/drivers/iommu/omap-iovmm.c +++ /dev/null | |||
@@ -1,791 +0,0 @@ | |||
1 | /* | ||
2 | * omap iommu: simple virtual address space management | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
5 | * | ||
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/scatterlist.h> | ||
19 | #include <linux/iommu.h> | ||
20 | #include <linux/omap-iommu.h> | ||
21 | #include <linux/platform_data/iommu-omap.h> | ||
22 | |||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/mach/map.h> | ||
25 | |||
26 | #include "omap-iopgtable.h" | ||
27 | #include "omap-iommu.h" | ||
28 | |||
29 | /* | ||
30 | * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma) | ||
31 | * | ||
32 | * lower 16 bit is used for h/w and upper 16 bit is for s/w. | ||
33 | */ | ||
34 | #define IOVMF_SW_SHIFT 16 | ||
35 | |||
36 | /* | ||
37 | * iovma: h/w flags derived from cam and ram attribute | ||
38 | */ | ||
39 | #define IOVMF_CAM_MASK (~((1 << 10) - 1)) | ||
40 | #define IOVMF_RAM_MASK (~IOVMF_CAM_MASK) | ||
41 | |||
42 | #define IOVMF_PGSZ_MASK (3 << 0) | ||
43 | #define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M | ||
44 | #define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K | ||
45 | #define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K | ||
46 | #define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M | ||
47 | |||
48 | #define IOVMF_ENDIAN_MASK (1 << 9) | ||
49 | #define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG | ||
50 | |||
51 | #define IOVMF_ELSZ_MASK (3 << 7) | ||
52 | #define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16 | ||
53 | #define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32 | ||
54 | #define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE | ||
55 | |||
56 | #define IOVMF_MIXED_MASK (1 << 6) | ||
57 | #define IOVMF_MIXED MMU_RAM_MIXED | ||
58 | |||
59 | /* | ||
60 | * iovma: s/w flags, used for mapping and umapping internally. | ||
61 | */ | ||
62 | #define IOVMF_MMIO (1 << IOVMF_SW_SHIFT) | ||
63 | #define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT) | ||
64 | #define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT) | ||
65 | |||
66 | /* "superpages" is supported just with physically linear pages */ | ||
67 | #define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT)) | ||
68 | #define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT)) | ||
69 | #define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT)) | ||
70 | |||
71 | #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) | ||
72 | |||
73 | static struct kmem_cache *iovm_area_cachep; | ||
74 | |||
75 | /* return the offset of the first scatterlist entry in a sg table */ | ||
76 | static unsigned int sgtable_offset(const struct sg_table *sgt) | ||
77 | { | ||
78 | if (!sgt || !sgt->nents) | ||
79 | return 0; | ||
80 | |||
81 | return sgt->sgl->offset; | ||
82 | } | ||
83 | |||
84 | /* return total bytes of sg buffers */ | ||
85 | static size_t sgtable_len(const struct sg_table *sgt) | ||
86 | { | ||
87 | unsigned int i, total = 0; | ||
88 | struct scatterlist *sg; | ||
89 | |||
90 | if (!sgt) | ||
91 | return 0; | ||
92 | |||
93 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
94 | size_t bytes; | ||
95 | |||
96 | bytes = sg->length + sg->offset; | ||
97 | |||
98 | if (!iopgsz_ok(bytes)) { | ||
99 | pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n", | ||
100 | __func__, i, bytes, sg->offset); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | if (i && sg->offset) { | ||
105 | pr_err("%s: sg[%d] offset not allowed in internal entries\n", | ||
106 | __func__, i); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | total += bytes; | ||
111 | } | ||
112 | |||
113 | return total; | ||
114 | } | ||
115 | #define sgtable_ok(x) (!!sgtable_len(x)) | ||
116 | |||
117 | static unsigned max_alignment(u32 addr) | ||
118 | { | ||
119 | int i; | ||
120 | unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; | ||
121 | for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) | ||
122 | ; | ||
123 | return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * calculate the optimal number sg elements from total bytes based on | ||
128 | * iommu superpages | ||
129 | */ | ||
130 | static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) | ||
131 | { | ||
132 | unsigned nr_entries = 0, ent_sz; | ||
133 | |||
134 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) { | ||
135 | pr_err("%s: wrong size %08x\n", __func__, bytes); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | while (bytes) { | ||
140 | ent_sz = max_alignment(da | pa); | ||
141 | ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); | ||
142 | nr_entries++; | ||
143 | da += ent_sz; | ||
144 | pa += ent_sz; | ||
145 | bytes -= ent_sz; | ||
146 | } | ||
147 | |||
148 | return nr_entries; | ||
149 | } | ||
150 | |||
151 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | ||
152 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, | ||
153 | u32 da, u32 pa) | ||
154 | { | ||
155 | unsigned int nr_entries; | ||
156 | int err; | ||
157 | struct sg_table *sgt; | ||
158 | |||
159 | if (!bytes) | ||
160 | return ERR_PTR(-EINVAL); | ||
161 | |||
162 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | ||
163 | return ERR_PTR(-EINVAL); | ||
164 | |||
165 | if (flags & IOVMF_LINEAR) { | ||
166 | nr_entries = sgtable_nents(bytes, da, pa); | ||
167 | if (!nr_entries) | ||
168 | return ERR_PTR(-EINVAL); | ||
169 | } else | ||
170 | nr_entries = bytes / PAGE_SIZE; | ||
171 | |||
172 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
173 | if (!sgt) | ||
174 | return ERR_PTR(-ENOMEM); | ||
175 | |||
176 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | ||
177 | if (err) { | ||
178 | kfree(sgt); | ||
179 | return ERR_PTR(err); | ||
180 | } | ||
181 | |||
182 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | ||
183 | |||
184 | return sgt; | ||
185 | } | ||
186 | |||
187 | /* free sg_table header(a kind of superblock) */ | ||
188 | static void sgtable_free(struct sg_table *sgt) | ||
189 | { | ||
190 | if (!sgt) | ||
191 | return; | ||
192 | |||
193 | sg_free_table(sgt); | ||
194 | kfree(sgt); | ||
195 | |||
196 | pr_debug("%s: sgt:%p\n", __func__, sgt); | ||
197 | } | ||
198 | |||
199 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | ||
200 | static void *vmap_sg(const struct sg_table *sgt) | ||
201 | { | ||
202 | u32 va; | ||
203 | size_t total; | ||
204 | unsigned int i; | ||
205 | struct scatterlist *sg; | ||
206 | struct vm_struct *new; | ||
207 | const struct mem_type *mtype; | ||
208 | |||
209 | mtype = get_mem_type(MT_DEVICE); | ||
210 | if (!mtype) | ||
211 | return ERR_PTR(-EINVAL); | ||
212 | |||
213 | total = sgtable_len(sgt); | ||
214 | if (!total) | ||
215 | return ERR_PTR(-EINVAL); | ||
216 | |||
217 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | ||
218 | if (!new) | ||
219 | return ERR_PTR(-ENOMEM); | ||
220 | va = (u32)new->addr; | ||
221 | |||
222 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
223 | size_t bytes; | ||
224 | u32 pa; | ||
225 | int err; | ||
226 | |||
227 | pa = sg_phys(sg) - sg->offset; | ||
228 | bytes = sg->length + sg->offset; | ||
229 | |||
230 | BUG_ON(bytes != PAGE_SIZE); | ||
231 | |||
232 | err = ioremap_page(va, pa, mtype); | ||
233 | if (err) | ||
234 | goto err_out; | ||
235 | |||
236 | va += bytes; | ||
237 | } | ||
238 | |||
239 | flush_cache_vmap((unsigned long)new->addr, | ||
240 | (unsigned long)(new->addr + total)); | ||
241 | return new->addr; | ||
242 | |||
243 | err_out: | ||
244 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | ||
245 | vunmap(new->addr); | ||
246 | return ERR_PTR(-EAGAIN); | ||
247 | } | ||
248 | |||
249 | static inline void vunmap_sg(const void *va) | ||
250 | { | ||
251 | vunmap(va); | ||
252 | } | ||
253 | |||
254 | static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, | ||
255 | const u32 da) | ||
256 | { | ||
257 | struct iovm_struct *tmp; | ||
258 | |||
259 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
260 | if ((da >= tmp->da_start) && (da < tmp->da_end)) { | ||
261 | size_t len; | ||
262 | |||
263 | len = tmp->da_end - tmp->da_start; | ||
264 | |||
265 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", | ||
266 | __func__, tmp->da_start, da, tmp->da_end, len, | ||
267 | tmp->flags); | ||
268 | |||
269 | return tmp; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | return NULL; | ||
274 | } | ||
275 | |||
276 | /** | ||
277 | * omap_find_iovm_area - find iovma which includes @da | ||
278 | * @dev: client device | ||
279 | * @da: iommu device virtual address | ||
280 | * | ||
281 | * Find the existing iovma starting at @da | ||
282 | */ | ||
283 | struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da) | ||
284 | { | ||
285 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
286 | struct iovm_struct *area; | ||
287 | |||
288 | mutex_lock(&obj->mmap_lock); | ||
289 | area = __find_iovm_area(obj, da); | ||
290 | mutex_unlock(&obj->mmap_lock); | ||
291 | |||
292 | return area; | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(omap_find_iovm_area); | ||
295 | |||
296 | /* | ||
297 | * This finds the hole(area) which fits the requested address and len | ||
298 | * in iovmas mmap, and returns the new allocated iovma. | ||
299 | */ | ||
300 | static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, | ||
301 | size_t bytes, u32 flags) | ||
302 | { | ||
303 | struct iovm_struct *new, *tmp; | ||
304 | u32 start, prev_end, alignment; | ||
305 | |||
306 | if (!obj || !bytes) | ||
307 | return ERR_PTR(-EINVAL); | ||
308 | |||
309 | start = da; | ||
310 | alignment = PAGE_SIZE; | ||
311 | |||
312 | if (~flags & IOVMF_DA_FIXED) { | ||
313 | /* Don't map address 0 */ | ||
314 | start = obj->da_start ? obj->da_start : alignment; | ||
315 | |||
316 | if (flags & IOVMF_LINEAR) | ||
317 | alignment = iopgsz_max(bytes); | ||
318 | start = roundup(start, alignment); | ||
319 | } else if (start < obj->da_start || start > obj->da_end || | ||
320 | obj->da_end - start < bytes) { | ||
321 | return ERR_PTR(-EINVAL); | ||
322 | } | ||
323 | |||
324 | tmp = NULL; | ||
325 | if (list_empty(&obj->mmap)) | ||
326 | goto found; | ||
327 | |||
328 | prev_end = 0; | ||
329 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
330 | |||
331 | if (prev_end > start) | ||
332 | break; | ||
333 | |||
334 | if (tmp->da_start > start && (tmp->da_start - start) >= bytes) | ||
335 | goto found; | ||
336 | |||
337 | if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) | ||
338 | start = roundup(tmp->da_end + 1, alignment); | ||
339 | |||
340 | prev_end = tmp->da_end; | ||
341 | } | ||
342 | |||
343 | if ((start >= prev_end) && (obj->da_end - start >= bytes)) | ||
344 | goto found; | ||
345 | |||
346 | dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", | ||
347 | __func__, da, bytes, flags); | ||
348 | |||
349 | return ERR_PTR(-EINVAL); | ||
350 | |||
351 | found: | ||
352 | new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); | ||
353 | if (!new) | ||
354 | return ERR_PTR(-ENOMEM); | ||
355 | |||
356 | new->iommu = obj; | ||
357 | new->da_start = start; | ||
358 | new->da_end = start + bytes; | ||
359 | new->flags = flags; | ||
360 | |||
361 | /* | ||
362 | * keep ascending order of iovmas | ||
363 | */ | ||
364 | if (tmp) | ||
365 | list_add_tail(&new->list, &tmp->list); | ||
366 | else | ||
367 | list_add(&new->list, &obj->mmap); | ||
368 | |||
369 | dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", | ||
370 | __func__, new->da_start, start, new->da_end, bytes, flags); | ||
371 | |||
372 | return new; | ||
373 | } | ||
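The search above is a first-fit walk over a list kept in ascending device-address order. A stripped-down, stand-alone illustration of the same idea, with hypothetical names and none of the locking, alignment, or flag handling (0 doubles as a "no space" result here):

        #include <linux/list.h>
        #include <linux/types.h>

        struct range { u32 start, end; struct list_head list; };

        /* Return the first gap of at least @bytes in [@lo, @hi), or 0 if none fits. */
        static u32 first_fit(struct list_head *ranges, u32 lo, u32 hi, size_t bytes)
        {
                struct range *r;
                u32 start = lo;

                list_for_each_entry(r, ranges, list) {
                        if (r->start >= start && r->start - start >= bytes)
                                return start;           /* hole before this range */
                        if (r->end > start)
                                start = r->end;         /* skip past the occupied range */
                }
                return (hi - start >= bytes) ? start : 0;
        }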
374 | |||
375 | static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) | ||
376 | { | ||
377 | size_t bytes; | ||
378 | |||
379 | BUG_ON(!obj || !area); | ||
380 | |||
381 | bytes = area->da_end - area->da_start; | ||
382 | |||
383 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | ||
384 | __func__, area->da_start, area->da_end, bytes, area->flags); | ||
385 | |||
386 | list_del(&area->list); | ||
387 | kmem_cache_free(iovm_area_cachep, area); | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * omap_da_to_va - convert (d) to (v) | ||
392 | * @dev: client device | ||
393 | * @da: iommu device virtual address | ||
394 | * | ||
395 | * Returns the MPU virtual address which corresponds to the given device | ||
396 | * virtual address @da, or NULL if no iovma contains it. | ||
397 | */ | ||
398 | void *omap_da_to_va(struct device *dev, u32 da) | ||
399 | { | ||
400 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
401 | void *va = NULL; | ||
402 | struct iovm_struct *area; | ||
403 | |||
404 | mutex_lock(&obj->mmap_lock); | ||
405 | |||
406 | area = __find_iovm_area(obj, da); | ||
407 | if (!area) { | ||
408 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
409 | goto out; | ||
410 | } | ||
411 | va = area->va; | ||
412 | out: | ||
413 | mutex_unlock(&obj->mmap_lock); | ||
414 | |||
415 | return va; | ||
416 | } | ||
417 | EXPORT_SYMBOL_GPL(omap_da_to_va); | ||
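For reference, a client of the interface being removed here would typically pair these lookups as below. A minimal sketch, assuming a hypothetical client device pointer 'rproc_dev'; the declarations used live in linux/omap-iommu.h prior to this removal:

        #include <linux/omap-iommu.h>

        /* Sketch: look up the MPU virtual address backing a device virtual address. */
        static void *lookup_backing_va(struct device *rproc_dev, u32 da)
        {
                struct iovm_struct *area;

                area = omap_find_iovm_area(rproc_dev, da);
                if (!area)
                        return NULL;    /* no iovma covers this da */

                return omap_da_to_va(rproc_dev, da);
        }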
418 | |||
419 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | ||
420 | { | ||
421 | unsigned int i; | ||
422 | struct scatterlist *sg; | ||
423 | void *va = _va; | ||
424 | void *va_end; | ||
425 | |||
426 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
427 | struct page *pg; | ||
428 | const size_t bytes = PAGE_SIZE; | ||
429 | |||
430 | /* | ||
431 | * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' | ||
432 | */ | ||
433 | pg = vmalloc_to_page(va); | ||
434 | BUG_ON(!pg); | ||
435 | sg_set_page(sg, pg, bytes, 0); | ||
436 | |||
437 | va += bytes; | ||
438 | } | ||
439 | |||
440 | va_end = _va + PAGE_SIZE * i; | ||
441 | } | ||
442 | |||
443 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | ||
444 | { | ||
445 | /* | ||
446 | * This is not strictly necessary; it only exists to keep the vmalloc | ||
447 | * fill/drain paths symmetric and readable. | ||
448 | */ | ||
449 | BUG_ON(!sgt); | ||
450 | } | ||
451 | |||
452 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | ||
453 | static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, | ||
454 | const struct sg_table *sgt, u32 flags) | ||
455 | { | ||
456 | int err; | ||
457 | unsigned int i, j; | ||
458 | struct scatterlist *sg; | ||
459 | u32 da = new->da_start; | ||
460 | |||
461 | if (!domain || !sgt) | ||
462 | return -EINVAL; | ||
463 | |||
464 | BUG_ON(!sgtable_ok(sgt)); | ||
465 | |||
466 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
467 | u32 pa; | ||
468 | size_t bytes; | ||
469 | |||
470 | pa = sg_phys(sg) - sg->offset; | ||
471 | bytes = sg->length + sg->offset; | ||
472 | |||
473 | flags &= ~IOVMF_PGSZ_MASK; | ||
474 | |||
475 | if (bytes_to_iopgsz(bytes) < 0) | ||
476 | goto err_out; | ||
477 | |||
478 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | ||
479 | i, da, pa, bytes); | ||
480 | |||
481 | err = iommu_map(domain, da, pa, bytes, flags); | ||
482 | if (err) | ||
483 | goto err_out; | ||
484 | |||
485 | da += bytes; | ||
486 | } | ||
487 | return 0; | ||
488 | |||
489 | err_out: | ||
490 | da = new->da_start; | ||
491 | |||
492 | for_each_sg(sgt->sgl, sg, i, j) { | ||
493 | size_t bytes; | ||
494 | |||
495 | bytes = sg->length + sg->offset; | ||
496 | |||
497 | /* ignore failures.. we're already handling one */ | ||
498 | iommu_unmap(domain, da, bytes); | ||
499 | |||
500 | da += bytes; | ||
501 | } | ||
502 | return err; | ||
503 | } | ||
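The loop above is the usual map-then-unwind pattern on the generic IOMMU API (iommu_map()/iommu_unmap()). A reduced sketch of that pattern, with hypothetical names and the per-entry page-size checks omitted:

        #include <linux/iommu.h>
        #include <linux/scatterlist.h>

        /* Sketch: map every sg entry at increasing addresses; undo on failure. */
        static int map_sg_at(struct iommu_domain *dom, struct sg_table *sgt,
                             unsigned long da, int prot)
        {
                struct scatterlist *sg;
                unsigned long cur = da;
                int i, err;

                for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                        err = iommu_map(dom, cur, sg_phys(sg), sg->length, prot);
                        if (err)
                                goto unwind;
                        cur += sg->length;
                }
                return 0;

        unwind:
                iommu_unmap(dom, da, cur - da); /* release whatever was mapped */
                return err;
        }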
504 | |||
505 | /* release 'da' <-> 'pa' mapping */ | ||
506 | static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, | ||
507 | struct iovm_struct *area) | ||
508 | { | ||
509 | u32 start; | ||
510 | size_t total = area->da_end - area->da_start; | ||
511 | const struct sg_table *sgt = area->sgt; | ||
512 | struct scatterlist *sg; | ||
513 | int i; | ||
514 | size_t unmapped; | ||
515 | |||
516 | BUG_ON(!sgtable_ok(sgt)); | ||
517 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | ||
518 | |||
519 | start = area->da_start; | ||
520 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
521 | size_t bytes; | ||
522 | |||
523 | bytes = sg->length + sg->offset; | ||
524 | |||
525 | unmapped = iommu_unmap(domain, start, bytes); | ||
526 | if (unmapped < bytes) | ||
527 | break; | ||
528 | |||
529 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | ||
530 | __func__, start, bytes, area->flags); | ||
531 | |||
532 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
533 | |||
534 | total -= bytes; | ||
535 | start += bytes; | ||
536 | } | ||
537 | BUG_ON(total); | ||
538 | } | ||
539 | |||
540 | /* common helper used by all unmap paths */ | ||
541 | static struct sg_table *unmap_vm_area(struct iommu_domain *domain, | ||
542 | struct omap_iommu *obj, const u32 da, | ||
543 | void (*fn)(const void *), u32 flags) | ||
544 | { | ||
545 | struct sg_table *sgt = NULL; | ||
546 | struct iovm_struct *area; | ||
547 | |||
548 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | ||
549 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | ||
550 | return NULL; | ||
551 | } | ||
552 | |||
553 | mutex_lock(&obj->mmap_lock); | ||
554 | |||
555 | area = __find_iovm_area(obj, da); | ||
556 | if (!area) { | ||
557 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
558 | goto out; | ||
559 | } | ||
560 | |||
561 | if ((area->flags & flags) != flags) { | ||
562 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | ||
563 | area->flags); | ||
564 | goto out; | ||
565 | } | ||
566 | sgt = (struct sg_table *)area->sgt; | ||
567 | |||
568 | unmap_iovm_area(domain, obj, area); | ||
569 | |||
570 | fn(area->va); | ||
571 | |||
572 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | ||
573 | area->da_start, da, area->da_end, | ||
574 | area->da_end - area->da_start, area->flags); | ||
575 | |||
576 | free_iovm_area(obj, area); | ||
577 | out: | ||
578 | mutex_unlock(&obj->mmap_lock); | ||
579 | |||
580 | return sgt; | ||
581 | } | ||
582 | |||
583 | static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, | ||
584 | u32 da, const struct sg_table *sgt, void *va, | ||
585 | size_t bytes, u32 flags) | ||
586 | { | ||
587 | int err = -ENOMEM; | ||
588 | struct iovm_struct *new; | ||
589 | |||
590 | mutex_lock(&obj->mmap_lock); | ||
591 | |||
592 | new = alloc_iovm_area(obj, da, bytes, flags); | ||
593 | if (IS_ERR(new)) { | ||
594 | err = PTR_ERR(new); | ||
595 | goto err_alloc_iovma; | ||
596 | } | ||
597 | new->va = va; | ||
598 | new->sgt = sgt; | ||
599 | |||
600 | if (map_iovm_area(domain, new, sgt, new->flags)) | ||
601 | goto err_map; | ||
602 | |||
603 | mutex_unlock(&obj->mmap_lock); | ||
604 | |||
605 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | ||
606 | __func__, new->da_start, bytes, new->flags, va); | ||
607 | |||
608 | return new->da_start; | ||
609 | |||
610 | err_map: | ||
611 | free_iovm_area(obj, new); | ||
612 | err_alloc_iovma: | ||
613 | mutex_unlock(&obj->mmap_lock); | ||
614 | return err; | ||
615 | } | ||
616 | |||
617 | static inline u32 | ||
618 | __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, | ||
619 | u32 da, const struct sg_table *sgt, | ||
620 | void *va, size_t bytes, u32 flags) | ||
621 | { | ||
622 | return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); | ||
623 | } | ||
624 | |||
625 | /** | ||
626 | * omap_iommu_vmap - (d)-(p)-(v) address mapper | ||
627 | * @domain: iommu domain | ||
628 | * @dev: client device | ||
629 | * @sgt: address of scatter gather table | ||
630 | * @flags: iovma and page property | ||
631 | * | ||
632 | * Creates 1-n-1 mapping with given @sgt and returns @da. | ||
633 | * All @sgt elements must be I/O page size aligned. | ||
634 | */ | ||
635 | u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da, | ||
636 | const struct sg_table *sgt, u32 flags) | ||
637 | { | ||
638 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
639 | size_t bytes; | ||
640 | void *va = NULL; | ||
641 | |||
642 | if (!obj || !obj->dev || !sgt) | ||
643 | return -EINVAL; | ||
644 | |||
645 | bytes = sgtable_len(sgt); | ||
646 | if (!bytes) | ||
647 | return -EINVAL; | ||
648 | bytes = PAGE_ALIGN(bytes); | ||
649 | |||
650 | if (flags & IOVMF_MMIO) { | ||
651 | va = vmap_sg(sgt); | ||
652 | if (IS_ERR(va)) | ||
653 | return PTR_ERR(va); | ||
654 | } | ||
655 | |||
656 | flags |= IOVMF_DISCONT; | ||
657 | flags |= IOVMF_MMIO; | ||
658 | |||
659 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
660 | if (IS_ERR_VALUE(da)) | ||
661 | vunmap_sg(va); | ||
662 | |||
663 | return da + sgtable_offset(sgt); | ||
664 | } | ||
665 | EXPORT_SYMBOL_GPL(omap_iommu_vmap); | ||
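Before this removal, omap_iommu_vmap() was paired with omap_iommu_vunmap(). A hedged caller sketch; the device address 0x80000000, the function name and the surrounding setup are purely illustrative:

        /* Sketch: map a caller-built sg_table at a fixed device address. */
        static int map_fw_region(struct iommu_domain *domain, struct device *dev,
                                 struct sg_table *sgt)
        {
                u32 da;

                da = omap_iommu_vmap(domain, dev, 0x80000000, sgt,
                                     IOVMF_DA_FIXED | IOVMF_MMIO);
                if (IS_ERR_VALUE(da))
                        return (int)da;

                /* ... use the mapping, then tear it down ... */
                sgt = omap_iommu_vunmap(domain, dev, da);
                /* the returned sgt is the caller's own table; the caller frees it */
                return 0;
        }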
666 | |||
667 | /** | ||
668 | * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' | ||
669 | * @domain: iommu domain | ||
670 | * @dev: client device | ||
671 | * @da: iommu device virtual address | ||
672 | * | ||
673 | * Free the iommu virtually contiguous memory area starting at | ||
674 | * @da, which was returned by 'omap_iommu_vmap()'. | ||
675 | */ | ||
676 | struct sg_table * | ||
677 | omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da) | ||
678 | { | ||
679 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
680 | struct sg_table *sgt; | ||
681 | /* | ||
682 | * The 'sgt' was allocated by the caller before 'omap_iommu_vmap()' was | ||
683 | * called; just return it here so the caller can free it. | ||
684 | */ | ||
685 | da &= PAGE_MASK; | ||
686 | sgt = unmap_vm_area(domain, obj, da, vunmap_sg, | ||
687 | IOVMF_DISCONT | IOVMF_MMIO); | ||
688 | if (!sgt) | ||
689 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
690 | return sgt; | ||
691 | } | ||
692 | EXPORT_SYMBOL_GPL(omap_iommu_vunmap); | ||
693 | |||
694 | /** | ||
695 | * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | ||
696 | * @dev: client device | ||
697 | * @da: contiguous iommu virtual memory | ||
698 | * @bytes: allocation size | ||
699 | * @flags: iovma and page property | ||
700 | * | ||
701 | * Allocates @bytes linearly, creates a 1-n-1 mapping and returns | ||
702 | * @da, which may be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
703 | */ | ||
704 | u32 | ||
705 | omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da, | ||
706 | size_t bytes, u32 flags) | ||
707 | { | ||
708 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
709 | void *va; | ||
710 | struct sg_table *sgt; | ||
711 | |||
712 | if (!obj || !obj->dev || !bytes) | ||
713 | return -EINVAL; | ||
714 | |||
715 | bytes = PAGE_ALIGN(bytes); | ||
716 | |||
717 | va = vmalloc(bytes); | ||
718 | if (!va) | ||
719 | return -ENOMEM; | ||
720 | |||
721 | flags |= IOVMF_DISCONT; | ||
722 | flags |= IOVMF_ALLOC; | ||
723 | |||
724 | sgt = sgtable_alloc(bytes, flags, da, 0); | ||
725 | if (IS_ERR(sgt)) { | ||
726 | da = PTR_ERR(sgt); | ||
727 | goto err_sgt_alloc; | ||
728 | } | ||
729 | sgtable_fill_vmalloc(sgt, va); | ||
730 | |||
731 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
732 | if (IS_ERR_VALUE(da)) | ||
733 | goto err_iommu_vmap; | ||
734 | |||
735 | return da; | ||
736 | |||
737 | err_iommu_vmap: | ||
738 | sgtable_drain_vmalloc(sgt); | ||
739 | sgtable_free(sgt); | ||
740 | err_sgt_alloc: | ||
741 | vfree(va); | ||
742 | return da; | ||
743 | } | ||
744 | EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); | ||
745 | |||
746 | /** | ||
747 | * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' | ||
748 | * @dev: client device | ||
749 | * @da: iommu device virtual address | ||
750 | * | ||
751 | * Frees the iommu virtually contiguous memory area starting at | ||
752 | * @da, as obtained from 'omap_iommu_vmalloc()'. | ||
753 | */ | ||
754 | void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev, | ||
755 | const u32 da) | ||
756 | { | ||
757 | struct omap_iommu *obj = dev_to_omap_iommu(dev); | ||
758 | struct sg_table *sgt; | ||
759 | |||
760 | sgt = unmap_vm_area(domain, obj, da, vfree, | ||
761 | IOVMF_DISCONT | IOVMF_ALLOC); | ||
762 | if (!sgt) | ||
763 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
764 | sgtable_free(sgt); | ||
765 | } | ||
766 | EXPORT_SYMBOL_GPL(omap_iommu_vfree); | ||
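omap_iommu_vmalloc() and omap_iommu_vfree() formed the corresponding allocate-and-map pair. A minimal sketch of their use; the 1 MiB size is illustrative and the zero device address lets the driver pick a free area because IOVMF_DA_FIXED is not set:

        /* Sketch: back 1 MiB of vmalloc memory with an IOMMU mapping. */
        static u32 alloc_shared_buffer(struct iommu_domain *domain, struct device *dev)
        {
                u32 da;

                da = omap_iommu_vmalloc(domain, dev, 0, 1 << 20, 0);
                if (IS_ERR_VALUE(da))
                        return da;

                /* ... hand 'da' to the remote side ... */
                return da;
        }

        /* Teardown: omap_iommu_vfree(domain, dev, da) unmaps and frees the memory. */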
767 | |||
768 | static int __init iovmm_init(void) | ||
769 | { | ||
770 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
771 | struct kmem_cache *p; | ||
772 | |||
773 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | ||
774 | flags, NULL); | ||
775 | if (!p) | ||
776 | return -ENOMEM; | ||
777 | iovm_area_cachep = p; | ||
778 | |||
779 | return 0; | ||
780 | } | ||
781 | module_init(iovmm_init); | ||
782 | |||
783 | static void __exit iovmm_exit(void) | ||
784 | { | ||
785 | kmem_cache_destroy(iovm_area_cachep); | ||
786 | } | ||
787 | module_exit(iovmm_exit); | ||
788 | |||
789 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | ||
790 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
791 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h deleted file mode 100644 index 352d80ae7443..000000000000 --- a/drivers/iommu/pci.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright (C) 2013 Red Hat, Inc. | ||
16 | * Copyright (C) 2013 Freescale Semiconductor, Inc. | ||
17 | * | ||
18 | */ | ||
19 | #ifndef __IOMMU_PCI_H | ||
20 | #define __IOMMU_PCI_H | ||
21 | |||
22 | /* Helper function for swapping pci device reference */ | ||
23 | static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) | ||
24 | { | ||
25 | pci_dev_put(*from); | ||
26 | *from = to; | ||
27 | } | ||
28 | |||
29 | #endif /* __IOMMU_PCI_H */ | ||
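swap_pci_ref() was a private helper of the x86 IOMMU drivers for walking up the PCI hierarchy while holding exactly one device reference at a time. A hedged sketch of that pattern, not the actual caller removed by this series:

        /* Sketch: climb from @pdev towards the root, always holding one reference. */
        static struct pci_dev *find_upstream_bridge_owner(struct pci_dev *pdev)
        {
                struct pci_dev *dev = pci_dev_get(pdev);

                while (dev->bus->self) {
                        /* drop 'dev', hold the bridge above it instead */
                        swap_pci_ref(&dev, pci_dev_get(dev->bus->self));
                }
                return dev;     /* caller must pci_dev_put() the result */
        }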
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c index 464acda0bbc4..1333e6fb3405 100644 --- a/drivers/iommu/shmobile-iommu.c +++ b/drivers/iommu/shmobile-iommu.c | |||
@@ -354,7 +354,7 @@ static int shmobile_iommu_add_device(struct device *dev) | |||
354 | return 0; | 354 | return 0; |
355 | } | 355 | } |
356 | 356 | ||
357 | static struct iommu_ops shmobile_iommu_ops = { | 357 | static const struct iommu_ops shmobile_iommu_ops = { |
358 | .domain_init = shmobile_iommu_domain_init, | 358 | .domain_init = shmobile_iommu_domain_init, |
359 | .domain_destroy = shmobile_iommu_domain_destroy, | 359 | .domain_destroy = shmobile_iommu_domain_destroy, |
360 | .attach_dev = shmobile_iommu_attach_device, | 360 | .attach_dev = shmobile_iommu_attach_device, |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index dba1a9fd5070..b10a8ecede8e 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
@@ -309,7 +309,7 @@ static int gart_iommu_domain_has_cap(struct iommu_domain *domain, | |||
309 | return 0; | 309 | return 0; |
310 | } | 310 | } |
311 | 311 | ||
312 | static struct iommu_ops gart_iommu_ops = { | 312 | static const struct iommu_ops gart_iommu_ops = { |
313 | .domain_init = gart_iommu_domain_init, | 313 | .domain_init = gart_iommu_domain_init, |
314 | .domain_destroy = gart_iommu_domain_destroy, | 314 | .domain_destroy = gart_iommu_domain_destroy, |
315 | .attach_dev = gart_iommu_attach_dev, | 315 | .attach_dev = gart_iommu_attach_dev, |
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 605b5b46a903..792da5ea6d12 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -947,7 +947,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain) | |||
947 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); | 947 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); |
948 | } | 948 | } |
949 | 949 | ||
950 | static struct iommu_ops smmu_iommu_ops = { | 950 | static const struct iommu_ops smmu_iommu_ops = { |
951 | .domain_init = smmu_iommu_domain_init, | 951 | .domain_init = smmu_iommu_domain_init, |
952 | .domain_destroy = smmu_iommu_domain_destroy, | 952 | .domain_destroy = smmu_iommu_domain_destroy, |
953 | .attach_dev = smmu_iommu_attach_dev, | 953 | .attach_dev = smmu_iommu_attach_dev, |
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 15f6b9edd0b1..2b08e79f5100 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h | |||
@@ -119,6 +119,13 @@ typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, | |||
119 | extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, | 119 | extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, |
120 | amd_iommu_invalid_ppr_cb cb); | 120 | amd_iommu_invalid_ppr_cb cb); |
121 | 121 | ||
122 | #define PPR_FAULT_EXEC (1 << 1) | ||
123 | #define PPR_FAULT_READ (1 << 2) | ||
124 | #define PPR_FAULT_WRITE (1 << 5) | ||
125 | #define PPR_FAULT_USER (1 << 6) | ||
126 | #define PPR_FAULT_RSVD (1 << 7) | ||
127 | #define PPR_FAULT_GN (1 << 8) | ||
128 | |||
122 | /** | 129 | /** |
123 | * amd_iommu_device_info() - Get information about IOMMUv2 support of a | 130 | * amd_iommu_device_info() - Get information about IOMMUv2 support of a |
124 | * PCI device | 131 | * PCI device |
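The PPR_FAULT_* bits added above describe the faulting access in the flags word handed to a driver's invalid-PPR callback. A hedged example of decoding them; the callback's full parameter list is assumed from the amd_iommu_invalid_ppr_cb typedef in this header, and registration via amd_iommu_set_invalid_ppr_cb() is not shown:

        #include <linux/amd-iommu.h>
        #include <linux/pci.h>

        /* Sketch: log the unhandled fault and report the access as invalid. */
        static int my_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
                                     unsigned long address, u16 flags)
        {
                dev_warn(&pdev->dev, "unhandled PPR: pasid %d addr 0x%lx %s%s\n",
                         pasid, address,
                         (flags & PPR_FAULT_WRITE) ? "W" : "R",
                         (flags & PPR_FAULT_EXEC) ? "X" : "");

                return AMD_IOMMU_INV_PRI_RSP_INVALID;
        }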
diff --git a/include/linux/device.h b/include/linux/device.h index b0aab0d6be7e..43d183aeb25b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -124,7 +124,7 @@ struct bus_type { | |||
124 | 124 | ||
125 | const struct dev_pm_ops *pm; | 125 | const struct dev_pm_ops *pm; |
126 | 126 | ||
127 | struct iommu_ops *iommu_ops; | 127 | const struct iommu_ops *iommu_ops; |
128 | 128 | ||
129 | struct subsys_private *p; | 129 | struct subsys_private *p; |
130 | struct lock_class_key lock_key; | 130 | struct lock_class_key lock_key; |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 23c8db129560..1deece46a0ca 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -114,22 +114,30 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | |||
114 | /* Intel IOMMU detection */ | 114 | /* Intel IOMMU detection */ |
115 | extern int detect_intel_iommu(void); | 115 | extern int detect_intel_iommu(void); |
116 | extern int enable_drhd_fault_handling(void); | 116 | extern int enable_drhd_fault_handling(void); |
117 | #else | 117 | |
118 | struct dmar_pci_notify_info; | 118 | #ifdef CONFIG_INTEL_IOMMU |
119 | static inline int detect_intel_iommu(void) | 119 | extern int iommu_detected, no_iommu; |
120 | extern int intel_iommu_init(void); | ||
121 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | ||
122 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | ||
123 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | ||
124 | #else /* !CONFIG_INTEL_IOMMU: */ | ||
125 | static inline int intel_iommu_init(void) { return -ENODEV; } | ||
126 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) | ||
120 | { | 127 | { |
121 | return -ENODEV; | 128 | return 0; |
122 | } | 129 | } |
123 | 130 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | |
124 | static inline int dmar_table_init(void) | ||
125 | { | 131 | { |
126 | return -ENODEV; | 132 | return 0; |
127 | } | 133 | } |
128 | static inline int enable_drhd_fault_handling(void) | 134 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) |
129 | { | 135 | { |
130 | return -1; | 136 | return 0; |
131 | } | 137 | } |
132 | #endif /* !CONFIG_DMAR_TABLE */ | 138 | #endif /* CONFIG_INTEL_IOMMU */ |
139 | |||
140 | #endif /* CONFIG_DMAR_TABLE */ | ||
133 | 141 | ||
134 | struct irte { | 142 | struct irte { |
135 | union { | 143 | union { |
@@ -177,26 +185,4 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu); | |||
177 | extern irqreturn_t dmar_fault(int irq, void *dev_id); | 185 | extern irqreturn_t dmar_fault(int irq, void *dev_id); |
178 | extern int arch_setup_dmar_msi(unsigned int irq); | 186 | extern int arch_setup_dmar_msi(unsigned int irq); |
179 | 187 | ||
180 | #ifdef CONFIG_INTEL_IOMMU | ||
181 | extern int iommu_detected, no_iommu; | ||
182 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | ||
183 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | ||
184 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | ||
185 | extern int intel_iommu_init(void); | ||
186 | #else /* !CONFIG_INTEL_IOMMU: */ | ||
187 | static inline int intel_iommu_init(void) { return -ENODEV; } | ||
188 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) | ||
189 | { | ||
190 | return 0; | ||
191 | } | ||
192 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | ||
193 | { | ||
194 | return 0; | ||
195 | } | ||
196 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | ||
197 | { | ||
198 | return 0; | ||
199 | } | ||
200 | #endif /* CONFIG_INTEL_IOMMU */ | ||
201 | |||
202 | #endif /* __DMAR_H__ */ | 188 | #endif /* __DMAR_H__ */ |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 0a2da5188217..a65208a8fe18 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -336,6 +336,7 @@ struct intel_iommu { | |||
336 | #ifdef CONFIG_IRQ_REMAP | 336 | #ifdef CONFIG_IRQ_REMAP |
337 | struct ir_table *ir_table; /* Interrupt remapping info */ | 337 | struct ir_table *ir_table; /* Interrupt remapping info */ |
338 | #endif | 338 | #endif |
339 | struct device *iommu_dev; /* IOMMU-sysfs device */ | ||
339 | int node; | 340 | int node; |
340 | }; | 341 | }; |
341 | 342 | ||
@@ -365,4 +366,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | |||
365 | 366 | ||
366 | extern int dmar_ir_support(void); | 367 | extern int dmar_ir_support(void); |
367 | 368 | ||
369 | extern const struct attribute_group *intel_iommu_groups[]; | ||
370 | |||
368 | #endif | 371 | #endif |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b96a5b2136e4..20f9a527922a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -50,7 +50,7 @@ struct iommu_domain_geometry { | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | struct iommu_domain { | 52 | struct iommu_domain { |
53 | struct iommu_ops *ops; | 53 | const struct iommu_ops *ops; |
54 | void *priv; | 54 | void *priv; |
55 | iommu_fault_handler_t handler; | 55 | iommu_fault_handler_t handler; |
56 | void *handler_token; | 56 | void *handler_token; |
@@ -140,7 +140,7 @@ struct iommu_ops { | |||
140 | #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */ | 140 | #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */ |
141 | #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ | 141 | #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ |
142 | 142 | ||
143 | extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); | 143 | extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); |
144 | extern bool iommu_present(struct bus_type *bus); | 144 | extern bool iommu_present(struct bus_type *bus); |
145 | extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); | 145 | extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); |
146 | extern struct iommu_group *iommu_group_get_by_id(int id); | 146 | extern struct iommu_group *iommu_group_get_by_id(int id); |
@@ -181,11 +181,18 @@ extern int iommu_group_register_notifier(struct iommu_group *group, | |||
181 | extern int iommu_group_unregister_notifier(struct iommu_group *group, | 181 | extern int iommu_group_unregister_notifier(struct iommu_group *group, |
182 | struct notifier_block *nb); | 182 | struct notifier_block *nb); |
183 | extern int iommu_group_id(struct iommu_group *group); | 183 | extern int iommu_group_id(struct iommu_group *group); |
184 | extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); | ||
184 | 185 | ||
185 | extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, | 186 | extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, |
186 | void *data); | 187 | void *data); |
187 | extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, | 188 | extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, |
188 | void *data); | 189 | void *data); |
190 | struct device *iommu_device_create(struct device *parent, void *drvdata, | ||
191 | const struct attribute_group **groups, | ||
192 | const char *fmt, ...); | ||
193 | void iommu_device_destroy(struct device *dev); | ||
194 | int iommu_device_link(struct device *dev, struct device *link); | ||
195 | void iommu_device_unlink(struct device *dev, struct device *link); | ||
189 | 196 | ||
190 | /* Window handling function prototypes */ | 197 | /* Window handling function prototypes */ |
191 | extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, | 198 | extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
@@ -396,6 +403,27 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain, | |||
396 | return -EINVAL; | 403 | return -EINVAL; |
397 | } | 404 | } |
398 | 405 | ||
406 | static inline struct device *iommu_device_create(struct device *parent, | ||
407 | void *drvdata, | ||
408 | const struct attribute_group **groups, | ||
409 | const char *fmt, ...) | ||
410 | { | ||
411 | return ERR_PTR(-ENODEV); | ||
412 | } | ||
413 | |||
414 | static inline void iommu_device_destroy(struct device *dev) | ||
415 | { | ||
416 | } | ||
417 | |||
418 | static inline int iommu_device_link(struct device *dev, struct device *link) | ||
419 | { | ||
420 | return -EINVAL; | ||
421 | } | ||
422 | |||
423 | static inline void iommu_device_unlink(struct device *dev, struct device *link) | ||
424 | { | ||
425 | } | ||
426 | |||
399 | #endif /* CONFIG_IOMMU_API */ | 427 | #endif /* CONFIG_IOMMU_API */ |
400 | 428 | ||
401 | #endif /* __LINUX_IOMMU_H */ | 429 | #endif /* __LINUX_IOMMU_H */ |
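The iommu_device_create()/iommu_device_link() declarations added above back the new IOMMU sysfs class. A rough sketch of how a driver might wire them up, under assumed names ('struct my_iommu' and its id field are hypothetical):

        #include <linux/iommu.h>
        #include <linux/err.h>

        struct my_iommu {
                struct device *dev;     /* the sysfs representation */
                int id;
        };

        /* Sketch: expose one IOMMU instance and link one client device to it. */
        static int my_iommu_sysfs_setup(struct my_iommu *iommu, struct device *client)
        {
                iommu->dev = iommu_device_create(NULL, iommu, NULL,
                                                 "my-iommu.%d", iommu->id);
                if (IS_ERR(iommu->dev))
                        return PTR_ERR(iommu->dev);

                /* populates the devices/ directory and the per-device iommu link */
                return iommu_device_link(iommu->dev, client);
        }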
diff --git a/include/linux/iova.h b/include/linux/iova.h index 3277f4711349..19e81d5ccb6d 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -34,6 +34,11 @@ struct iova_domain { | |||
34 | unsigned long dma_32bit_pfn; | 34 | unsigned long dma_32bit_pfn; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static inline unsigned long iova_size(struct iova *iova) | ||
38 | { | ||
39 | return iova->pfn_hi - iova->pfn_lo + 1; | ||
40 | } | ||
41 | |||
37 | struct iova *alloc_iova_mem(void); | 42 | struct iova *alloc_iova_mem(void); |
38 | void free_iova_mem(struct iova *iova); | 43 | void free_iova_mem(struct iova *iova); |
39 | void free_iova(struct iova_domain *iovad, unsigned long pfn); | 44 | void free_iova(struct iova_domain *iovad, unsigned long pfn); |
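iova_size() as defined above counts page frames; a byte count follows by shifting with the page size. A small sketch, assuming page-granule IOVAs:

        #include <linux/iova.h>
        #include <linux/printk.h>

        /* Sketch: report the extent of an iova allocation in pages and bytes. */
        static void show_iova_extent(struct iova *iova)
        {
                unsigned long pages = iova_size(iova);  /* pfn_hi - pfn_lo + 1 */

                pr_debug("iova %lx-%lx: %lu pages (%lu bytes)\n",
                         iova->pfn_lo, iova->pfn_hi, pages, pages << PAGE_SHIFT);
        }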
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index cac78de09c07..c1aede46718b 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h | |||
@@ -10,41 +10,8 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef _INTEL_IOMMU_H_ | 13 | #ifndef _OMAP_IOMMU_H_ |
14 | #define _INTEL_IOMMU_H_ | 14 | #define _OMAP_IOMMU_H_ |
15 | |||
16 | struct iovm_struct { | ||
17 | struct omap_iommu *iommu; /* iommu object which this belongs to */ | ||
18 | u32 da_start; /* area definition */ | ||
19 | u32 da_end; | ||
20 | u32 flags; /* IOVMF_: see below */ | ||
21 | struct list_head list; /* linked in ascending order */ | ||
22 | const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */ | ||
23 | void *va; /* mpu side mapped address */ | ||
24 | }; | ||
25 | |||
26 | #define MMU_RAM_ENDIAN_SHIFT 9 | ||
27 | #define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT) | ||
28 | #define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT) | ||
29 | #define IOVMF_ENDIAN_LITTLE MMU_RAM_ENDIAN_LITTLE | ||
30 | #define MMU_RAM_ELSZ_SHIFT 7 | ||
31 | #define IOVMF_ELSZ_8 MMU_RAM_ELSZ_8 | ||
32 | |||
33 | struct iommu_domain; | ||
34 | |||
35 | extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da); | ||
36 | extern u32 | ||
37 | omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da, | ||
38 | const struct sg_table *sgt, u32 flags); | ||
39 | extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain, | ||
40 | struct device *dev, u32 da); | ||
41 | extern u32 | ||
42 | omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, | ||
43 | u32 da, size_t bytes, u32 flags); | ||
44 | extern void | ||
45 | omap_iommu_vfree(struct iommu_domain *domain, struct device *dev, | ||
46 | const u32 da); | ||
47 | extern void *omap_da_to_va(struct device *dev, u32 da); | ||
48 | 15 | ||
49 | extern void omap_iommu_save_ctx(struct device *dev); | 16 | extern void omap_iommu_save_ctx(struct device *dev); |
50 | extern void omap_iommu_restore_ctx(struct device *dev); | 17 | extern void omap_iommu_restore_ctx(struct device *dev); |
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 5b429c43a297..54a0a9582fad 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h | |||
@@ -31,14 +31,10 @@ struct omap_iommu_arch_data { | |||
31 | 31 | ||
32 | /** | 32 | /** |
33 | * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod | 33 | * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod |
34 | * @da_start: device address where the va space starts. | ||
35 | * @da_end: device address where the va space ends. | ||
36 | * @nr_tlb_entries: number of entries supported by the translation | 34 | * @nr_tlb_entries: number of entries supported by the translation |
37 | * look-aside buffer (TLB). | 35 | * look-aside buffer (TLB). |
38 | */ | 36 | */ |
39 | struct omap_mmu_dev_attr { | 37 | struct omap_mmu_dev_attr { |
40 | u32 da_start; | ||
41 | u32 da_end; | ||
42 | int nr_tlb_entries; | 38 | int nr_tlb_entries; |
43 | }; | 39 | }; |
44 | 40 | ||
@@ -46,8 +42,6 @@ struct iommu_platform_data { | |||
46 | const char *name; | 42 | const char *name; |
47 | const char *reset_name; | 43 | const char *reset_name; |
48 | int nr_tlb_entries; | 44 | int nr_tlb_entries; |
49 | u32 da_start; | ||
50 | u32 da_end; | ||
51 | 45 | ||
52 | int (*assert_reset)(struct platform_device *pdev, const char *name); | 46 | int (*assert_reset)(struct platform_device *pdev, const char *name); |
53 | int (*deassert_reset)(struct platform_device *pdev, const char *name); | 47 | int (*deassert_reset)(struct platform_device *pdev, const char *name); |