-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-iommu_groups              |  14
-rw-r--r--  Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt  |  21
-rw-r--r--  Documentation/kernel-parameters.txt                              |   1
-rw-r--r--  arch/ia64/include/asm/iommu.h                                    |   2
-rw-r--r--  arch/ia64/kernel/pci-dma.c                                       |   1
-rw-r--r--  arch/x86/include/asm/iommu.h                                     |   1
-rw-r--r--  arch/x86/kernel/pci-dma.c                                        |  11
-rw-r--r--  drivers/iommu/Kconfig                                            |   6
-rw-r--r--  drivers/iommu/Makefile                                           |   1
-rw-r--r--  drivers/iommu/amd_iommu.c                                        |  99
-rw-r--r--  drivers/iommu/amd_iommu_init.c                                   | 569
-rw-r--r--  drivers/iommu/amd_iommu_types.h                                  |  13
-rw-r--r--  drivers/iommu/amd_iommu_v2.c                                     |   4
-rw-r--r--  drivers/iommu/exynos-iommu.c                                     |   4
-rw-r--r--  drivers/iommu/intel-iommu.c                                      |  93
-rw-r--r--  drivers/iommu/iommu.c                                            | 611
-rw-r--r--  drivers/iommu/irq_remapping.c                                    |   5
-rw-r--r--  drivers/iommu/msm_iommu.c                                        |   5
-rw-r--r--  drivers/iommu/of_iommu.c                                         |  90
-rw-r--r--  drivers/iommu/omap-iommu.c                                       |   4
-rw-r--r--  drivers/iommu/tegra-gart.c                                       |   5
-rw-r--r--  drivers/iommu/tegra-smmu.c                                       | 285
-rw-r--r--  include/linux/device.h                                           |   2
-rw-r--r--  include/linux/iommu.h                                            | 140
-rw-r--r--  include/linux/of_iommu.h                                         |  21
25 files changed, 1527 insertions(+), 481 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
new file mode 100644
index 000000000000..9b31556cfdda
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -0,0 +1,14 @@
+What:		/sys/kernel/iommu_groups/
+Date:		May 2012
+KernelVersion:	v3.5
+Contact:	Alex Williamson <alex.williamson@redhat.com>
+Description:	/sys/kernel/iommu_groups/ contains a number of sub-
+		directories, each representing an IOMMU group.  The
+		name of the sub-directory matches the iommu_group_id()
+		for the group, which is an integer value.  Within each
+		subdirectory is another directory named "devices" with
+		links to the sysfs devices contained in this group.
+		The group directory also optionally contains a "name"
+		file if the IOMMU driver has chosen to register a more
+		common name for the group.
+Users:
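
The layout documented above can be walked from userspace with ordinary
directory reads; a minimal sketch (illustrative only, not part of this patch):

	/* list each IOMMU group and the devices it contains */
	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/kernel/iommu_groups";
		char path[512];
		struct dirent *g, *d;
		DIR *groups, *devs;

		groups = opendir(base);
		if (!groups)
			return 1;	/* no IOMMU groups exposed */

		while ((g = readdir(groups))) {
			if (g->d_name[0] == '.')
				continue;
			/* sub-directory name == iommu_group_id() */
			snprintf(path, sizeof(path), "%s/%s/devices",
				 base, g->d_name);
			devs = opendir(path);
			if (!devs)
				continue;
			printf("group %s:\n", g->d_name);
			while ((d = readdir(devs)))
				if (d->d_name[0] != '.')
					printf("\t%s\n", d->d_name);
			closedir(devs);
		}
		closedir(groups);
		return 0;
	}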
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt
new file mode 100644
index 000000000000..89fb5434b730
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt
@@ -0,0 +1,21 @@
+NVIDIA Tegra 30 IOMMU H/W, SMMU (System Memory Management Unit)
+
+Required properties:
+- compatible : "nvidia,tegra30-smmu"
+- reg : Should contain 3 register banks(address and length) for each
+  of the SMMU register blocks.
+- interrupts : Should contain MC General interrupt.
+- nvidia,#asids : # of ASIDs
+- dma-window : IOVA start address and length.
+- nvidia,ahb : phandle to the ahb bus connected to SMMU.
+
+Example:
+	smmu {
+		compatible = "nvidia,tegra30-smmu";
+		reg = <0x7000f010 0x02c
+		       0x7000f1f0 0x010
+		       0x7000f228 0x05c>;
+		nvidia,#asids = <4>;		/* # of ASIDs */
+		dma-window = <0 0x40000000>;	/* IOVA start & length */
+		nvidia,ahb = <&ahb>;
+	};
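
For illustration, a driver consuming this binding would read the properties
with the standard OF helpers; a sketch with a hypothetical smmu_parse_dt()
helper, error handling trimmed (not part of this patch):

	#include <linux/of.h>

	static int smmu_parse_dt(struct device_node *np)
	{
		const __be32 *window;
		u32 asids;

		/* "nvidia,#asids": number of address space IDs */
		if (of_property_read_u32(np, "nvidia,#asids", &asids))
			return -ENODEV;

		/* "dma-window": <IOVA-start length>, two cells */
		window = of_get_property(np, "dma-window", NULL);
		if (!window)
			return -ENODEV;

		pr_info("smmu: %u asids, window 0x%x+0x%x\n", asids,
			be32_to_cpup(&window[0]), be32_to_cpup(&window[1]));
		return 0;
	}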
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 12783fa833c3..e714a025c99d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1134,7 +1134,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		forcesac
 		soft
 		pt		[x86, IA-64]
-		group_mf	[x86, IA-64]
 
 
 	io7=		[HW] IO7 for Marvel based alpha systems
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index b6a809fa2995..105c93b00b1b 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -11,12 +11,10 @@ extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_pass_through;
 extern int iommu_detected;
-extern int iommu_group_mf;
 #else
 #define iommu_pass_through	(0)
 #define no_iommu		(1)
 #define iommu_detected		(0)
-#define iommu_group_mf		(0)
 #endif
 extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 7cdc89b2483c..1ddcfe5ef353 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,7 +32,6 @@ int force_iommu __read_mostly;
 #endif
 
 int iommu_pass_through;
-int iommu_group_mf;
 
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index dffc38ee6255..345c99cef152 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,7 +5,6 @@ extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
-extern int iommu_group_mf;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c0f420f76cd3..de2b7ad70273 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -45,15 +45,6 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
-/*
- * Group multi-function PCI devices into a single device-group for the
- * iommu_device_group interface. This tells the iommu driver to pretend
- * it cannot distinguish between functions of a device, exposing only one
- * group for the device. Useful for disallowing use of individual PCI
- * functions from userspace drivers.
- */
-int iommu_group_mf __read_mostly;
-
 extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
 
 /* Dummy device used for NULL arguments (normally ISA). */
@@ -194,8 +185,6 @@ static __init int iommu_setup(char *p)
 #endif
 	if (!strncmp(p, "pt", 2))
 		iommu_pass_through = 1;
-	if (!strncmp(p, "group_mf", 8))
-		iommu_group_mf = 1;
 
 	gart_parse_options(p);
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 340893727538..9f69b561f5db 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -13,6 +13,10 @@ menuconfig IOMMU_SUPPORT
 
 if IOMMU_SUPPORT
 
+config OF_IOMMU
+	def_bool y
+	depends on OF
+
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
@@ -154,7 +158,7 @@ config TEGRA_IOMMU_GART
 
 config TEGRA_IOMMU_SMMU
 	bool "Tegra SMMU IOMMU Support"
-	depends on ARCH_TEGRA_3x_SOC
+	depends on ARCH_TEGRA_3x_SOC && TEGRA_AHB
 	select IOMMU_API
 	help
 	  Enables support for remapping discontiguous physical memory
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 76e54ef796de..14a4d5fc94fa 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 625626391f2d..6d1cbdfc9b2a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -256,11 +256,21 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+	pci_dev_put(*from);
+	*from = to;
+}
+
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
 static int iommu_init_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
+	struct iommu_group *group;
 	u16 alias;
+	int ret;
 
 	if (dev->archdata.iommu)
 		return 0;
@@ -281,8 +291,43 @@ static int iommu_init_device(struct device *dev)
 			return -ENOTSUPP;
 		}
 		dev_data->alias_data = alias_data;
+
+		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+	} else
+		dma_pdev = pci_dev_get(pdev);
+
+	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+	if (dma_pdev->multifunction &&
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+		swap_pci_ref(&dma_pdev,
+			     pci_get_slot(dma_pdev->bus,
+					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+					  0)));
+
+	while (!pci_is_root_bus(dma_pdev->bus)) {
+		if (pci_acs_path_enabled(dma_pdev->bus->self,
+					 NULL, REQ_ACS_FLAGS))
+			break;
+
+		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+	}
+
+	group = iommu_group_get(&dma_pdev->dev);
+	pci_dev_put(dma_pdev);
+	if (!group) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
 	}
 
+	ret = iommu_group_add_device(group, dev);
+
+	iommu_group_put(group);
+
+	if (ret)
+		return ret;
+
 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;
 
@@ -311,6 +356,8 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
+	iommu_group_remove_device(dev);
+
 	/*
 	 * Nothing to do here - we keep dev_data around for unplugged devices
 	 * and reuse it when the device is re-plugged - not doing so would
@@ -384,7 +431,6 @@ DECLARE_STATS_COUNTER(invalidate_iotlb);
 DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 DECLARE_STATS_COUNTER(pri_requests);
 
-
 static struct dentry *stats_dir;
 static struct dentry *de_fflush;
 
@@ -2073,7 +2119,7 @@ out_err:
 /* FIXME: Move this to PCI code */
 #define PCI_PRI_TLP_OFF		(1 << 15)
 
-bool pci_pri_tlp_required(struct pci_dev *pdev)
+static bool pci_pri_tlp_required(struct pci_dev *pdev)
 {
 	u16 status;
 	int pos;
@@ -2254,6 +2300,18 @@ static int device_change_notifier(struct notifier_block *nb,
 
 		iommu_init_device(dev);
 
+		/*
+		 * dev_data is still NULL and
+		 * got initialized in iommu_init_device
+		 */
+		dev_data = get_dev_data(dev);
+
+		if (iommu_pass_through || dev_data->iommu_v2) {
+			dev_data->passthrough = true;
+			attach_device(dev, pt_domain);
+			break;
+		}
+
 		domain = domain_for_device(dev);
 
 		/* allocate a protection domain if a device is added */
@@ -2271,10 +2329,7 @@ static int device_change_notifier(struct notifier_block *nb,
 
 		dev_data = get_dev_data(dev);
 
-		if (!dev_data->passthrough)
-			dev->archdata.dma_ops = &amd_iommu_dma_ops;
-		else
-			dev->archdata.dma_ops = &nommu_dma_ops;
+		dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
 		break;
 	case BUS_NOTIFY_DEL_DEVICE:
@@ -2972,6 +3027,11 @@ int __init amd_iommu_init_dma_ops(void)
 
 	amd_iommu_stats_init();
 
+	if (amd_iommu_unmap_flush)
+		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
+	else
+		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
 	return 0;
 
 free_domains:
@@ -3078,6 +3138,10 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
 
 	dom->priv = domain;
 
+	dom->geometry.aperture_start = 0;
+	dom->geometry.aperture_end   = ~0ULL;
+	dom->geometry.force_aperture = true;
+
 	return 0;
 
 out_free:
@@ -3236,26 +3300,6 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }
 
-static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct iommu_dev_data *dev_data = dev->archdata.iommu;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 devid;
-
-	if (!dev_data)
-		return -ENODEV;
-
-	if (pdev->is_virtfn || !iommu_group_mf)
-		devid = dev_data->devid;
-	else
-		devid = calc_devid(pdev->bus->number,
-				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
-
-	*groupid = amd_iommu_alias_table[devid];
-
-	return 0;
-}
-
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -3265,7 +3309,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
-	.device_group = amd_iommu_device_group,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
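Both this driver and the Intel one below finish device registration with the
same pattern once dma_pdev names the device to which DMA will be attributed:
reuse that device's existing group if it has one, otherwise allocate a fresh
group, then add the new device and drop the local reference. Distilled into
one hedged sketch (helper name hypothetical, not part of the patch):

	static int attach_to_group(struct pci_dev *dma_pdev, struct device *dev)
	{
		struct iommu_group *group;
		int ret;

		group = iommu_group_get(&dma_pdev->dev); /* existing group? */
		pci_dev_put(dma_pdev);	/* ref taken during the ACS walk */
		if (!group) {
			group = iommu_group_alloc();	/* first member */
			if (IS_ERR(group))
				return PTR_ERR(group);
		}

		ret = iommu_group_add_device(group, dev);

		iommu_group_put(group);	/* group now pinned by the device */

		return ret;
	}
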
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index a33612f3206f..500e7f15f5c2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,6 +26,8 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -122,7 +124,7 @@ struct ivmd_header {
 
 bool amd_iommu_dump;
 
-static int __initdata amd_iommu_detected;
+static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
 
 u16 amd_iommu_last_bdf;			/* largest PCI device id we have
@@ -149,11 +151,6 @@ bool amd_iommu_v2_present __read_mostly;
 bool amd_iommu_force_isolation __read_mostly;
 
 /*
- * The ACPI table parsing functions set this variable on an error
- */
-static int __initdata amd_iommu_init_err;
-
-/*
  * List of protection domains - used during resume
  */
 LIST_HEAD(amd_iommu_pd_list);
@@ -190,13 +187,23 @@ static u32 dev_table_size; /* size of the device table */
 static u32 alias_table_size;	/* size of the alias table */
 static u32 rlookup_table_size;	/* size if the rlookup table */
 
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+enum iommu_init_state {
+	IOMMU_START_STATE,
+	IOMMU_IVRS_DETECTED,
+	IOMMU_ACPI_FINISHED,
+	IOMMU_ENABLED,
+	IOMMU_PCI_INIT,
+	IOMMU_INTERRUPTS_EN,
+	IOMMU_DMA_OPS,
+	IOMMU_INITIALIZED,
+	IOMMU_NOT_FOUND,
+	IOMMU_INIT_ERROR,
+};
+
+static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
+static int __init iommu_go_to_state(enum iommu_init_state state);
 
 static inline void update_last_devid(u16 devid)
 {
@@ -321,23 +328,6 @@ static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-	static const char * const feat_str[] = {
-		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
-		"IA", "GA", "HE", "PC", NULL
-	};
-	int i;
-
-	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
-	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
-
-	if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-		printk(KERN_CONT " extended features: ");
-		for (i = 0; feat_str[i]; ++i)
-			if (iommu_feature(iommu, (1ULL << i)))
-				printk(KERN_CONT " %s", feat_str[i]);
-	}
-	printk(KERN_CONT "\n");
-
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
@@ -358,7 +348,7 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address)
 {
 	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
 		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
@@ -367,7 +357,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 		return NULL;
 	}
 
-	return ioremap_nocache(address, MMIO_REGION_LENGTH);
+	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -463,11 +453,9 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0) {
+	if (checksum != 0)
 		/* ACPI table corrupt */
-		amd_iommu_init_err = -ENODEV;
-		return 0;
-	}
+		return -ENODEV;
 
 	p += IVRS_HEADER_LENGTH;
 
@@ -726,90 +714,6 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 }
 
 /*
- * This function reads some important data from the IOMMU PCI space and
- * initializes the driver data structure with it. It reads the hardware
- * capabilities and the first/last device entries
- */
-static void __init init_iommu_from_pci(struct amd_iommu *iommu)
-{
-	int cap_ptr = iommu->cap_ptr;
-	u32 range, misc, low, high;
-	int i, j;
-
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
-			      &iommu->cap);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-			      &range);
-	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-			      &misc);
-
-	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
-					 MMIO_GET_FD(range));
-	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
-					MMIO_GET_LD(range));
-	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
-	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
-		amd_iommu_iotlb_sup = false;
-
-	/* read extended feature bits */
-	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-	iommu->features = ((u64)high << 32) | low;
-
-	if (iommu_feature(iommu, FEATURE_GT)) {
-		int glxval;
-		u32 pasids;
-		u64 shift;
-
-		shift   = iommu->features & FEATURE_PASID_MASK;
-		shift >>= FEATURE_PASID_SHIFT;
-		pasids  = (1 << shift);
-
-		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
-
-		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
-		glxval >>= FEATURE_GLXVAL_SHIFT;
-
-		if (amd_iommu_max_glx_val == -1)
-			amd_iommu_max_glx_val = glxval;
-		else
-			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
-	}
-
-	if (iommu_feature(iommu, FEATURE_GT) &&
-	    iommu_feature(iommu, FEATURE_PPR)) {
-		iommu->is_iommu_v2   = true;
-		amd_iommu_v2_present = true;
-	}
-
-	if (!is_rd890_iommu(iommu->dev))
-		return;
-
-	/*
-	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
-	 * it's necessary for us to store this information so it can be
-	 * reprogrammed on resume
-	 */
-
-	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
-			      &iommu->stored_addr_lo);
-	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
-			      &iommu->stored_addr_hi);
-
-	/* Low bit locks writes to configuration space */
-	iommu->stored_addr_lo &= ~1;
-
-	for (i = 0; i < 6; i++)
-		for (j = 0; j < 0x12; j++)
-			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
-	for (i = 0; i < 0x83; i++)
-		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-}
-
-/*
  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
  * initializes the hardware and our data structures with it.
  */
@@ -1025,13 +929,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	/*
 	 * Copy data from ACPI table entry to the iommu struct
 	 */
-	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
-	if (!iommu->dev)
-		return 1;
-
-	iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
-						PCI_DEVFN(0, 0));
-
+	iommu->devid   = h->devid;
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
@@ -1049,20 +947,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 
 	iommu->int_enabled = false;
 
-	init_iommu_from_pci(iommu);
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
-	if (iommu_feature(iommu, FEATURE_PPR)) {
-		iommu->ppr_log = alloc_ppr_log(iommu);
-		if (!iommu->ppr_log)
-			return -ENOMEM;
-	}
-
-	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
-		amd_iommu_np_cache = true;
-
-	return pci_enable_device(iommu->dev);
+	return 0;
 }
 
 /*
@@ -1093,16 +981,12 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 				    h->mmio_phys);
 
 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL) {
-				amd_iommu_init_err = -ENOMEM;
-				return 0;
-			}
+			if (iommu == NULL)
+				return -ENOMEM;
 
 			ret = init_iommu_one(iommu, h);
-			if (ret) {
-				amd_iommu_init_err = ret;
-				return 0;
-			}
+			if (ret)
+				return ret;
 			break;
 		default:
 			break;
@@ -1115,6 +999,148 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
 
+static int iommu_init_pci(struct amd_iommu *iommu)
+{
+	int cap_ptr = iommu->cap_ptr;
+	u32 range, misc, low, high;
+
+	iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+					  iommu->devid & 0xff);
+	if (!iommu->dev)
+		return -ENODEV;
+
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+			      &iommu->cap);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+			      &range);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+			      &misc);
+
+	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+					 MMIO_GET_FD(range));
+	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+					MMIO_GET_LD(range));
+
+	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+		amd_iommu_iotlb_sup = false;
+
+	/* read extended feature bits */
+	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+	iommu->features = ((u64)high << 32) | low;
+
+	if (iommu_feature(iommu, FEATURE_GT)) {
+		int glxval;
+		u32 pasids;
+		u64 shift;
+
+		shift   = iommu->features & FEATURE_PASID_MASK;
+		shift >>= FEATURE_PASID_SHIFT;
+		pasids  = (1 << shift);
+
+		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval >>= FEATURE_GLXVAL_SHIFT;
+
+		if (amd_iommu_max_glx_val == -1)
+			amd_iommu_max_glx_val = glxval;
+		else
+			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+	}
+
+	if (iommu_feature(iommu, FEATURE_GT) &&
+	    iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->is_iommu_v2   = true;
+		amd_iommu_v2_present = true;
+	}
+
+	if (iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->ppr_log = alloc_ppr_log(iommu);
+		if (!iommu->ppr_log)
+			return -ENOMEM;
+	}
+
+	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+		amd_iommu_np_cache = true;
+
+	if (is_rd890_iommu(iommu->dev)) {
+		int i, j;
+
+		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+				PCI_DEVFN(0, 0));
+
+		/*
+		 * Some rd890 systems may not be fully reconfigured by the
+		 * BIOS, so it's necessary for us to store this information so
+		 * it can be reprogrammed on resume
+		 */
+		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+				&iommu->stored_addr_lo);
+		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+				&iommu->stored_addr_hi);
+
+		/* Low bit locks writes to configuration space */
+		iommu->stored_addr_lo &= ~1;
+
+		for (i = 0; i < 6; i++)
+			for (j = 0; j < 0x12; j++)
+				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+		for (i = 0; i < 0x83; i++)
+			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+	}
+
+	return pci_enable_device(iommu->dev);
+}
+
+static void print_iommu_info(void)
+{
+	static const char * const feat_str[] = {
+		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+		"IA", "GA", "HE", "PC"
+	};
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu) {
+		int i;
+
+		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
+			dev_name(&iommu->dev->dev), iommu->cap_ptr);
+
+		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+			pr_info("AMD-Vi: Extended features: ");
+			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+				if (iommu_feature(iommu, (1ULL << i)))
+					pr_cont(" %s", feat_str[i]);
+			}
+		}
+		pr_cont("\n");
+	}
+}
+
+static int __init amd_iommu_init_pci(void)
+{
+	struct amd_iommu *iommu;
+	int ret = 0;
+
+	for_each_iommu(iommu) {
+		ret = iommu_init_pci(iommu);
+		if (ret)
+			break;
+	}
+
+	/* Make sure ACS will be enabled */
+	pci_request_acs();
+
+	ret = amd_iommu_init_devices();
+
+	print_iommu_info();
+
+	return ret;
+}
+
 /****************************************************************************
  *
  * The following functions initialize the MSI interrupts for all IOMMUs
@@ -1217,7 +1243,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 /* called for unity map ACPI definition */
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
-	struct unity_map_entry *e = 0;
+	struct unity_map_entry *e = NULL;
 	char *s;
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -1369,7 +1395,7 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
-static void enable_iommus(void)
+static void early_enable_iommus(void)
 {
 	struct amd_iommu *iommu;
 
@@ -1379,14 +1405,29 @@ static void enable_iommus(void)
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
-		iommu_enable_ppr_log(iommu);
-		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_enable(iommu);
 		iommu_flush_all_caches(iommu);
 	}
 }
 
+static void enable_iommus_v2(void)
+{
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu) {
+		iommu_enable_ppr_log(iommu);
+		iommu_enable_gt(iommu);
+	}
+}
+
+static void enable_iommus(void)
+{
+	early_enable_iommus();
+
+	enable_iommus_v2();
+}
+
 static void disable_iommus(void)
 {
 	struct amd_iommu *iommu;
@@ -1481,16 +1522,23 @@ static void __init free_on_init_error(void)
  * After everything is set up the IOMMUs are enabled and the necessary
  * hotplug and suspend notifiers are registered.
  */
-int __init amd_iommu_init_hardware(void)
+static int __init early_amd_iommu_init(void)
 {
+	struct acpi_table_header *ivrs_base;
+	acpi_size ivrs_size;
+	acpi_status status;
 	int i, ret = 0;
 
 	if (!amd_iommu_detected)
 		return -ENODEV;
 
-	if (amd_iommu_dev_table != NULL) {
-		/* Hardware already initialized */
-		return 0;
+	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+	if (status == AE_NOT_FOUND)
+		return -ENODEV;
+	else if (ACPI_FAILURE(status)) {
+		const char *err = acpi_format_exception(status);
+		pr_err("AMD-Vi: IVRS table error: %s\n", err);
+		return -EINVAL;
 	}
 
 	/*
@@ -1498,10 +1546,7 @@ int __init amd_iommu_init_hardware(void)
 	 * we need to handle. Upon this information the shared data
 	 * structures for the IOMMUs in the system will be allocated
 	 */
-	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
-		return -ENODEV;
-
-	ret = amd_iommu_init_err;
+	ret = find_last_devid_acpi(ivrs_base);
 	if (ret)
 		goto out;
 
@@ -1523,20 +1568,20 @@ int __init amd_iommu_init_hardware(void)
 	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
 			get_order(alias_table_size));
 	if (amd_iommu_alias_table == NULL)
-		goto free;
+		goto out;
 
 	/* IOMMU rlookup table - find the IOMMU for a specific device */
 	amd_iommu_rlookup_table = (void *)__get_free_pages(
 			GFP_KERNEL | __GFP_ZERO,
 			get_order(rlookup_table_size));
 	if (amd_iommu_rlookup_table == NULL)
-		goto free;
+		goto out;
 
 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
 			GFP_KERNEL | __GFP_ZERO,
 			get_order(MAX_DOMAIN_ID/8));
 	if (amd_iommu_pd_alloc_bitmap == NULL)
-		goto free;
+		goto out;
 
 	/* init the device table */
 	init_device_table();
@@ -1559,38 +1604,18 @@ int __init amd_iommu_init_hardware(void)
 	 * now the data structures are allocated and basically initialized
 	 * start the real acpi table scan
 	 */
-	ret = -ENODEV;
-	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
-		goto free;
-
-	if (amd_iommu_init_err) {
-		ret = amd_iommu_init_err;
-		goto free;
-	}
-
-	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
-		goto free;
-
-	if (amd_iommu_init_err) {
-		ret = amd_iommu_init_err;
-		goto free;
-	}
-
-	ret = amd_iommu_init_devices();
+	ret = init_iommu_all(ivrs_base);
 	if (ret)
-		goto free;
-
-	enable_iommus();
-
-	amd_iommu_init_notifier();
+		goto out;
 
-	register_syscore_ops(&amd_iommu_syscore_ops);
+	ret = init_memory_definitions(ivrs_base);
+	if (ret)
+		goto out;
 
 out:
-	return ret;
+	/* Don't leak any ACPI memory */
+	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+	ivrs_base = NULL;
 
-free:
-	free_on_init_error();
-
 	return ret;
 }
@@ -1610,26 +1635,29 @@ out:
 	return ret;
 }
 
-/*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
- *
- * The function calls amd_iommu_init_hardware() to setup and enable the
- * IOMMU hardware if this has not happened yet. After that the driver
- * registers for the DMA-API and for the IOMMU-API as necessary.
- */
-static int __init amd_iommu_init(void)
+static bool detect_ivrs(void)
 {
-	int ret = 0;
+	struct acpi_table_header *ivrs_base;
+	acpi_size ivrs_size;
+	acpi_status status;
 
-	ret = amd_iommu_init_hardware();
-	if (ret)
-		goto out;
+	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+	if (status == AE_NOT_FOUND)
+		return false;
+	else if (ACPI_FAILURE(status)) {
+		const char *err = acpi_format_exception(status);
+		pr_err("AMD-Vi: IVRS table error: %s\n", err);
+		return false;
+	}
 
-	ret = amd_iommu_enable_interrupts();
-	if (ret)
-		goto free;
+	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+
+	return true;
+}
+
+static int amd_iommu_init_dma(void)
+{
+	int ret;
 
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
@@ -1637,29 +1665,108 @@ static int __init amd_iommu_init(void)
 		ret = amd_iommu_init_dma_ops();
 
 	if (ret)
-		goto free;
+		return ret;
 
 	amd_iommu_init_api();
 
-	x86_platform.iommu_shutdown = disable_iommus;
+	amd_iommu_init_notifier();
 
-	if (iommu_pass_through)
-		goto out;
+	return 0;
+}
 
-	if (amd_iommu_unmap_flush)
-		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
-	else
-		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+/****************************************************************************
+ *
+ * AMD IOMMU Initialization State Machine
+ *
+ ****************************************************************************/
+
+static int __init state_next(void)
+{
+	int ret = 0;
+
+	switch (init_state) {
+	case IOMMU_START_STATE:
+		if (!detect_ivrs()) {
+			init_state = IOMMU_NOT_FOUND;
+			ret = -ENODEV;
+		} else {
+			init_state = IOMMU_IVRS_DETECTED;
+		}
+		break;
+	case IOMMU_IVRS_DETECTED:
+		ret = early_amd_iommu_init();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+		break;
+	case IOMMU_ACPI_FINISHED:
+		early_enable_iommus();
+		register_syscore_ops(&amd_iommu_syscore_ops);
+		x86_platform.iommu_shutdown = disable_iommus;
+		init_state = IOMMU_ENABLED;
+		break;
+	case IOMMU_ENABLED:
+		ret = amd_iommu_init_pci();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+		enable_iommus_v2();
+		break;
+	case IOMMU_PCI_INIT:
+		ret = amd_iommu_enable_interrupts();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
+		break;
+	case IOMMU_INTERRUPTS_EN:
+		ret = amd_iommu_init_dma();
+		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
+		break;
+	case IOMMU_DMA_OPS:
+		init_state = IOMMU_INITIALIZED;
+		break;
+	case IOMMU_INITIALIZED:
+		/* Nothing to do */
+		break;
+	case IOMMU_NOT_FOUND:
+	case IOMMU_INIT_ERROR:
+		/* Error states => do nothing */
+		ret = -EINVAL;
+		break;
+	default:
+		/* Unknown state */
+		BUG();
+	}
 
-out:
 	return ret;
+}
 
-free:
-	disable_iommus();
+static int __init iommu_go_to_state(enum iommu_init_state state)
+{
+	int ret = 0;
+
+	while (init_state != state) {
+		ret = state_next();
+		if (init_state == IOMMU_NOT_FOUND ||
+		    init_state == IOMMU_INIT_ERROR)
+			break;
+	}
+
+	return ret;
+}
+
+
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ */
+static int __init amd_iommu_init(void)
+{
+	int ret;
 
-	free_on_init_error();
+	ret = iommu_go_to_state(IOMMU_INITIALIZED);
+	if (ret) {
+		disable_iommus();
+		free_on_init_error();
+	}
 
-	goto out;
 	return ret;
 }
 
 /****************************************************************************
@@ -1669,29 +1776,25 @@ free:
  * IOMMUs
  *
  ****************************************************************************/
-static int __init early_amd_iommu_detect(struct acpi_table_header *table)
-{
-	return 0;
-}
-
 int __init amd_iommu_detect(void)
 {
+	int ret;
+
 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
 		return -ENODEV;
 
 	if (amd_iommu_disabled)
 		return -ENODEV;
 
-	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
-		iommu_detected = 1;
-		amd_iommu_detected = 1;
-		x86_init.iommu.iommu_init = amd_iommu_init;
+	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
+	if (ret)
+		return ret;
 
-		/* Make sure ACS will be enabled */
-		pci_request_acs();
-		return 1;
-	}
-	return -ENODEV;
+	amd_iommu_detected = true;
+	iommu_detected = 1;
+	x86_init.iommu.iommu_init = amd_iommu_init;
+
+	return 0;
 }
 
 /****************************************************************************
@@ -1727,8 +1830,8 @@ __setup("amd_iommu=", parse_amd_iommu_options);
 
 IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
-		  0,
-		  0);
+		  NULL,
+		  NULL);
 
 bool amd_iommu_v2_supported(void)
 {
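The point of the state machine introduced above is that initialization becomes
resumable: amd_iommu_detect() drives the machine only as far as
IOMMU_IVRS_DETECTED, amd_iommu_init() later pushes the same machine through to
IOMMU_INITIALIZED, and the two failure states are sticky. The property in
isolation (a toy sketch, not kernel code):

	enum state { START, DETECTED, ENABLED, DONE, ERROR };
	static enum state st = START;

	static int next(void)
	{
		switch (st) {
		case START:    st = DETECTED; return 0;
		case DETECTED: st = ENABLED;  return 0;
		case ENABLED:  st = DONE;     return 0;
		case DONE:     return 0;      /* nothing left to do */
		default:       return -1;     /* ERROR is sticky */
		}
	}

	static int go_to(enum state target)
	{
		int ret = 0;

		while (st != target && st != ERROR)
			ret = next();
		return ret;
	}

	/*
	 * go_to(DETECTED) early in boot, go_to(DONE) later: the second
	 * call resumes where the first one stopped instead of re-running it.
	 */
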
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c1b1d489817e..d0dab865a8b8 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -487,7 +487,7 @@ struct amd_iommu {
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
-	u8 *mmio_base;
+	u8 __iomem *mmio_base;
 
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
@@ -501,6 +501,9 @@ struct amd_iommu {
 	/* IOMMUv2 */
 	bool is_iommu_v2;
 
+	/* PCI device id of the IOMMU device */
+	u16 devid;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -530,8 +533,6 @@ struct amd_iommu {
 	u32 evt_buf_size;
 	/* event buffer virtual address */
 	u8 *evt_buf;
-	/* MSI number for event interrupt */
-	u16 evt_msi_num;
 
 	/* Base of the PPR log, if present */
 	u8 *ppr_log;
@@ -664,6 +665,12 @@ extern bool amd_iommu_force_isolation;
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;
 
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index be03238ad815..5208828792e6 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -81,7 +81,7 @@ struct fault {
 	u16 flags;
 };
 
-struct device_state **state_table;
+static struct device_state **state_table;
 static spinlock_t state_lock;
 
 /* List and lock for all pasid_states */
@@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 
 	atomic_set(&pasid_state->count, 1);
 	init_waitqueue_head(&pasid_state->wq);
+	spin_lock_init(&pasid_state->lock);
+
 	pasid_state->task         = task;
 	pasid_state->mm           = get_task_mm(task);
 	pasid_state->device_state = dev_state;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 4407d5eff05e..45350ff5e93c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,6 +732,10 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
 	spin_lock_init(&priv->pgtablelock);
 	INIT_LIST_HEAD(&priv->clients);
 
+	dom->geometry.aperture_start = 0;
+	dom->geometry.aperture_end   = ~0UL;
+	dom->geometry.force_aperture = true;
+
 	domain->priv = priv;
 	return 0;
 
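
The three aperture fields set here (and in the AMD and Intel hunks) let API
users check a domain's usable IOVA range before mapping. A hedged sketch of a
caller-side test, assuming the iommu_domain_geometry fields this series adds
to include/linux/iommu.h:

	/* sketch: reject an IOVA range outside the domain's aperture */
	static bool iova_in_aperture(struct iommu_domain *domain,
				     dma_addr_t iova, size_t size)
	{
		if (!domain->geometry.force_aperture)
			return true;	/* aperture left unconstrained */

		return iova >= domain->geometry.aperture_start &&
		       iova + size - 1 <= domain->geometry.aperture_end;
	}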
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2fb7d1598a68..7469b5346643 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3932,6 +3932,10 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
 	domain_update_iommu_cap(dmar_domain);
 	domain->priv = dmar_domain;
 
+	domain->geometry.aperture_start = 0;
+	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+	domain->geometry.force_aperture = true;
+
 	return 0;
 }
 
@@ -4090,52 +4094,70 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }
 
-/*
- * Group numbers are arbitrary.  Device with the same group number
- * indicate the iommu cannot differentiate between them.  To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_dev *bridge;
-	union {
-		struct {
-			u8 devfn;
-			u8 bus;
-			u16 segment;
-		} pci;
-		u32 group;
-	} id;
+	pci_dev_put(*from);
+	*from = to;
+}
 
-	if (iommu_no_mapping(dev))
-		return -ENODEV;
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
-	id.pci.segment = pci_domain_nr(pdev->bus);
-	id.pci.bus = pdev->bus->number;
-	id.pci.devfn = pdev->devfn;
+static int intel_iommu_add_device(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *bridge, *dma_pdev;
+	struct iommu_group *group;
+	int ret;
 
-	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+	if (!device_to_iommu(pci_domain_nr(pdev->bus),
+			     pdev->bus->number, pdev->devfn))
 		return -ENODEV;
 
 	bridge = pci_find_upstream_pcie_bridge(pdev);
 	if (bridge) {
-		if (pci_is_pcie(bridge)) {
-			id.pci.bus = bridge->subordinate->number;
-			id.pci.devfn = 0;
-		} else {
-			id.pci.bus = bridge->bus->number;
-			id.pci.devfn = bridge->devfn;
-		}
+		if (pci_is_pcie(bridge))
+			dma_pdev = pci_get_domain_bus_and_slot(
+						pci_domain_nr(pdev->bus),
+						bridge->subordinate->number, 0);
+		else
+			dma_pdev = pci_dev_get(bridge);
+	} else
+		dma_pdev = pci_dev_get(pdev);
+
+	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+	if (dma_pdev->multifunction &&
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+		swap_pci_ref(&dma_pdev,
+			     pci_get_slot(dma_pdev->bus,
+					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+					  0)));
+
+	while (!pci_is_root_bus(dma_pdev->bus)) {
+		if (pci_acs_path_enabled(dma_pdev->bus->self,
+					 NULL, REQ_ACS_FLAGS))
+			break;
+
+		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+	}
+
+	group = iommu_group_get(&dma_pdev->dev);
+	pci_dev_put(dma_pdev);
+	if (!group) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
 	}
 
-	if (!pdev->is_virtfn && iommu_group_mf)
-		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+	ret = iommu_group_add_device(group, dev);
 
-	*groupid = id.group;
+	iommu_group_put(group);
+	return ret;
+}
 
-	return 0;
+static void intel_iommu_remove_device(struct device *dev)
+{
+	iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops intel_iommu_ops = {
@@ -4147,7 +4169,8 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
-	.device_group	= intel_iommu_device_group,
+	.add_device	= intel_iommu_add_device,
+	.remove_device	= intel_iommu_remove_device,
 	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
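The new add_device/remove_device callbacks are driven by the IOMMU core rather
than by the driver itself: iommu.c (later in this diff) walks a bus's existing
devices at registration time and uses a bus notifier for hotplug. A simplified
sketch of that caller side, not a verbatim quote of the iommu.c changes:

	static int add_iommu_group(struct device *dev, void *data)
	{
		struct iommu_ops *ops = data;

		if (ops->add_device)
			ops->add_device(dev);	/* driver places dev in a group */

		return 0;
	}

	static int iommu_bus_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		struct device *dev = data;
		struct iommu_ops *ops = dev->bus->iommu_ops;

		if (action == BUS_NOTIFY_ADD_DEVICE && ops->add_device)
			return ops->add_device(dev);
		else if (action == BUS_NOTIFY_DEL_DEVICE && ops->remove_device)
			ops->remove_device(dev);

		return 0;
	}
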
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8b9ded88e6f5..ddbdacad7768 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -26,60 +26,535 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/iommu.h> 28#include <linux/iommu.h>
29#include <linux/idr.h>
30#include <linux/notifier.h>
31#include <linux/err.h>
32
33static struct kset *iommu_group_kset;
34static struct ida iommu_group_ida;
35static struct mutex iommu_group_mutex;
36
37struct iommu_group {
38 struct kobject kobj;
39 struct kobject *devices_kobj;
40 struct list_head devices;
41 struct mutex mutex;
42 struct blocking_notifier_head notifier;
43 void *iommu_data;
44 void (*iommu_data_release)(void *iommu_data);
45 char *name;
46 int id;
47};
48
49struct iommu_device {
50 struct list_head list;
51 struct device *dev;
52 char *name;
53};
54
55struct iommu_group_attribute {
56 struct attribute attr;
57 ssize_t (*show)(struct iommu_group *group, char *buf);
58 ssize_t (*store)(struct iommu_group *group,
59 const char *buf, size_t count);
60};
61
62#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
63struct iommu_group_attribute iommu_group_attr_##_name = \
64 __ATTR(_name, _mode, _show, _store)
65
66#define to_iommu_group_attr(_attr) \
67 container_of(_attr, struct iommu_group_attribute, attr)
68#define to_iommu_group(_kobj) \
69 container_of(_kobj, struct iommu_group, kobj)
29 70
30static ssize_t show_iommu_group(struct device *dev, 71static ssize_t iommu_group_attr_show(struct kobject *kobj,
31 struct device_attribute *attr, char *buf) 72 struct attribute *__attr, char *buf)
32{ 73{
33 unsigned int groupid; 74 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
75 struct iommu_group *group = to_iommu_group(kobj);
76 ssize_t ret = -EIO;
34 77
35 if (iommu_device_group(dev, &groupid)) 78 if (attr->show)
36 return 0; 79 ret = attr->show(group, buf);
80 return ret;
81}
82
83static ssize_t iommu_group_attr_store(struct kobject *kobj,
84 struct attribute *__attr,
85 const char *buf, size_t count)
86{
87 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
88 struct iommu_group *group = to_iommu_group(kobj);
89 ssize_t ret = -EIO;
90
91 if (attr->store)
92 ret = attr->store(group, buf, count);
93 return ret;
94}
95
96static const struct sysfs_ops iommu_group_sysfs_ops = {
97 .show = iommu_group_attr_show,
98 .store = iommu_group_attr_store,
99};
37 100
38 return sprintf(buf, "%u", groupid); 101static int iommu_group_create_file(struct iommu_group *group,
102 struct iommu_group_attribute *attr)
103{
104 return sysfs_create_file(&group->kobj, &attr->attr);
39} 105}
40static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
41 106
42static int add_iommu_group(struct device *dev, void *data) 107static void iommu_group_remove_file(struct iommu_group *group,
108 struct iommu_group_attribute *attr)
109{
110 sysfs_remove_file(&group->kobj, &attr->attr);
111}
112
113static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
114{
115 return sprintf(buf, "%s\n", group->name);
116}
117
118static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
119
120static void iommu_group_release(struct kobject *kobj)
121{
122 struct iommu_group *group = to_iommu_group(kobj);
123
124 if (group->iommu_data_release)
125 group->iommu_data_release(group->iommu_data);
126
127 mutex_lock(&iommu_group_mutex);
128 ida_remove(&iommu_group_ida, group->id);
129 mutex_unlock(&iommu_group_mutex);
130
131 kfree(group->name);
132 kfree(group);
133}
134
135static struct kobj_type iommu_group_ktype = {
136 .sysfs_ops = &iommu_group_sysfs_ops,
137 .release = iommu_group_release,
138};
139
140/**
141 * iommu_group_alloc - Allocate a new group
142 * @name: Optional name to associate with group, visible in sysfs
143 *
144 * This function is called by an iommu driver to allocate a new iommu
145 * group. The iommu group represents the minimum granularity of the iommu.
146 * Upon successful return, the caller holds a reference to the supplied
147 * group in order to hold the group until devices are added. Use
148 * iommu_group_put() to release this extra reference count, allowing the
149 * group to be automatically reclaimed once it has no devices or external
150 * references.
151 */
152struct iommu_group *iommu_group_alloc(void)
153{
154 struct iommu_group *group;
155 int ret;
156
157 group = kzalloc(sizeof(*group), GFP_KERNEL);
158 if (!group)
159 return ERR_PTR(-ENOMEM);
160
161 group->kobj.kset = iommu_group_kset;
162 mutex_init(&group->mutex);
163 INIT_LIST_HEAD(&group->devices);
164 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
165
166 mutex_lock(&iommu_group_mutex);
167
168again:
169 if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
170 kfree(group);
171 mutex_unlock(&iommu_group_mutex);
172 return ERR_PTR(-ENOMEM);
173 }
174
175 if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
176 goto again;
177
178 mutex_unlock(&iommu_group_mutex);
179
180 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
181 NULL, "%d", group->id);
182 if (ret) {
183 mutex_lock(&iommu_group_mutex);
184 ida_remove(&iommu_group_ida, group->id);
185 mutex_unlock(&iommu_group_mutex);
186 kfree(group);
187 return ERR_PTR(ret);
188 }
189
190 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
191 if (!group->devices_kobj) {
192 kobject_put(&group->kobj); /* triggers .release & free */
193 return ERR_PTR(-ENOMEM);
194 }
195
196 /*
197 * The devices_kobj holds a reference on the group kobject, so
198 * as long as that exists so will the group. We can therefore
199 * use the devices_kobj for reference counting.
200 */
201 kobject_put(&group->kobj);
202
203 return group;
204}
205EXPORT_SYMBOL_GPL(iommu_group_alloc);
206
207/**
208 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
209 * @group: the group
210 *
211 * iommu drivers can store data in the group for use when doing iommu
212 * operations. This function provides a way to retrieve it. Caller
213 * should hold a group reference.
214 */
215void *iommu_group_get_iommudata(struct iommu_group *group)
216{
217 return group->iommu_data;
218}
219EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
220
221/**
222 * iommu_group_set_iommudata - set iommu_data for a group
223 * @group: the group
224 * @iommu_data: new data
225 * @release: release function for iommu_data
226 *
227 * iommu drivers can store data in the group for use when doing iommu
228 * operations. This function provides a way to set the data after
229 * the group has been allocated. Caller should hold a group reference.
230 */
231void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
232 void (*release)(void *iommu_data))
43{ 233{
44 unsigned int groupid; 234 group->iommu_data = iommu_data;
235 group->iommu_data_release = release;
236}
237EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
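
As a sketch of the iommu_data hooks above (the struct and function names are made up for illustration), a driver might hang per-group private data off the group and let the release callback free it when the group itself is released:

#include <linux/iommu.h>
#include <linux/slab.h>

struct example_group_data {		/* hypothetical private data */
	int stream_id;
};

static void example_data_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int example_tag_group(struct iommu_group *group, int stream_id)
{
	struct example_group_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->stream_id = stream_id;
	/* Freed via example_data_release() when the group goes away. */
	iommu_group_set_iommudata(group, data, example_data_release);
	return 0;
}

While the group is alive, iommu_group_get_iommudata(group) returns the same pointer.
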
45 238
46 if (iommu_device_group(dev, &groupid) == 0) 239/**
47 return device_create_file(dev, &dev_attr_iommu_group); 240 * iommu_group_set_name - set name for a group
241 * @group: the group
242 * @name: name
243 *
244 * Allow an iommu driver to set a name for a group. When set, it will
245 * appear in a name attribute file under the group in sysfs.
246 */
247int iommu_group_set_name(struct iommu_group *group, const char *name)
248{
249 int ret;
250
251 if (group->name) {
252 iommu_group_remove_file(group, &iommu_group_attr_name);
253 kfree(group->name);
254 group->name = NULL;
255 if (!name)
256 return 0;
257 }
258
259 group->name = kstrdup(name, GFP_KERNEL);
260 if (!group->name)
261 return -ENOMEM;
262
263 ret = iommu_group_create_file(group, &iommu_group_attr_name);
264 if (ret) {
265 kfree(group->name);
266 group->name = NULL;
267 return ret;
268 }
48 269
49 return 0; 270 return 0;
50} 271}
272EXPORT_SYMBOL_GPL(iommu_group_set_name);
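
For instance (a hypothetical sketch), a driver could name a group after one of its devices; per the code above, calling iommu_group_set_name() again with NULL removes the attribute:

#include <linux/device.h>
#include <linux/iommu.h>

static int example_name_group(struct iommu_group *group, struct device *dev)
{
	/* Appears as /sys/kernel/iommu_groups/<id>/name */
	return iommu_group_set_name(group, dev_name(dev));
}
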
273
274/**
275 * iommu_group_add_device - add a device to an iommu group
276 * @group: the group into which to add the device (reference should be held)
277 * @dev: the device
278 *
279 * This function is called by an iommu driver to add a device into a
280 * group. Adding a device increments the group reference count.
281 */
282int iommu_group_add_device(struct iommu_group *group, struct device *dev)
283{
284 int ret, i = 0;
285 struct iommu_device *device;
286
287 device = kzalloc(sizeof(*device), GFP_KERNEL);
288 if (!device)
289 return -ENOMEM;
290
291 device->dev = dev;
292
293 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
294 if (ret) {
295 kfree(device);
296 return ret;
297 }
298
299 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
300rename:
301 if (!device->name) {
302 sysfs_remove_link(&dev->kobj, "iommu_group");
303 kfree(device);
304 return -ENOMEM;
305 }
306
307 ret = sysfs_create_link_nowarn(group->devices_kobj,
308 &dev->kobj, device->name);
309 if (ret) {
310 kfree(device->name);
311 if (ret == -EEXIST && i >= 0) {
312 /*
313 * Account for the slim chance of collision
314 * and append an instance to the name.
315 */
316 device->name = kasprintf(GFP_KERNEL, "%s.%d",
317 kobject_name(&dev->kobj), i++);
318 goto rename;
319 }
320
321 sysfs_remove_link(&dev->kobj, "iommu_group");
322 kfree(device);
323 return ret;
324 }
325
326 kobject_get(group->devices_kobj);
327
328 dev->iommu_group = group;
329
330 mutex_lock(&group->mutex);
331 list_add_tail(&device->list, &group->devices);
332 mutex_unlock(&group->mutex);
333
334 /* Notify any listeners about change to group. */
335 blocking_notifier_call_chain(&group->notifier,
336 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
337 return 0;
338}
339EXPORT_SYMBOL_GPL(iommu_group_add_device);
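
Tying this together, a bus IOMMU driver's add_device callback (part of the iommu_ops changes later in this patch) might look roughly like the following sketch; example_find_group() is hypothetical and stands in for whatever hardware-specific group lookup the driver performs:

#include <linux/err.h>
#include <linux/iommu.h>

static int example_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = example_find_group(dev);	/* hypothetical lookup */
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	return ret;
}
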
340
341/**
342 * iommu_group_remove_device - remove a device from its current group
343 * @dev: device to be removed
344 *
345 * This function is called by an iommu driver to remove the device from
346 * its current group. This decrements the iommu group reference count.
347 */
348void iommu_group_remove_device(struct device *dev)
349{
350 struct iommu_group *group = dev->iommu_group;
351 struct iommu_device *tmp_device, *device = NULL;
352
353 /* Pre-notify listeners that a device is being removed. */
354 blocking_notifier_call_chain(&group->notifier,
355 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
356
357 mutex_lock(&group->mutex);
358 list_for_each_entry(tmp_device, &group->devices, list) {
359 if (tmp_device->dev == dev) {
360 device = tmp_device;
361 list_del(&device->list);
362 break;
363 }
364 }
365 mutex_unlock(&group->mutex);
366
367 if (!device)
368 return;
369
370 sysfs_remove_link(group->devices_kobj, device->name);
371 sysfs_remove_link(&dev->kobj, "iommu_group");
372
373 kfree(device->name);
374 kfree(device);
375 dev->iommu_group = NULL;
376 kobject_put(group->devices_kobj);
377}
378EXPORT_SYMBOL_GPL(iommu_group_remove_device);
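
The matching remove_device callback is, in the simplest sketch, a straight pass-through:

static void example_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
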
379
380/**
381 * iommu_group_for_each_dev - iterate over each device in the group
382 * @group: the group
383 * @data: caller opaque data to be passed to callback function
384 * @fn: caller supplied callback function
385 *
386 * This function is called by group users to iterate over group devices.
387 * Callers should hold a reference to the group during the callback.
388 * The group->mutex is held across callbacks, which will block calls to
389 * iommu_group_add/remove_device.
390 */
391int iommu_group_for_each_dev(struct iommu_group *group, void *data,
392 int (*fn)(struct device *, void *))
393{
394 struct iommu_device *device;
395 int ret = 0;
396
397 mutex_lock(&group->mutex);
398 list_for_each_entry(device, &group->devices, list) {
399 ret = fn(device->dev, data);
400 if (ret)
401 break;
402 }
403 mutex_unlock(&group->mutex);
404 return ret;
405}
406EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
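
A small sketch of the iterator in use; a non-zero return from the callback stops the walk and is propagated back to the caller:

#include <linux/iommu.h>

static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* non-zero would abort the iteration */
}

static int example_count_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}
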
407
408/**
409 * iommu_group_get - Return the group for a device and increment reference
410 * @dev: get the group that this device belongs to
411 *
412 * This function is called by iommu drivers and users to get the group
413 * for the specified device. If found, the group is returned and the group
414 * reference is incremented, else NULL is returned.
415 */
416struct iommu_group *iommu_group_get(struct device *dev)
417{
418 struct iommu_group *group = dev->iommu_group;
419
420 if (group)
421 kobject_get(group->devices_kobj);
422
423 return group;
424}
425EXPORT_SYMBOL_GPL(iommu_group_get);
426
427/**
428 * iommu_group_put - Decrement group reference
429 * @group: the group to use
430 *
431 * This function is called by iommu drivers and users to release the
432 * iommu group. Once the reference count is zero, the group is released.
433 */
434void iommu_group_put(struct iommu_group *group)
435{
436 if (group)
437 kobject_put(group->devices_kobj);
438}
439EXPORT_SYMBOL_GPL(iommu_group_put);
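
The get/put pair gives group users a safe reference pattern; a hedged sketch:

#include <linux/device.h>
#include <linux/iommu.h>

static int example_show_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	if (!group)
		return -ENODEV;	/* device is not in an iommu group */

	dev_info(dev, "iommu group %d\n", iommu_group_id(group));
	iommu_group_put(group);
	return 0;
}
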
440
441/**
442 * iommu_group_register_notifier - Register a notifier for group changes
443 * @group: the group to watch
444 * @nb: notifier block to signal
445 *
446 * This function allows iommu group users to track changes in a group.
447 * See include/linux/iommu.h for actions sent via this notifier. Caller
448 * should hold a reference to the group throughout notifier registration.
449 */
450int iommu_group_register_notifier(struct iommu_group *group,
451 struct notifier_block *nb)
452{
453 return blocking_notifier_chain_register(&group->notifier, nb);
454}
455EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
456
457/**
458 * iommu_group_unregister_notifier - Unregister a notifier
459 * @group: the group to watch
460 * @nb: notifier block to signal
461 *
462 * Unregister a previously registered group notifier block.
463 */
464int iommu_group_unregister_notifier(struct iommu_group *group,
465 struct notifier_block *nb)
466{
467 return blocking_notifier_chain_unregister(&group->notifier, nb);
468}
469EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
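
A sketch of a group notifier user, built on the IOMMU_GROUP_NOTIFY_* actions this patch adds to include/linux/iommu.h; everything named example_* is hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/notifier.h>

static int example_group_event(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "joined watched iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_group_event,
};

/* Hold a group reference for as long as the notifier stays registered. */
static int example_watch(struct iommu_group *group)
{
	return iommu_group_register_notifier(group, &example_nb);
}
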
470
471/**
472 * iommu_group_id - Return ID for a group
473 * @group: the group to ID
474 *
475 * Return the unique ID for the group; it matches the sysfs group number.
476 */
477int iommu_group_id(struct iommu_group *group)
478{
479 return group->id;
480}
481EXPORT_SYMBOL_GPL(iommu_group_id);
51 482
52static int remove_iommu_group(struct device *dev) 483static int add_iommu_group(struct device *dev, void *data)
53{ 484{
54 unsigned int groupid; 485 struct iommu_ops *ops = data;
486
487 if (!ops->add_device)
488 return -ENODEV;
55 489
56 if (iommu_device_group(dev, &groupid) == 0) 490 WARN_ON(dev->iommu_group);
57 device_remove_file(dev, &dev_attr_iommu_group); 491
492 ops->add_device(dev);
58 493
59 return 0; 494 return 0;
60} 495}
61 496
62static int iommu_device_notifier(struct notifier_block *nb, 497static int iommu_bus_notifier(struct notifier_block *nb,
63 unsigned long action, void *data) 498 unsigned long action, void *data)
64{ 499{
65 struct device *dev = data; 500 struct device *dev = data;
501 struct iommu_ops *ops = dev->bus->iommu_ops;
502 struct iommu_group *group;
503 unsigned long group_action = 0;
504
505 /*
506 * ADD/DEL calls go into the iommu driver ops, if provided, and may
507 * result in ADD/DEL notifiers on group->notifier
508 */
509 if (action == BUS_NOTIFY_ADD_DEVICE) {
510 if (ops->add_device)
511 return ops->add_device(dev);
512 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
513 if (ops->remove_device && dev->iommu_group) {
514 ops->remove_device(dev);
515 return 0;
516 }
517 }
518
519 /*
520 * Remaining BUS_NOTIFYs get filtered and republished to the
521 * group, if anyone is listening
522 */
523 group = iommu_group_get(dev);
524 if (!group)
525 return 0;
526
527 switch (action) {
528 case BUS_NOTIFY_BIND_DRIVER:
529 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
530 break;
531 case BUS_NOTIFY_BOUND_DRIVER:
532 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
533 break;
534 case BUS_NOTIFY_UNBIND_DRIVER:
535 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
536 break;
537 case BUS_NOTIFY_UNBOUND_DRIVER:
538 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
539 break;
540 }
66 541
67 if (action == BUS_NOTIFY_ADD_DEVICE) 542 if (group_action)
68 return add_iommu_group(dev, NULL); 543 blocking_notifier_call_chain(&group->notifier,
69 else if (action == BUS_NOTIFY_DEL_DEVICE) 544 group_action, dev);
70 return remove_iommu_group(dev);
71 545
546 iommu_group_put(group);
72 return 0; 547 return 0;
73} 548}
74 549
75static struct notifier_block iommu_device_nb = { 550static struct notifier_block iommu_bus_nb = {
76 .notifier_call = iommu_device_notifier, 551 .notifier_call = iommu_bus_notifier,
77}; 552};
78 553
79static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) 554static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
80{ 555{
81 bus_register_notifier(bus, &iommu_device_nb); 556 bus_register_notifier(bus, &iommu_bus_nb);
82 bus_for_each_dev(bus, NULL, NULL, add_iommu_group); 557 bus_for_each_dev(bus, NULL, ops, add_iommu_group);
83} 558}
84 559
85/** 560/**
@@ -192,6 +667,45 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
192} 667}
193EXPORT_SYMBOL_GPL(iommu_detach_device); 668EXPORT_SYMBOL_GPL(iommu_detach_device);
194 669
670/*
671 * IOMMU groups are really the natural working unit of the IOMMU, but
672 * the IOMMU API works on domains and devices. Bridge that gap by
673 * iterating over the devices in a group. Ideally we'd have a single
674 * device which represents the requestor ID of the group, but we also
675 * allow IOMMU drivers to create policy defined minimum sets, where
676 * the physical hardware may be able to distinguish members, but we
677 * wish to group them at a higher level (e.g. untrusted multi-function
678 * PCI devices). Thus we attach each device.
679 */
680static int iommu_group_do_attach_device(struct device *dev, void *data)
681{
682 struct iommu_domain *domain = data;
683
684 return iommu_attach_device(domain, dev);
685}
686
687int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
688{
689 return iommu_group_for_each_dev(group, domain,
690 iommu_group_do_attach_device);
691}
692EXPORT_SYMBOL_GPL(iommu_attach_group);
693
694static int iommu_group_do_detach_device(struct device *dev, void *data)
695{
696 struct iommu_domain *domain = data;
697
698 iommu_detach_device(domain, dev);
699
700 return 0;
701}
702
703void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
704{
705 iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
706}
707EXPORT_SYMBOL_GPL(iommu_detach_group);
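
Group attach/detach is what callers such as device-assignment code would use. A sketch under the semantics above; note that, as written, iommu_attach_group() stops at the first failing device without rolling back earlier attaches, so a caller may want to detach on error:

#include <linux/iommu.h>

static int example_use_group(struct iommu_domain *domain,
			     struct iommu_group *group)
{
	int ret;

	ret = iommu_attach_group(domain, group);
	if (ret)
		return ret;

	/* ... iommu_map()/iommu_unmap() on the domain, DMA, ... */

	iommu_detach_group(domain, group);
	return 0;
}
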
708
195phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 709phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
196 unsigned long iova) 710 unsigned long iova)
197{ 711{
@@ -336,11 +850,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
336} 850}
337EXPORT_SYMBOL_GPL(iommu_unmap); 851EXPORT_SYMBOL_GPL(iommu_unmap);
338 852
339int iommu_device_group(struct device *dev, unsigned int *groupid) 853static int __init iommu_init(void)
340{ 854{
341 if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group) 855 iommu_group_kset = kset_create_and_add("iommu_groups",
342 return dev->bus->iommu_ops->device_group(dev, groupid); 856 NULL, kernel_kobj);
857 ida_init(&iommu_group_ida);
858 mutex_init(&iommu_group_mutex);
859
860 BUG_ON(!iommu_group_kset);
861
862 return 0;
863}
864subsys_initcall(iommu_init);
865
866int iommu_domain_get_attr(struct iommu_domain *domain,
867 enum iommu_attr attr, void *data)
868{
869 struct iommu_domain_geometry *geometry;
870 int ret = 0;
871
872 switch (attr) {
873 case DOMAIN_ATTR_GEOMETRY:
874 geometry = data;
875 *geometry = domain->geometry;
876
877 break;
878 default:
879 if (!domain->ops->domain_get_attr)
880 return -EINVAL;
881
882 ret = domain->ops->domain_get_attr(domain, attr, data);
883 }
884
885 return ret;
886}
887EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
888
889int iommu_domain_set_attr(struct iommu_domain *domain,
890 enum iommu_attr attr, void *data)
891{
892 if (!domain->ops->domain_set_attr)
893 return -EINVAL;
343 894
344 return -ENODEV; 895 return domain->ops->domain_set_attr(domain, attr, data);
345} 896}
346EXPORT_SYMBOL_GPL(iommu_device_group); 897EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
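
A sketch of the attribute interface from the caller's side, printing the DOMAIN_ATTR_GEOMETRY data that the drivers below now fill in:

#include <linux/kernel.h>
#include <linux/iommu.h>

static void example_print_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		return;

	pr_info("aperture 0x%llx-0x%llx%s\n",
		(unsigned long long)geo.aperture_start,
		(unsigned long long)geo.aperture_end,
		geo.force_aperture ? " (enforced)" : "");
}
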
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 1d29b1c66e72..151690db692c 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -1,6 +1,11 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/string.h> 2#include <linux/string.h>
3#include <linux/cpumask.h>
3#include <linux/errno.h> 4#include <linux/errno.h>
5#include <linux/msi.h>
6
7#include <asm/hw_irq.h>
8#include <asm/irq_remapping.h>
4 9
5#include "irq_remapping.h" 10#include "irq_remapping.h"
6 11
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index cee307e86606..6a8870a31668 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -226,6 +226,11 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
226 226
227 memset(priv->pgtable, 0, SZ_16K); 227 memset(priv->pgtable, 0, SZ_16K);
228 domain->priv = priv; 228 domain->priv = priv;
229
230 domain->geometry.aperture_start = 0;
231 domain->geometry.aperture_end = (1ULL << 32) - 1;
232 domain->geometry.force_aperture = true;
233
229 return 0; 234 return 0;
230 235
231fail_nomem: 236fail_nomem:
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
new file mode 100644
index 000000000000..ee249bc959f8
--- /dev/null
+++ b/drivers/iommu/of_iommu.c
@@ -0,0 +1,90 @@
1/*
2 * OF helpers for IOMMU
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/export.h>
21#include <linux/limits.h>
22#include <linux/of.h>
23
24/**
 25 * of_get_dma_window - Parse *dma-window property and return 0 if found.
26 *
27 * @dn: device node
28 * @prefix: prefix for property name if any
29 * @index: index to start to parse
 30 * @busno: Returns the bus number if supported; otherwise pass NULL
 31 * @addr: Returns the address at which DMA starts
 32 * @size: Returns the size of the range that DMA can handle
33 *
 34 * This supports different formats flexibly. An optional "prefix" can
 35 * be given for the property names. "busno" and "index" are optional
 36 * as well; pass 0 (or NULL) when not used.
37 */
38int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
39 unsigned long *busno, dma_addr_t *addr, size_t *size)
40{
41 const __be32 *dma_window, *end;
42 int bytes, cur_index = 0;
43 char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
44
45 if (!dn || !addr || !size)
46 return -EINVAL;
47
48 if (!prefix)
49 prefix = "";
50
51 snprintf(propname, sizeof(propname), "%sdma-window", prefix);
52 snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
53 snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
54
55 dma_window = of_get_property(dn, propname, &bytes);
56 if (!dma_window)
57 return -ENODEV;
58 end = dma_window + bytes / sizeof(*dma_window);
59
60 while (dma_window < end) {
61 u32 cells;
62 const void *prop;
63
64 /* busno is one cell if supported */
65 if (busno)
66 *busno = be32_to_cpup(dma_window++);
67
68 prop = of_get_property(dn, addrname, NULL);
69 if (!prop)
70 prop = of_get_property(dn, "#address-cells", NULL);
71
72 cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
73 if (!cells)
74 return -EINVAL;
75 *addr = of_read_number(dma_window, cells);
76 dma_window += cells;
77
78 prop = of_get_property(dn, sizename, NULL);
79 cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
80 if (!cells)
81 return -EINVAL;
82 *size = of_read_number(dma_window, cells);
83 dma_window += cells;
84
85 if (cur_index++ == index)
86 break;
87 }
88 return 0;
89}
90EXPORT_SYMBOL_GPL(of_get_dma_window);
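
A usage sketch for of_get_dma_window(), parsing the plain "dma-window" form that tegra-smmu.c below consumes (no prefix, index 0, no bus number); the function name is hypothetical:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

static int example_parse_window(struct device_node *dn)
{
	dma_addr_t base;
	size_t size;
	int err;

	err = of_get_dma_window(dn, NULL, 0, NULL, &base, &size);
	if (err)
		return err;

	pr_info("dma-window: base 0x%llx size 0x%zx\n",
		(unsigned long long)base, size);
	return 0;
}
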
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e70ee2b59df9..d0b1234581be 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1148,6 +1148,10 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
1148 1148
1149 domain->priv = omap_domain; 1149 domain->priv = omap_domain;
1150 1150
1151 domain->geometry.aperture_start = 0;
1152 domain->geometry.aperture_end = (1ULL << 32) - 1;
1153 domain->geometry.force_aperture = true;
1154
1151 return 0; 1155 return 0;
1152 1156
1153fail_nomem: 1157fail_nomem:
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 0c0a37792218..c16e8fc8a4bd 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -165,6 +165,11 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
165 return -EINVAL; 165 return -EINVAL;
166 domain->priv = gart; 166 domain->priv = gart;
167 167
168 domain->geometry.aperture_start = gart->iovmm_base;
169 domain->geometry.aperture_end = gart->iovmm_base +
170 gart->page_count * GART_PAGE_SIZE - 1;
171 domain->geometry.force_aperture = true;
172
168 client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); 173 client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
169 if (!client) 174 if (!client)
170 return -ENOMEM; 175 return -ENOMEM;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3f3d09d560ea..4ba325ab6262 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -30,12 +30,15 @@
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/iommu.h> 31#include <linux/iommu.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/of.h>
34#include <linux/of_iommu.h>
33 35
34#include <asm/page.h> 36#include <asm/page.h>
35#include <asm/cacheflush.h> 37#include <asm/cacheflush.h>
36 38
37#include <mach/iomap.h> 39#include <mach/iomap.h>
38#include <mach/smmu.h> 40#include <mach/smmu.h>
41#include <mach/tegra-ahb.h>
39 42
40/* bitmap of the page sizes currently supported */ 43/* bitmap of the page sizes currently supported */
41#define SMMU_IOMMU_PGSIZES (SZ_4K) 44#define SMMU_IOMMU_PGSIZES (SZ_4K)
@@ -111,12 +114,6 @@
111 114
112#define SMMU_PDE_NEXT_SHIFT 28 115#define SMMU_PDE_NEXT_SHIFT 28
113 116
114/* AHB Arbiter Registers */
115#define AHB_XBAR_CTRL 0xe0
116#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE 1
117#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT 17
118
119#define SMMU_NUM_ASIDS 4
120#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000 117#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
121#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */ 118#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
122#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000 119#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
@@ -136,6 +133,7 @@
136 133
137#define SMMU_PAGE_SHIFT 12 134#define SMMU_PAGE_SHIFT 12
138#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT) 135#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
136#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
139 137
140#define SMMU_PDIR_COUNT 1024 138#define SMMU_PDIR_COUNT 1024
141#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT) 139#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
@@ -177,6 +175,8 @@
177#define SMMU_ASID_DISABLE 0 175#define SMMU_ASID_DISABLE 0
178#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0)) 176#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
179 177
178#define NUM_SMMU_REG_BANKS 3
179
180#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1) 180#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
181#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0) 181#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
182#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1) 182#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
@@ -235,14 +235,12 @@ struct smmu_as {
235 * Per SMMU device - IOMMU device 235 * Per SMMU device - IOMMU device
236 */ 236 */
237struct smmu_device { 237struct smmu_device {
238 void __iomem *regs, *regs_ahbarb; 238 void __iomem *regs[NUM_SMMU_REG_BANKS];
239 unsigned long iovmm_base; /* remappable base address */ 239 unsigned long iovmm_base; /* remappable base address */
240 unsigned long page_count; /* total remappable size */ 240 unsigned long page_count; /* total remappable size */
241 spinlock_t lock; 241 spinlock_t lock;
242 char *name; 242 char *name;
243 struct device *dev; 243 struct device *dev;
244 int num_as;
245 struct smmu_as *as; /* Run-time allocated array */
246 struct page *avp_vector_page; /* dummy page shared by all AS's */ 244 struct page *avp_vector_page; /* dummy page shared by all AS's */
247 245
248 /* 246 /*
@@ -252,29 +250,50 @@ struct smmu_device {
252 unsigned long translation_enable_1; 250 unsigned long translation_enable_1;
253 unsigned long translation_enable_2; 251 unsigned long translation_enable_2;
254 unsigned long asid_security; 252 unsigned long asid_security;
253
254 struct device_node *ahb;
255
256 int num_as;
257 struct smmu_as as[0]; /* Run-time allocated array */
255}; 258};
256 259
257static struct smmu_device *smmu_handle; /* unique for a system */ 260static struct smmu_device *smmu_handle; /* unique for a system */
258 261
259/* 262/*
260 * SMMU/AHB register accessors 263 * SMMU register accessors
261 */ 264 */
262static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) 265static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
263{ 266{
264 return readl(smmu->regs + offs); 267 BUG_ON(offs < 0x10);
265} 268 if (offs < 0x3c)
266static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) 269 return readl(smmu->regs[0] + offs - 0x10);
267{ 270 BUG_ON(offs < 0x1f0);
268 writel(val, smmu->regs + offs); 271 if (offs < 0x200)
272 return readl(smmu->regs[1] + offs - 0x1f0);
273 BUG_ON(offs < 0x228);
274 if (offs < 0x284)
275 return readl(smmu->regs[2] + offs - 0x228);
276 BUG();
269} 277}
270 278
271static inline u32 ahb_read(struct smmu_device *smmu, size_t offs) 279static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
272{
273 return readl(smmu->regs_ahbarb + offs);
274}
275static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
276{ 280{
277 writel(val, smmu->regs_ahbarb + offs); 281 BUG_ON(offs < 0x10);
282 if (offs < 0x3c) {
283 writel(val, smmu->regs[0] + offs - 0x10);
284 return;
285 }
286 BUG_ON(offs < 0x1f0);
287 if (offs < 0x200) {
288 writel(val, smmu->regs[1] + offs - 0x1f0);
289 return;
290 }
291 BUG_ON(offs < 0x228);
292 if (offs < 0x284) {
293 writel(val, smmu->regs[2] + offs - 0x228);
294 return;
295 }
296 BUG();
278} 297}
279 298
280#define VA_PAGE_TO_PA(va, page) \ 299#define VA_PAGE_TO_PA(va, page) \
@@ -370,7 +389,7 @@ static void smmu_flush_regs(struct smmu_device *smmu, int enable)
370 FLUSH_SMMU_REGS(smmu); 389 FLUSH_SMMU_REGS(smmu);
371} 390}
372 391
373static void smmu_setup_regs(struct smmu_device *smmu) 392static int smmu_setup_regs(struct smmu_device *smmu)
374{ 393{
375 int i; 394 int i;
376 u32 val; 395 u32 val;
@@ -398,10 +417,7 @@ static void smmu_setup_regs(struct smmu_device *smmu)
398 417
399 smmu_flush_regs(smmu, 1); 418 smmu_flush_regs(smmu, 1);
400 419
401 val = ahb_read(smmu, AHB_XBAR_CTRL); 420 return tegra_ahb_enable_smmu(smmu->ahb);
402 val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
403 AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
404 ahb_write(smmu, val, AHB_XBAR_CTRL);
405} 421}
406 422
407static void flush_ptc_and_tlb(struct smmu_device *smmu, 423static void flush_ptc_and_tlb(struct smmu_device *smmu,
@@ -537,33 +553,42 @@ static inline void put_signature(struct smmu_as *as,
537#endif 553#endif
538 554
539/* 555/*
540 * Caller must lock/unlock as 556 * Caller must not hold as->lock
541 */ 557 */
542static int alloc_pdir(struct smmu_as *as) 558static int alloc_pdir(struct smmu_as *as)
543{ 559{
544 unsigned long *pdir; 560 unsigned long *pdir, flags;
545 int pdn; 561 int pdn, err = 0;
546 u32 val; 562 u32 val;
547 struct smmu_device *smmu = as->smmu; 563 struct smmu_device *smmu = as->smmu;
564 struct page *page;
565 unsigned int *cnt;
566
567 /*
568 * do the allocation, then grab as->lock
569 */
570 cnt = devm_kzalloc(smmu->dev,
571 sizeof(cnt[0]) * SMMU_PDIR_COUNT,
572 GFP_KERNEL);
573 page = alloc_page(GFP_KERNEL | __GFP_DMA);
548 574
549 if (as->pdir_page) 575 spin_lock_irqsave(&as->lock, flags);
550 return 0;
551 576
552 as->pte_count = devm_kzalloc(smmu->dev, 577 if (as->pdir_page) {
553 sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC); 578 /* We raced, free the redundant */
554 if (!as->pte_count) { 579 err = -EAGAIN;
555 dev_err(smmu->dev, 580 goto err_out;
556 "failed to allocate smmu_device PTE cunters\n");
557 return -ENOMEM;
558 } 581 }
559 as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA); 582
560 if (!as->pdir_page) { 583 if (!page || !cnt) {
561 dev_err(smmu->dev, 584 dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
562 "failed to allocate smmu_device page directory\n"); 585 err = -ENOMEM;
563 devm_kfree(smmu->dev, as->pte_count); 586 goto err_out;
564 as->pte_count = NULL;
565 return -ENOMEM;
566 } 587 }
588
589 as->pdir_page = page;
590 as->pte_count = cnt;
591
567 SetPageReserved(as->pdir_page); 592 SetPageReserved(as->pdir_page);
568 pdir = page_address(as->pdir_page); 593 pdir = page_address(as->pdir_page);
569 594
@@ -579,7 +604,17 @@ static int alloc_pdir(struct smmu_as *as)
579 smmu_write(smmu, val, SMMU_TLB_FLUSH); 604 smmu_write(smmu, val, SMMU_TLB_FLUSH);
580 FLUSH_SMMU_REGS(as->smmu); 605 FLUSH_SMMU_REGS(as->smmu);
581 606
607 spin_unlock_irqrestore(&as->lock, flags);
608
582 return 0; 609 return 0;
610
611err_out:
612 spin_unlock_irqrestore(&as->lock, flags);
613
614 devm_kfree(smmu->dev, cnt);
615 if (page)
616 __free_page(page);
617 return err;
583} 618}
584 619
585static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) 620static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
@@ -771,30 +806,28 @@ out:
771 806
772static int smmu_iommu_domain_init(struct iommu_domain *domain) 807static int smmu_iommu_domain_init(struct iommu_domain *domain)
773{ 808{
774 int i; 809 int i, err = -ENODEV;
775 unsigned long flags; 810 unsigned long flags;
776 struct smmu_as *as; 811 struct smmu_as *as;
777 struct smmu_device *smmu = smmu_handle; 812 struct smmu_device *smmu = smmu_handle;
778 813
779 /* Look for a free AS with lock held */ 814 /* Look for a free AS with lock held */
780 for (i = 0; i < smmu->num_as; i++) { 815 for (i = 0; i < smmu->num_as; i++) {
781 struct smmu_as *tmp = &smmu->as[i]; 816 as = &smmu->as[i];
782 817 if (!as->pdir_page) {
783 spin_lock_irqsave(&tmp->lock, flags); 818 err = alloc_pdir(as);
784 if (!tmp->pdir_page) { 819 if (!err)
785 as = tmp; 820 goto found;
786 goto found;
787 } 821 }
788 spin_unlock_irqrestore(&tmp->lock, flags); 822 if (err != -EAGAIN)
823 break;
789 } 824 }
790 dev_err(smmu->dev, "no free AS\n"); 825 if (i == smmu->num_as)
791 return -ENODEV; 826 dev_err(smmu->dev, "no free AS\n");
827 return err;
792 828
793found: 829found:
794 if (alloc_pdir(as) < 0) 830 spin_lock_irqsave(&smmu->lock, flags);
795 goto err_alloc_pdir;
796
797 spin_lock(&smmu->lock);
798 831
799 /* Update PDIR register */ 832 /* Update PDIR register */
800 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); 833 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
@@ -802,17 +835,18 @@ found:
802 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA); 835 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
803 FLUSH_SMMU_REGS(smmu); 836 FLUSH_SMMU_REGS(smmu);
804 837
805 spin_unlock(&smmu->lock); 838 spin_unlock_irqrestore(&smmu->lock, flags);
806 839
807 spin_unlock_irqrestore(&as->lock, flags);
808 domain->priv = as; 840 domain->priv = as;
809 841
842 domain->geometry.aperture_start = smmu->iovmm_base;
843 domain->geometry.aperture_end = smmu->iovmm_base +
844 smmu->page_count * SMMU_PAGE_SIZE - 1;
845 domain->geometry.force_aperture = true;
846
810 dev_dbg(smmu->dev, "smmu_as@%p\n", as); 847 dev_dbg(smmu->dev, "smmu_as@%p\n", as);
811 return 0;
812 848
813err_alloc_pdir: 849 return 0;
814 spin_unlock_irqrestore(&as->lock, flags);
815 return -ENODEV;
816} 850}
817 851
818static void smmu_iommu_domain_destroy(struct iommu_domain *domain) 852static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
@@ -873,65 +907,73 @@ static int tegra_smmu_resume(struct device *dev)
873{ 907{
874 struct smmu_device *smmu = dev_get_drvdata(dev); 908 struct smmu_device *smmu = dev_get_drvdata(dev);
875 unsigned long flags; 909 unsigned long flags;
910 int err;
876 911
877 spin_lock_irqsave(&smmu->lock, flags); 912 spin_lock_irqsave(&smmu->lock, flags);
878 smmu_setup_regs(smmu); 913 err = smmu_setup_regs(smmu);
879 spin_unlock_irqrestore(&smmu->lock, flags); 914 spin_unlock_irqrestore(&smmu->lock, flags);
880 return 0; 915 return err;
881} 916}
882 917
883static int tegra_smmu_probe(struct platform_device *pdev) 918static int tegra_smmu_probe(struct platform_device *pdev)
884{ 919{
885 struct smmu_device *smmu; 920 struct smmu_device *smmu;
886 struct resource *regs, *regs2, *window;
887 struct device *dev = &pdev->dev; 921 struct device *dev = &pdev->dev;
888 int i, err = 0; 922 int i, asids, err = 0;
923 dma_addr_t uninitialized_var(base);
924 size_t bytes, uninitialized_var(size);
889 925
890 if (smmu_handle) 926 if (smmu_handle)
891 return -EIO; 927 return -EIO;
892 928
893 BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); 929 BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
894 930
895 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 931 if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
896 regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
897 window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
898 if (!regs || !regs2 || !window) {
899 dev_err(dev, "No SMMU resources\n");
900 return -ENODEV; 932 return -ENODEV;
901 }
902 933
903 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 934 bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
935 smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
904 if (!smmu) { 936 if (!smmu) {
905 dev_err(dev, "failed to allocate smmu_device\n"); 937 dev_err(dev, "failed to allocate smmu_device\n");
906 return -ENOMEM; 938 return -ENOMEM;
907 } 939 }
908 940
909 smmu->dev = dev; 941 for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
910 smmu->num_as = SMMU_NUM_ASIDS; 942 struct resource *res;
911 smmu->iovmm_base = (unsigned long)window->start; 943
912 smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT; 944 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
913 smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs)); 945 if (!res)
914 smmu->regs_ahbarb = devm_ioremap(dev, regs2->start, 946 return -ENODEV;
915 resource_size(regs2)); 947 smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
916 if (!smmu->regs || !smmu->regs_ahbarb) { 948 if (!smmu->regs[i])
917 dev_err(dev, "failed to remap SMMU registers\n"); 949 return -EBUSY;
918 err = -ENXIO;
919 goto fail;
920 } 950 }
921 951
952 err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
953 if (err)
954 return -ENODEV;
955
956 if (size & SMMU_PAGE_MASK)
957 return -EINVAL;
958
959 size >>= SMMU_PAGE_SHIFT;
960 if (!size)
961 return -EINVAL;
962
963 smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
964 if (!smmu->ahb)
965 return -ENODEV;
966
967 smmu->dev = dev;
968 smmu->num_as = asids;
969 smmu->iovmm_base = base;
970 smmu->page_count = size;
971
922 smmu->translation_enable_0 = ~0; 972 smmu->translation_enable_0 = ~0;
923 smmu->translation_enable_1 = ~0; 973 smmu->translation_enable_1 = ~0;
924 smmu->translation_enable_2 = ~0; 974 smmu->translation_enable_2 = ~0;
925 smmu->asid_security = 0; 975 smmu->asid_security = 0;
926 976
927 smmu->as = devm_kzalloc(dev,
928 sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
929 if (!smmu->as) {
930 dev_err(dev, "failed to allocate smmu_as\n");
931 err = -ENOMEM;
932 goto fail;
933 }
934
935 for (i = 0; i < smmu->num_as; i++) { 977 for (i = 0; i < smmu->num_as; i++) {
936 struct smmu_as *as = &smmu->as[i]; 978 struct smmu_as *as = &smmu->as[i];
937 979
@@ -945,57 +987,28 @@ static int tegra_smmu_probe(struct platform_device *pdev)
945 INIT_LIST_HEAD(&as->client); 987 INIT_LIST_HEAD(&as->client);
946 } 988 }
947 spin_lock_init(&smmu->lock); 989 spin_lock_init(&smmu->lock);
948 smmu_setup_regs(smmu); 990 err = smmu_setup_regs(smmu);
991 if (err)
992 return err;
949 platform_set_drvdata(pdev, smmu); 993 platform_set_drvdata(pdev, smmu);
950 994
951 smmu->avp_vector_page = alloc_page(GFP_KERNEL); 995 smmu->avp_vector_page = alloc_page(GFP_KERNEL);
952 if (!smmu->avp_vector_page) 996 if (!smmu->avp_vector_page)
953 goto fail; 997 return -ENOMEM;
954 998
955 smmu_handle = smmu; 999 smmu_handle = smmu;
956 return 0; 1000 return 0;
957
958fail:
959 if (smmu->avp_vector_page)
960 __free_page(smmu->avp_vector_page);
961 if (smmu->regs)
962 devm_iounmap(dev, smmu->regs);
963 if (smmu->regs_ahbarb)
964 devm_iounmap(dev, smmu->regs_ahbarb);
965 if (smmu && smmu->as) {
966 for (i = 0; i < smmu->num_as; i++) {
967 if (smmu->as[i].pdir_page) {
968 ClearPageReserved(smmu->as[i].pdir_page);
969 __free_page(smmu->as[i].pdir_page);
970 }
971 }
972 devm_kfree(dev, smmu->as);
973 }
974 devm_kfree(dev, smmu);
975 return err;
976} 1001}
977 1002
978static int tegra_smmu_remove(struct platform_device *pdev) 1003static int tegra_smmu_remove(struct platform_device *pdev)
979{ 1004{
980 struct smmu_device *smmu = platform_get_drvdata(pdev); 1005 struct smmu_device *smmu = platform_get_drvdata(pdev);
981 struct device *dev = smmu->dev; 1006 int i;
982 1007
983 smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG); 1008 smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
984 platform_set_drvdata(pdev, NULL); 1009 for (i = 0; i < smmu->num_as; i++)
985 if (smmu->as) { 1010 free_pdir(&smmu->as[i]);
986 int i; 1011 __free_page(smmu->avp_vector_page);
987
988 for (i = 0; i < smmu->num_as; i++)
989 free_pdir(&smmu->as[i]);
990 devm_kfree(dev, smmu->as);
991 }
992 if (smmu->avp_vector_page)
993 __free_page(smmu->avp_vector_page);
994 if (smmu->regs)
995 devm_iounmap(dev, smmu->regs);
996 if (smmu->regs_ahbarb)
997 devm_iounmap(dev, smmu->regs_ahbarb);
998 devm_kfree(dev, smmu);
999 smmu_handle = NULL; 1012 smmu_handle = NULL;
1000 return 0; 1013 return 0;
1001} 1014}
@@ -1005,6 +1018,14 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
1005 .resume = tegra_smmu_resume, 1018 .resume = tegra_smmu_resume,
1006}; 1019};
1007 1020
1021#ifdef CONFIG_OF
1022static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
1023 { .compatible = "nvidia,tegra30-smmu", },
1024 { },
1025};
1026MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
1027#endif
1028
1008static struct platform_driver tegra_smmu_driver = { 1029static struct platform_driver tegra_smmu_driver = {
1009 .probe = tegra_smmu_probe, 1030 .probe = tegra_smmu_probe,
1010 .remove = tegra_smmu_remove, 1031 .remove = tegra_smmu_remove,
@@ -1012,6 +1033,7 @@ static struct platform_driver tegra_smmu_driver = {
1012 .owner = THIS_MODULE, 1033 .owner = THIS_MODULE,
1013 .name = "tegra-smmu", 1034 .name = "tegra-smmu",
1014 .pm = &tegra_smmu_pm_ops, 1035 .pm = &tegra_smmu_pm_ops,
1036 .of_match_table = of_match_ptr(tegra_smmu_of_match),
1015 }, 1037 },
1016}; 1038};
1017 1039
@@ -1031,4 +1053,5 @@ module_exit(tegra_smmu_exit);
1031 1053
1032MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30"); 1054MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
1033MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); 1055MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
1056MODULE_ALIAS("platform:tegra-smmu");
1034MODULE_LICENSE("GPL v2"); 1057MODULE_LICENSE("GPL v2");
diff --git a/include/linux/device.h b/include/linux/device.h
index 6de94151ff6f..5083bccae967 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -36,6 +36,7 @@ struct subsys_private;
36struct bus_type; 36struct bus_type;
37struct device_node; 37struct device_node;
38struct iommu_ops; 38struct iommu_ops;
39struct iommu_group;
39 40
40struct bus_attribute { 41struct bus_attribute {
41 struct attribute attr; 42 struct attribute attr;
@@ -687,6 +688,7 @@ struct device {
687 const struct attribute_group **groups; /* optional groups */ 688 const struct attribute_group **groups; /* optional groups */
688 689
689 void (*release)(struct device *dev); 690 void (*release)(struct device *dev);
691 struct iommu_group *iommu_group;
690}; 692};
691 693
692/* Get the wakeup routines, which depend on struct device */ 694/* Get the wakeup routines, which depend on struct device */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 450293f6d68b..54d6d690073c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -26,6 +26,7 @@
26#define IOMMU_CACHE (4) /* DMA cache coherency */ 26#define IOMMU_CACHE (4) /* DMA cache coherency */
27 27
28struct iommu_ops; 28struct iommu_ops;
29struct iommu_group;
29struct bus_type; 30struct bus_type;
30struct device; 31struct device;
31struct iommu_domain; 32struct iommu_domain;
@@ -37,16 +38,28 @@ struct iommu_domain;
37typedef int (*iommu_fault_handler_t)(struct iommu_domain *, 38typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
38 struct device *, unsigned long, int, void *); 39 struct device *, unsigned long, int, void *);
39 40
41struct iommu_domain_geometry {
42 dma_addr_t aperture_start; /* First address that can be mapped */
43 dma_addr_t aperture_end; /* Last address that can be mapped */
44 bool force_aperture; /* DMA only allowed in mappable range? */
45};
46
40struct iommu_domain { 47struct iommu_domain {
41 struct iommu_ops *ops; 48 struct iommu_ops *ops;
42 void *priv; 49 void *priv;
43 iommu_fault_handler_t handler; 50 iommu_fault_handler_t handler;
44 void *handler_token; 51 void *handler_token;
52 struct iommu_domain_geometry geometry;
45}; 53};
46 54
47#define IOMMU_CAP_CACHE_COHERENCY 0x1 55#define IOMMU_CAP_CACHE_COHERENCY 0x1
48#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ 56#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
49 57
58enum iommu_attr {
59 DOMAIN_ATTR_MAX,
60 DOMAIN_ATTR_GEOMETRY,
61};
62
50#ifdef CONFIG_IOMMU_API 63#ifdef CONFIG_IOMMU_API
51 64
52/** 65/**
@@ -59,7 +72,10 @@ struct iommu_domain {
59 * @unmap: unmap a physically contiguous memory region from an iommu domain 72 * @unmap: unmap a physically contiguous memory region from an iommu domain
60 * @iova_to_phys: translate iova to physical address 73 * @iova_to_phys: translate iova to physical address
61 * @domain_has_cap: domain capabilities query 74 * @domain_has_cap: domain capabilities query
62 * @commit: commit iommu domain 75 * @add_device: add device to iommu grouping
76 * @remove_device: remove device from iommu grouping
77 * @domain_get_attr: Query domain attributes
78 * @domain_set_attr: Change domain attributes
63 * @pgsize_bitmap: bitmap of supported page sizes 79 * @pgsize_bitmap: bitmap of supported page sizes
64 */ 80 */
65struct iommu_ops { 81struct iommu_ops {
@@ -75,10 +91,23 @@ struct iommu_ops {
75 unsigned long iova); 91 unsigned long iova);
76 int (*domain_has_cap)(struct iommu_domain *domain, 92 int (*domain_has_cap)(struct iommu_domain *domain,
77 unsigned long cap); 93 unsigned long cap);
94 int (*add_device)(struct device *dev);
95 void (*remove_device)(struct device *dev);
78 int (*device_group)(struct device *dev, unsigned int *groupid); 96 int (*device_group)(struct device *dev, unsigned int *groupid);
97 int (*domain_get_attr)(struct iommu_domain *domain,
98 enum iommu_attr attr, void *data);
99 int (*domain_set_attr)(struct iommu_domain *domain,
100 enum iommu_attr attr, void *data);
79 unsigned long pgsize_bitmap; 101 unsigned long pgsize_bitmap;
80}; 102};
81 103
104#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
105#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
106#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
107#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
108#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
109#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
110
82extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); 111extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
83extern bool iommu_present(struct bus_type *bus); 112extern bool iommu_present(struct bus_type *bus);
84extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); 113extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
@@ -97,7 +126,34 @@ extern int iommu_domain_has_cap(struct iommu_domain *domain,
97 unsigned long cap); 126 unsigned long cap);
98extern void iommu_set_fault_handler(struct iommu_domain *domain, 127extern void iommu_set_fault_handler(struct iommu_domain *domain,
99 iommu_fault_handler_t handler, void *token); 128 iommu_fault_handler_t handler, void *token);
100extern int iommu_device_group(struct device *dev, unsigned int *groupid); 129
130extern int iommu_attach_group(struct iommu_domain *domain,
131 struct iommu_group *group);
132extern void iommu_detach_group(struct iommu_domain *domain,
133 struct iommu_group *group);
134extern struct iommu_group *iommu_group_alloc(void);
135extern void *iommu_group_get_iommudata(struct iommu_group *group);
136extern void iommu_group_set_iommudata(struct iommu_group *group,
137 void *iommu_data,
138 void (*release)(void *iommu_data));
139extern int iommu_group_set_name(struct iommu_group *group, const char *name);
140extern int iommu_group_add_device(struct iommu_group *group,
141 struct device *dev);
142extern void iommu_group_remove_device(struct device *dev);
143extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
144 int (*fn)(struct device *, void *));
145extern struct iommu_group *iommu_group_get(struct device *dev);
146extern void iommu_group_put(struct iommu_group *group);
147extern int iommu_group_register_notifier(struct iommu_group *group,
148 struct notifier_block *nb);
149extern int iommu_group_unregister_notifier(struct iommu_group *group,
150 struct notifier_block *nb);
151extern int iommu_group_id(struct iommu_group *group);
152
153extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
154 void *data);
155extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
156 void *data);
101 157
102/** 158/**
103 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 159 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -142,6 +198,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
142#else /* CONFIG_IOMMU_API */ 198#else /* CONFIG_IOMMU_API */
143 199
144struct iommu_ops {}; 200struct iommu_ops {};
201struct iommu_group {};
145 202
146static inline bool iommu_present(struct bus_type *bus) 203static inline bool iommu_present(struct bus_type *bus)
147{ 204{
@@ -197,11 +254,88 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
197{ 254{
198} 255}
199 256
200static inline int iommu_device_group(struct device *dev, unsigned int *groupid) 257int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
258{
259 return -ENODEV;
260}
261
262void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
263{
264}
265
266struct iommu_group *iommu_group_alloc(void)
267{
268 return ERR_PTR(-ENODEV);
269}
270
271void *iommu_group_get_iommudata(struct iommu_group *group)
272{
273 return NULL;
274}
275
276void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
277 void (*release)(void *iommu_data))
278{
279}
280
281int iommu_group_set_name(struct iommu_group *group, const char *name)
282{
283 return -ENODEV;
284}
285
286int iommu_group_add_device(struct iommu_group *group, struct device *dev)
287{
288 return -ENODEV;
289}
290
291void iommu_group_remove_device(struct device *dev)
292{
293}
294
295int iommu_group_for_each_dev(struct iommu_group *group, void *data,
296 int (*fn)(struct device *, void *))
297{
298 return -ENODEV;
299}
300
301struct iommu_group *iommu_group_get(struct device *dev)
302{
303 return NULL;
304}
305
306void iommu_group_put(struct iommu_group *group)
307{
308}
309
310int iommu_group_register_notifier(struct iommu_group *group,
311 struct notifier_block *nb)
201{ 312{
202 return -ENODEV; 313 return -ENODEV;
203} 314}
204 315
316int iommu_group_unregister_notifier(struct iommu_group *group,
317 struct notifier_block *nb)
318{
319 return 0;
320}
321
322int iommu_group_id(struct iommu_group *group)
323{
324 return -ENODEV;
325}
326
327static inline int iommu_domain_get_attr(struct iommu_domain *domain,
328 enum iommu_attr attr, void *data)
329{
330 return -EINVAL;
331}
332
333static inline int iommu_domain_set_attr(struct iommu_domain *domain,
334 enum iommu_attr attr, void *data)
335{
336 return -EINVAL;
337}
338
205#endif /* CONFIG_IOMMU_API */ 339#endif /* CONFIG_IOMMU_API */
206 340
207#endif /* __LINUX_IOMMU_H */ 341#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
new file mode 100644
index 000000000000..51a560f34bca
--- /dev/null
+++ b/include/linux/of_iommu.h
@@ -0,0 +1,21 @@
1#ifndef __OF_IOMMU_H
2#define __OF_IOMMU_H
3
4#ifdef CONFIG_OF_IOMMU
5
6extern int of_get_dma_window(struct device_node *dn, const char *prefix,
7 int index, unsigned long *busno, dma_addr_t *addr,
8 size_t *size);
9
10#else
11
12static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
13 int index, unsigned long *busno, dma_addr_t *addr,
14 size_t *size)
15{
16 return -EINVAL;
17}
18
19#endif /* CONFIG_OF_IOMMU */
20
21#endif /* __OF_IOMMU_H */