Diffstat:
 -rw-r--r--  arch/arm/mach-msm/Kconfig | 19
 -rw-r--r--  arch/arm/mach-msm/Makefile | 2
 -rw-r--r--  arch/ia64/Kconfig | 24
 -rw-r--r--  arch/x86/Kconfig | 79
 -rw-r--r--  arch/x86/kernel/Makefile | 1
 -rw-r--r--  drivers/Kconfig | 2
 -rw-r--r--  drivers/Makefile | 1
 -rw-r--r--  drivers/base/Makefile | 1
 -rw-r--r--  drivers/iommu/Kconfig | 110
 -rw-r--r--  drivers/iommu/Makefile | 5
 -rw-r--r--  drivers/iommu/amd_iommu.c (renamed from arch/x86/kernel/amd_iommu.c) | 298
 -rw-r--r--  drivers/iommu/amd_iommu_init.c (renamed from arch/x86/kernel/amd_iommu_init.c) | 8
 -rw-r--r--  drivers/iommu/amd_iommu_proto.h (renamed from arch/x86/include/asm/amd_iommu_proto.h) | 2
 -rw-r--r--  drivers/iommu/amd_iommu_types.h (renamed from arch/x86/include/asm/amd_iommu_types.h) | 9
 -rw-r--r--  drivers/iommu/dmar.c (renamed from drivers/pci/dmar.c) | 0
 -rw-r--r--  drivers/iommu/intel-iommu.c (renamed from drivers/pci/intel-iommu.c) | 1
 -rw-r--r--  drivers/iommu/intr_remapping.c (renamed from drivers/pci/intr_remapping.c) | 1
 -rw-r--r--  drivers/iommu/intr_remapping.h (renamed from drivers/pci/intr_remapping.h) | 0
 -rw-r--r--  drivers/iommu/iommu.c (renamed from drivers/base/iommu.c) | 0
 -rw-r--r--  drivers/iommu/iova.c (renamed from drivers/pci/iova.c) | 0
 -rw-r--r--  drivers/iommu/msm_iommu.c (renamed from arch/arm/mach-msm/iommu.c) | 0
 -rw-r--r--  drivers/iommu/msm_iommu_dev.c (renamed from arch/arm/mach-msm/iommu_dev.c) | 0
 -rw-r--r--  drivers/pci/Makefile | 5
 -rw-r--r--  drivers/pci/pci.h | 2
 -rw-r--r--  include/linux/amd-iommu.h (renamed from arch/x86/include/asm/amd_iommu.h) | 0
 -rw-r--r--  include/linux/iommu.h | 2
 -rw-r--r--  include/linux/pci.h | 11
 27 files changed, 317 insertions, 266 deletions
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 1516896e8d17..888e92502e15 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3
 
 endmenu
 
-config MSM_IOMMU
-	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960
-	select IOMMU_API
-	default n
-	help
-	  Support for the IOMMUs found on certain Qualcomm SOCs.
-	  These IOMMUs allow virtualization of the address space used by most
-	  cores within the multimedia subsystem.
-
-	  If unsure, say N here.
-
-config IOMMU_PGTABLES_L2
-	def_bool y
-	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
 config MSM_DEBUG_UART
 	int
 	default 1 if MSM_DEBUG_UART1
@@ -205,9 +189,6 @@ config MSM_GPIOMUX
 config MSM_V2_TLMM
 	bool
 
-config IOMMU_API
-	bool
-
 config MSM_SCM
 	bool
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 9519fd28a025..b70658c5ae00 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -3,7 +3,7 @@ obj-y += clock.o
 obj-$(CONFIG_DEBUG_FS) += clock-debug.o
 
 obj-$(CONFIG_MSM_VIC) += irq-vic.o
-obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o
+obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o
 
 obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o
 obj-$(CONFIG_ARCH_MSM7X30) += dma.o
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 38280ef4a2af..7336ba653b8f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -627,27 +627,6 @@ source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on IA64_GENERIC && ACPI && EXPERIMENTAL
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
 endmenu
 
 endif
@@ -681,6 +660,3 @@ source "lib/Kconfig"
 
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-config IOMMU_API
-	def_bool (DMAR)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da349723d411..a169573c64fc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -680,33 +680,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
-config AMD_IOMMU
-	bool "AMD IOMMU support"
-	select SWIOTLB
-	select PCI_MSI
-	select PCI_IOV
-	depends on X86_64 && PCI && ACPI
-	---help---
-	  With this option you can enable support for AMD IOMMU hardware in
-	  your system. An IOMMU is a hardware component which provides
-	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
-	  can isolate the the DMA memory of different devices and protect the
-	  system from misbehaving device drivers or hardware.
-
-	  You can find out if your system has an AMD IOMMU if you look into
-	  your BIOS for an option to enable it or if you have an IVRS ACPI
-	  table.
-
-config AMD_IOMMU_STATS
-	bool "Export AMD IOMMU statistics to debugfs"
-	depends on AMD_IOMMU
-	select DEBUG_FS
-	---help---
-	  This option enables code in the AMD IOMMU driver to collect various
-	  statistics about whats happening in the driver and exports that
-	  information to userspace via debugfs.
-	  If unsure, say N.
-
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	def_bool y if X86_64
@@ -720,9 +693,6 @@ config SWIOTLB
 config IOMMU_HELPER
 	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
 
-config IOMMU_API
-	def_bool (AMD_IOMMU || DMAR)
-
 config MAXSMP
 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -1942,55 +1912,6 @@ config PCI_CNB20LE_QUIRK
 
 	  You should say N unless you know you need this.
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on PCI_MSI && ACPI && EXPERIMENTAL
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
-config DMAR_BROKEN_GFX_WA
-	bool "Workaround broken graphics drivers (going away soon)"
-	depends on DMAR && BROKEN
-	---help---
-	  Current Graphics drivers tend to use physical address
-	  for DMA and avoid using DMA APIs. Setting this config
-	  option permits the IOMMU driver to set a unity map for
-	  all the OS-visible memory. Hence the driver can continue
-	  to use physical addresses for DMA, at least until this
-	  option is removed in the 2.6.32 kernel.
-
-config DMAR_FLOPPY_WA
-	def_bool y
-	depends on DMAR
-	---help---
-	  Floppy disk drivers are known to bypass DMA API calls
-	  thereby failing to work when IOMMU is enabled. This
-	  workaround will setup a 1:1 mapping for the first
-	  16MiB to make floppy (an ISA device) work.
-
-config INTR_REMAP
-	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
-	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
-	  Supports Interrupt remapping for IO-APIC and MSI devices.
-	  To use x2apic mode in the CPU's which support x2APIC enhancements or
-	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
-
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90b06d4daee2..11817ff85399 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -123,7 +123,6 @@ ifeq ($(CONFIG_X86_64),y)
 
 	obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
 	obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
-	obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
 
 	obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
 	obj-y += vsmp_64.o
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3bb154d8c8cc..9d513188b47a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -126,4 +126,6 @@ source "drivers/hwspinlock/Kconfig"
 
 source "drivers/clocksource/Kconfig"
 
+source "drivers/iommu/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 09f3232bcdcd..2f7a71a933de 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -122,3 +122,4 @@ obj-y += ieee802154/
 obj-y += clk/
 
 obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
+obj-$(CONFIG_IOMMU_API) += iommu/
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4c5701c15f53..5ab0d07c4578 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
 obj-$(CONFIG_SMP) += topology.o
-obj-$(CONFIG_IOMMU_API) += iommu.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES) += module.o
 endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
new file mode 100644
index 000000000000..b57b3fa492f3
--- /dev/null
+++ b/drivers/iommu/Kconfig
@@ -0,0 +1,110 @@
+# IOMMU_API always gets selected by whoever wants it.
+config IOMMU_API
+	bool
+
+menuconfig IOMMU_SUPPORT
+	bool "IOMMU Hardware Support"
+	default y
+	---help---
+	  Say Y here if you want to compile device drivers for IO Memory
+	  Management Units into the kernel. These devices usually allow to
+	  remap DMA requests and/or remap interrupts from other devices on the
+	  system.
+
+if IOMMU_SUPPORT
+
+# MSM IOMMU support
+config MSM_IOMMU
+	bool "MSM IOMMU Support"
+	depends on ARCH_MSM8X60 || ARCH_MSM8960
+	select IOMMU_API
+	help
+	  Support for the IOMMUs found on certain Qualcomm SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+
+	  If unsure, say N here.
+
+config IOMMU_PGTABLES_L2
+	def_bool y
+	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+
+# AMD IOMMU support
+config AMD_IOMMU
+	bool "AMD IOMMU support"
+	select SWIOTLB
+	select PCI_MSI
+	select PCI_IOV
+	select IOMMU_API
+	depends on X86_64 && PCI && ACPI
+	---help---
+	  With this option you can enable support for AMD IOMMU hardware in
+	  your system. An IOMMU is a hardware component which provides
+	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
+	  can isolate the the DMA memory of different devices and protect the
+	  system from misbehaving device drivers or hardware.
+
+	  You can find out if your system has an AMD IOMMU if you look into
+	  your BIOS for an option to enable it or if you have an IVRS ACPI
+	  table.
+
+config AMD_IOMMU_STATS
+	bool "Export AMD IOMMU statistics to debugfs"
+	depends on AMD_IOMMU
+	select DEBUG_FS
+	---help---
+	  This option enables code in the AMD IOMMU driver to collect various
+	  statistics about whats happening in the driver and exports that
+	  information to userspace via debugfs.
+	  If unsure, say N.
+
+# Intel IOMMU support
+config DMAR
+	bool "Support for DMA Remapping Devices"
+	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+	select IOMMU_API
+	help
+	  DMA remapping (DMAR) devices support enables independent address
+	  translations for Direct Memory Access (DMA) from devices.
+	  These DMA remapping devices are reported via ACPI tables
+	  and include PCI device scope covered by these DMA
+	  remapping devices.
+
+config DMAR_DEFAULT_ON
+	def_bool y
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel.
+
+config DMAR_BROKEN_GFX_WA
+	bool "Workaround broken graphics drivers (going away soon)"
+	depends on DMAR && BROKEN && X86
+	---help---
+	  Current Graphics drivers tend to use physical address
+	  for DMA and avoid using DMA APIs. Setting this config
+	  option permits the IOMMU driver to set a unity map for
+	  all the OS-visible memory. Hence the driver can continue
+	  to use physical addresses for DMA, at least until this
+	  option is removed in the 2.6.32 kernel.
+
+config DMAR_FLOPPY_WA
+	def_bool y
+	depends on DMAR && X86
+	---help---
+	  Floppy disk drivers are known to bypass DMA API calls
+	  thereby failing to work when IOMMU is enabled. This
+	  workaround will setup a 1:1 mapping for the first
+	  16MiB to make floppy (an ISA device) work.
+
+config INTR_REMAP
+	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+	---help---
+	  Supports Interrupt remapping for IO-APIC and MSI devices.
+	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
+
+endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
new file mode 100644
index 000000000000..4d4d77df7cac
--- /dev/null
+++ b/drivers/iommu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
diff --git a/arch/x86/kernel/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7c3a95e54ec5..748eab063857 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -27,13 +27,14 @@
 #include <linux/iommu-helper.h>
 #include <linux/iommu.h>
 #include <linux/delay.h>
+#include <linux/amd-iommu.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -45,6 +46,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/* List of all available dev_data structures */
+static LIST_HEAD(dev_data_list);
+static DEFINE_SPINLOCK(dev_data_list_lock);
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -68,6 +73,67 @@ static void update_domain(struct protection_domain *domain);
  *
  ****************************************************************************/
 
+static struct iommu_dev_data *alloc_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data)
+		return NULL;
+
+	dev_data->devid = devid;
+	atomic_set(&dev_data->bind, 0);
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static void free_dev_data(struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_del(&dev_data->dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	kfree(dev_data);
+}
+
+static struct iommu_dev_data *search_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+		if (dev_data->devid == devid)
+			goto out_unlock;
+	}
+
+	dev_data = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static struct iommu_dev_data *find_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+
+	dev_data = search_dev_data(devid);
+
+	if (dev_data == NULL)
+		dev_data = alloc_dev_data(devid);
+
+	return dev_data;
+}
+
 static inline u16 get_device_id(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -138,33 +204,31 @@ static bool check_device(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
-	struct pci_dev *pdev;
-	u16 devid, alias;
+	u16 alias;
 
 	if (dev->archdata.iommu)
 		return 0;
 
-	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	dev_data = find_dev_data(get_device_id(dev));
 	if (!dev_data)
 		return -ENOMEM;
 
-	dev_data->dev = dev;
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;
 
-	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
-	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
-	if (pdev)
-		dev_data->alias = &pdev->dev;
-	else {
-		kfree(dev_data);
-		return -ENOTSUPP;
-	}
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+					dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}
 
-	atomic_set(&dev_data->bind, 0);
-
 	dev->archdata.iommu = dev_data;
 
-
 	return 0;
 }
 
@@ -184,11 +248,16 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-	kfree(dev->archdata.iommu);
+	/*
+	 * Nothing to do here - we keep dev_data around for unplugged devices
+	 * and reuse it when the device is re-plugged - not doing so would
+	 * introduce a ton of races.
+	 */
 }
 
 void __init amd_iommu_uninit_devices(void)
 {
+	struct iommu_dev_data *dev_data, *n;
 	struct pci_dev *pdev = NULL;
 
	for_each_pci_dev(pdev) {
@@ -198,6 +267,10 @@ void __init amd_iommu_uninit_devices(void)
 
 		iommu_uninit_device(&pdev->dev);
 	}
+
+	/* Free all of our dev_data structures */
+	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
+		free_dev_data(dev_data);
 }
 
 int __init amd_iommu_init_devices(void)
@@ -654,19 +727,17 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
 /*
  * Command send function for flushing on-device TLB
  */
-static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+			      u64 address, size_t size)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
-	u16 devid;
 	int qdep;
 
-	qdep = pci_ats_queue_depth(pdev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	qdep = dev_data->ats.qdep;
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
-	build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
+	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
 
 	return iommu_queue_command(iommu, &cmd);
 }
@@ -674,23 +745,19 @@ static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
 /*
  * Command send function for invalidating a device table entry
  */
-static int device_flush_dte(struct device *dev)
+static int device_flush_dte(struct iommu_dev_data *dev_data)
 {
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	u16 devid;
 	int ret;
 
-	pdev = to_pci_dev(dev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
-	ret = iommu_flush_dte(iommu, devid);
+	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (ret)
 		return ret;
 
-	if (pci_ats_enabled(pdev))
-		ret = device_flush_iotlb(dev, 0, ~0UL);
+	if (dev_data->ats.enabled)
+		ret = device_flush_iotlb(dev_data, 0, ~0UL);
 
 	return ret;
 }
@@ -721,12 +788,11 @@ static void __domain_flush_pages(struct protection_domain *domain,
 	}
 
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
 
-		if (!pci_ats_enabled(pdev))
+		if (!dev_data->ats.enabled)
 			continue;
 
-		ret |= device_flush_iotlb(dev_data->dev, address, size);
+		ret |= device_flush_iotlb(dev_data, address, size);
 	}
 
 	WARN_ON(ret);
@@ -778,7 +844,7 @@ static void domain_flush_devices(struct protection_domain *domain)
 	spin_lock_irqsave(&domain->lock, flags);
 
 	list_for_each_entry(dev_data, &domain->dev_list, list)
-		device_flush_dte(dev_data->dev);
+		device_flush_dte(dev_data);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1526,44 +1592,33 @@ static void clear_dte_entry(u16 devid)
 	amd_iommu_apply_erratum_63(devid);
 }
 
-static void do_attach(struct device *dev, struct protection_domain *domain)
+static void do_attach(struct iommu_dev_data *dev_data,
+		      struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	bool ats = false;
-	u16 devid;
-
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
-	pdev = to_pci_dev(dev);
+	bool ats;
 
-	if (amd_iommu_iotlb_sup)
-		ats = pci_ats_enabled(pdev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
+	ats = dev_data->ats.enabled;
 
 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
-	set_dte_entry(devid, domain, ats);
+	set_dte_entry(dev_data->devid, domain, ats);
 
 	/* Do reference counting */
 	domain->dev_iommu[iommu->index] += 1;
 	domain->dev_cnt += 1;
 
 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }
 
-static void do_detach(struct device *dev)
+static void do_detach(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	u16 devid;
 
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -1572,52 +1627,46 @@ static void do_detach(struct device *dev)
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
-	clear_dte_entry(devid);
+	clear_dte_entry(dev_data->devid);
 
 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }
 
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static int __attach_device(struct device *dev,
+static int __attach_device(struct iommu_dev_data *dev_data,
 			   struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data, *alias_data;
 	int ret;
 
-	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-
-	if (!alias_data)
-		return -EINVAL;
-
 	/* lock domain */
 	spin_lock(&domain->lock);
 
-	/* Some sanity checks */
-	ret = -EBUSY;
-	if (alias_data->domain != NULL &&
-	    alias_data->domain != domain)
-		goto out_unlock;
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+		/* Some sanity checks */
+		ret = -EBUSY;
+		if (alias_data->domain != NULL &&
+		    alias_data->domain != domain)
+			goto out_unlock;
 
 	if (dev_data->domain != NULL &&
 	    dev_data->domain != domain)
 		goto out_unlock;
 
 	/* Do real assignment */
-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
 		if (alias_data->domain == NULL)
-			do_attach(dev_data->alias, domain);
+			do_attach(alias_data, domain);
 
 		atomic_inc(&alias_data->bind);
 	}
 
 	if (dev_data->domain == NULL)
-		do_attach(dev, domain);
+		do_attach(dev_data, domain);
 
 	atomic_inc(&dev_data->bind);
 
@@ -1639,14 +1688,19 @@ static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;
 
-	if (amd_iommu_iotlb_sup)
-		pci_enable_ats(pdev, PAGE_SHIFT);
+	dev_data = get_dev_data(dev);
+
+	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+	}
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	ret = __attach_device(dev, domain);
+	ret = __attach_device(dev_data, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
@@ -1662,10 +1716,8 @@ static int attach_device(struct device *dev,
 /*
  * Removes a device from a protection domain (unlocked)
  */
-static void __detach_device(struct device *dev)
+static void __detach_device(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data = get_dev_data(dev);
-	struct iommu_dev_data *alias_data;
 	struct protection_domain *domain;
 	unsigned long flags;
 
@@ -1675,14 +1727,15 @@ static void __detach_device(struct device *dev)
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
 		if (atomic_dec_and_test(&alias_data->bind))
-			do_detach(dev_data->alias);
+			do_detach(alias_data);
 	}
 
 	if (atomic_dec_and_test(&dev_data->bind))
-		do_detach(dev);
+		do_detach(dev_data);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -1693,7 +1746,7 @@ static void __detach_device(struct device *dev)
 	 */
 	if (iommu_pass_through &&
 	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev, pt_domain);
+		__attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -1701,16 +1754,20 @@ static void __detach_device(struct device *dev)
  */
 static void detach_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 
+	dev_data = get_dev_data(dev);
+
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__detach_device(dev);
+	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
-		pci_disable_ats(pdev);
+	if (dev_data->ats.enabled) {
+		pci_disable_ats(to_pci_dev(dev));
+		dev_data->ats.enabled = false;
+	}
 }
 
 /*
@@ -1719,26 +1776,25 @@ static void detach_device(struct device *dev)
  */
 static struct protection_domain *domain_for_device(struct device *dev)
 {
-	struct protection_domain *dom;
-	struct iommu_dev_data *dev_data, *alias_data;
+	struct iommu_dev_data *dev_data;
+	struct protection_domain *dom = NULL;
 	unsigned long flags;
-	u16 devid;
 
-	devid = get_device_id(dev);
 	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-	if (!alias_data)
-		return NULL;
 
-	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	dom = dev_data->domain;
-	if (dom == NULL &&
-	    alias_data->domain != NULL) {
-		__attach_device(dev, alias_data->domain);
-		dom = alias_data->domain;
-	}
+	if (dev_data->domain)
+		return dev_data->domain;
 
-	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
+		if (alias_data->domain != NULL) {
+			__attach_device(dev_data, alias_data->domain);
+			dom = alias_data->domain;
+		}
+		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	}
 
 	return dom;
 }
@@ -1798,7 +1854,6 @@ static int device_change_notifier(struct notifier_block *nb,
 		goto out;
 	}
 
-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);
 
 out:
@@ -1858,11 +1913,8 @@ static void update_device_table(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
-		u16 devid = get_device_id(dev_data->dev);
-		set_dte_entry(devid, domain, pci_ats_enabled(pdev));
-	}
+	list_for_each_entry(dev_data, &domain->dev_list, list)
+		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
 }
 
 static void update_domain(struct protection_domain *domain)
@@ -2497,9 +2549,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-		struct device *dev = dev_data->dev;
-
-		__detach_device(dev);
+		__detach_device(dev_data);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2605,7 +2655,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return;
 
-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);
 }
 
@@ -2616,16 +2665,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	int ret;
-	u16 devid;
 
 	if (!check_device(dev))
 		return -EINVAL;
 
 	dev_data = dev->archdata.iommu;
 
-	devid = get_device_id(dev);
-
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	if (!iommu)
 		return -EINVAL;
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bfc8453bd98d..82d2410f4205 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -24,14 +24,16 @@
 #include <linux/syscore_ops.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
+#include <linux/amd-iommu.h>
 #include <asm/pci-direct.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/x86_init.h>
 #include <asm/iommu_table.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
 /*
  * definitions for the ACPI scanning code
  */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 55d95eb789b3..7ffaa64410b0 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -19,7 +19,7 @@
 #ifndef _ASM_X86_AMD_IOMMU_PROTO_H
 #define _ASM_X86_AMD_IOMMU_PROTO_H
 
-#include <asm/amd_iommu_types.h>
+#include "amd_iommu_types.h"
 
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 4c9982995414..5b9c5075e81a 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -310,10 +310,15 @@ struct protection_domain {
  */
 struct iommu_dev_data {
 	struct list_head list;            /* For domain->dev_list */
-	struct device *dev;               /* Device this data belong to */
-	struct device *alias;             /* The Alias Device */
+	struct list_head dev_data_list;   /* For global dev_data_list */
+	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;                    /* Domain attach reverent count */
+	u16 devid;                        /* PCI Device ID */
+	struct {
+		bool enabled;
+		int qdep;
+	} ats;                            /* ATS state */
 };
 
 /*
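
The per-device structure above is now keyed by the 16-bit devid that also indexes the AMD IOMMU device table, with the ATS state cached alongside it. As a hedged illustration only — the helper below is hypothetical and not part of this patch — the id is assumed to be the PCI bus number and devfn packed into one u16, which is what the PCI_BUS()/devfn arithmetic in the removed alias lookup implies:

static u16 example_devid(struct pci_dev *pdev)
{
	/* assumed packing: bus number in the high byte, devfn in the low byte */
	return (u16)((pdev->bus->number << 8) | pdev->devfn);
}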
diff --git a/drivers/pci/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/iommu/dmar.c
diff --git a/drivers/pci/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f02c34d26d1b..c621c98c99da 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -42,7 +42,6 @@
 #include <linux/pci-ats.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
-#include "pci.h"
 
 #define ROOT_SIZE VTD_PAGE_SIZE
 #define CONTEXT_SIZE VTD_PAGE_SIZE
diff --git a/drivers/pci/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 3607faf28a4d..1a89d4a2cadf 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -13,7 +13,6 @@
13#include "intr_remapping.h" 13#include "intr_remapping.h"
14#include <acpi/acpi.h> 14#include <acpi/acpi.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include "pci.h"
17 16
18static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 17static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
19static struct hpet_scope ir_hpet[MAX_HPET_TBS]; 18static struct hpet_scope ir_hpet[MAX_HPET_TBS];
diff --git a/drivers/pci/intr_remapping.h b/drivers/iommu/intr_remapping.h
index 5662fecfee60..5662fecfee60 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/iommu/intr_remapping.h
diff --git a/drivers/base/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3ce..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/iommu/iommu.c
diff --git a/drivers/pci/iova.c b/drivers/iommu/iova.c
index c5c274ab5c5a..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/iommu/iova.c
diff --git a/arch/arm/mach-msm/iommu.c b/drivers/iommu/msm_iommu.c
index 1a584e077c61..1a584e077c61 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/drivers/iommu/msm_iommu.c
diff --git a/arch/arm/mach-msm/iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 8e8fb079852d..8e8fb079852d 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 094308e41be5..825c02b40daa 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,11 +29,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
 # Build the Hypertransport interrupt support
 obj-$(CONFIG_HT_IRQ) += htirq.o
 
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
 obj-$(CONFIG_PCI_IOV) += iov.o
 
 #
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 731e20265ace..b7bf11dd546a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -184,8 +184,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
 	return NULL;
 }
 
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
 /* PCI slot sysfs helper code */
 #define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
 
diff --git a/arch/x86/include/asm/amd_iommu.h b/include/linux/amd-iommu.h
index a6863a2dec1f..a6863a2dec1f 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/include/linux/amd-iommu.h
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0a2ba4098996..9940319d6f9d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,6 +19,8 @@
 #ifndef __LINUX_IOMMU_H
 #define __LINUX_IOMMU_H
 
+#include <linux/errno.h>
+
 #define IOMMU_READ (1)
 #define IOMMU_WRITE (2)
 #define IOMMU_CACHE (4) /* DMA cache coherency */
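
With the generic IOMMU layer now living in drivers/iommu and built whenever IOMMU_API is selected, consumers keep programming against this header. A minimal sketch of a consumer, assuming the page-order based iommu_map()/iommu_unmap() and bus-less iommu_domain_alloc() prototypes of this tree (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/* Hypothetical round trip through the consolidated IOMMU API. */
static int example_iommu_round_trip(struct device *dev, unsigned long iova,
				    phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_found())
		return -ENODEV;		/* no IOMMU driver registered */

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* gfp_order 0: map and then unmap a single read/write page */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, iova, 0);

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}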
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c446b5ca2d38..970bfe0941c3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1589,5 +1589,16 @@ int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 			      unsigned int len, const char *kw);
 
+/**
+ * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
+ * @pdev: the PCI device
+ *
+ * if the device is PCIE, return NULL
+ * if the device isn't connected to a PCIe bridge (that is its parent is a
+ * legacy PCI bridge and the bridge is directly connected to bus 0), return its
+ * parent
+ */
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
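
Moving this declaration into include/linux/pci.h is what lets the relocated VT-d and interrupt-remapping code drop the private drivers/pci/pci.h include (see the two hunks above) while still resolving requests that originate behind a legacy PCIe-to-PCI bridge. A hedged sketch of a caller — the helper name is hypothetical and not part of this patch:

#include <linux/pci.h>

/* Hypothetical: report whether DMA from pdev is routed through an upstream bridge. */
static bool example_behind_pcie_to_pci_bridge(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_find_upstream_pcie_bridge(pdev);

	if (!bridge)
		return false;	/* native PCIe device, nothing to translate through */

	dev_info(&pdev->dev, "upstream PCIe-to-PCI bridge: %s\n", pci_name(bridge));
	return true;
}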