author     Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 19:39:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 19:39:42 -0400
commit     6d16d6d9bb6f93e6f8506cfb3e91795d6443d54f
tree       92a1c9e4b645fa6d1fffedaeb56141b66f847320
parent     431bf99d26157d56689e5de65bd27ce9f077fc3f
parent     b395fb36d59e17b9335805c10fa30fc51c8a94c6
Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
iommu/core: Fix build with INTR_REMAP=y && CONFIG_DMAR=n
iommu/amd: Don't use MSI address range for DMA addresses
iommu/amd: Move missing parts to drivers/iommu
iommu: Move iommu Kconfig entries to submenu
x86/ia64: intel-iommu: move to drivers/iommu/
x86: amd_iommu: move to drivers/iommu/
msm: iommu: move to drivers/iommu/
drivers: iommu: move to a dedicated folder
x86/amd-iommu: Store device alias as dev_data pointer
x86/amd-iommu: Search for existind dev_data before allocting a new one
x86/amd-iommu: Allow dev_data->alias to be NULL
x86/amd-iommu: Use only dev_data in low-level domain attach/detach functions
x86/amd-iommu: Use only dev_data for dte and iotlb flushing routines
x86/amd-iommu: Store ATS state in dev_data
x86/amd-iommu: Store devid in dev_data
x86/amd-iommu: Introduce global dev_data_list
x86/amd-iommu: Remove redundant device_flush_dte() calls
iommu-api: Add missing header file
Fix up trivial conflicts (independent additions close to each other) in
drivers/Makefile and include/linux/pci.h
 arch/arm/mach-msm/Kconfig | 19
 arch/arm/mach-msm/Makefile | 2
 arch/ia64/Kconfig | 24
 arch/x86/Kconfig | 79
 arch/x86/kernel/Makefile | 1
 drivers/Kconfig | 2
 drivers/Makefile | 1
 drivers/base/Makefile | 1
 drivers/iommu/Kconfig | 110
 drivers/iommu/Makefile | 5
 drivers/iommu/amd_iommu.c (renamed from arch/x86/kernel/amd_iommu.c) | 314
 drivers/iommu/amd_iommu_init.c (renamed from arch/x86/kernel/amd_iommu_init.c) | 8
 drivers/iommu/amd_iommu_proto.h (renamed from arch/x86/include/asm/amd_iommu_proto.h) | 2
 drivers/iommu/amd_iommu_types.h (renamed from arch/x86/include/asm/amd_iommu_types.h) | 9
 drivers/iommu/dmar.c (renamed from drivers/pci/dmar.c) | 0
 drivers/iommu/intel-iommu.c (renamed from drivers/pci/intel-iommu.c) | 1
 drivers/iommu/intr_remapping.c (renamed from drivers/pci/intr_remapping.c) | 1
 drivers/iommu/intr_remapping.h (renamed from drivers/pci/intr_remapping.h) | 0
 drivers/iommu/iommu.c (renamed from drivers/base/iommu.c) | 0
 drivers/iommu/iova.c (renamed from drivers/pci/iova.c) | 0
 drivers/iommu/msm_iommu.c (renamed from arch/arm/mach-msm/iommu.c) | 0
 drivers/iommu/msm_iommu_dev.c (renamed from arch/arm/mach-msm/iommu_dev.c) | 0
 drivers/pci/Makefile | 5
 drivers/pci/pci.h | 2
 include/linux/amd-iommu.h (renamed from arch/x86/include/asm/amd_iommu.h) | 0
 include/linux/iommu.h | 2
 include/linux/pci.h | 11
 27 files changed, 332 insertions(+), 267 deletions(-)
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 1516896e8d17..888e92502e15 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3
 
 endmenu
 
-config MSM_IOMMU
-	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960
-	select IOMMU_API
-	default n
-	help
-	  Support for the IOMMUs found on certain Qualcomm SOCs.
-	  These IOMMUs allow virtualization of the address space used by most
-	  cores within the multimedia subsystem.
-
-	  If unsure, say N here.
-
-config IOMMU_PGTABLES_L2
-	def_bool y
-	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
 config MSM_DEBUG_UART
 	int
 	default 1 if MSM_DEBUG_UART1
@@ -205,9 +189,6 @@ config MSM_GPIOMUX
 config MSM_V2_TLMM
 	bool
 
-config IOMMU_API
-	bool
-
 config MSM_SCM
 	bool
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 9519fd28a025..b70658c5ae00 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -3,7 +3,7 @@ obj-y += clock.o
 obj-$(CONFIG_DEBUG_FS) += clock-debug.o
 
 obj-$(CONFIG_MSM_VIC) += irq-vic.o
-obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o
+obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o
 
 obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o
 obj-$(CONFIG_ARCH_MSM7X30) += dma.o
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 38280ef4a2af..7336ba653b8f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -627,27 +627,6 @@ source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on IA64_GENERIC && ACPI && EXPERIMENTAL
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
 endmenu
 
 endif
@@ -681,6 +660,3 @@ source "lib/Kconfig"
 
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-config IOMMU_API
-	def_bool (DMAR)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 37357a599dca..7d45601b27e8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -680,33 +680,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
-config AMD_IOMMU
-	bool "AMD IOMMU support"
-	select SWIOTLB
-	select PCI_MSI
-	select PCI_IOV
-	depends on X86_64 && PCI && ACPI
-	---help---
-	  With this option you can enable support for AMD IOMMU hardware in
-	  your system. An IOMMU is a hardware component which provides
-	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
-	  can isolate the the DMA memory of different devices and protect the
-	  system from misbehaving device drivers or hardware.
-
-	  You can find out if your system has an AMD IOMMU if you look into
-	  your BIOS for an option to enable it or if you have an IVRS ACPI
-	  table.
-
-config AMD_IOMMU_STATS
-	bool "Export AMD IOMMU statistics to debugfs"
-	depends on AMD_IOMMU
-	select DEBUG_FS
-	---help---
-	  This option enables code in the AMD IOMMU driver to collect various
-	  statistics about whats happening in the driver and exports that
-	  information to userspace via debugfs.
-	  If unsure, say N.
-
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	def_bool y if X86_64
@@ -720,9 +693,6 @@ config SWIOTLB
 config IOMMU_HELPER
 	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
 
-config IOMMU_API
-	def_bool (AMD_IOMMU || DMAR)
-
 config MAXSMP
 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -1942,55 +1912,6 @@ config PCI_CNB20LE_QUIRK
 
 	  You should say N unless you know you need this.
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on PCI_MSI && ACPI && EXPERIMENTAL
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
-config DMAR_BROKEN_GFX_WA
-	bool "Workaround broken graphics drivers (going away soon)"
-	depends on DMAR && BROKEN
-	---help---
-	  Current Graphics drivers tend to use physical address
-	  for DMA and avoid using DMA APIs. Setting this config
-	  option permits the IOMMU driver to set a unity map for
-	  all the OS-visible memory. Hence the driver can continue
-	  to use physical addresses for DMA, at least until this
-	  option is removed in the 2.6.32 kernel.
-
-config DMAR_FLOPPY_WA
-	def_bool y
-	depends on DMAR
-	---help---
-	  Floppy disk drivers are known to bypass DMA API calls
-	  thereby failing to work when IOMMU is enabled. This
-	  workaround will setup a 1:1 mapping for the first
-	  16MiB to make floppy (an ISA device) work.
-
-config INTR_REMAP
-	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
-	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
-	  Supports Interrupt remapping for IO-APIC and MSI devices.
-	  To use x2apic mode in the CPU's which support x2APIC enhancements or
-	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
-
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90b06d4daee2..11817ff85399 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -123,7 +123,6 @@ ifeq ($(CONFIG_X86_64),y)
 
 obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
 
 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
 obj-y += vsmp_64.o
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 258473ce8d01..52e306dd5010 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -124,4 +124,6 @@ source "drivers/hwspinlock/Kconfig"
 
 source "drivers/clocksource/Kconfig"
 
+source "drivers/iommu/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 1bc896571a3a..939fcdeb2d31 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -123,3 +123,4 @@ obj-y += clk/
 
 obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
 obj-$(CONFIG_NFC) += nfc/
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4c5701c15f53..5ab0d07c4578 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
 obj-$(CONFIG_SMP) += topology.o
-obj-$(CONFIG_IOMMU_API) += iommu.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES) += module.o
 endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
new file mode 100644
index 000000000000..b57b3fa492f3
--- /dev/null
+++ b/drivers/iommu/Kconfig
@@ -0,0 +1,110 @@
+# IOMMU_API always gets selected by whoever wants it.
+config IOMMU_API
+	bool
+
+menuconfig IOMMU_SUPPORT
+	bool "IOMMU Hardware Support"
+	default y
+	---help---
+	  Say Y here if you want to compile device drivers for IO Memory
+	  Management Units into the kernel. These devices usually allow to
+	  remap DMA requests and/or remap interrupts from other devices on the
+	  system.
+
+if IOMMU_SUPPORT
+
+# MSM IOMMU support
+config MSM_IOMMU
+	bool "MSM IOMMU Support"
+	depends on ARCH_MSM8X60 || ARCH_MSM8960
+	select IOMMU_API
+	help
+	  Support for the IOMMUs found on certain Qualcomm SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+
+	  If unsure, say N here.
+
+config IOMMU_PGTABLES_L2
+	def_bool y
+	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+
+# AMD IOMMU support
+config AMD_IOMMU
+	bool "AMD IOMMU support"
+	select SWIOTLB
+	select PCI_MSI
+	select PCI_IOV
+	select IOMMU_API
+	depends on X86_64 && PCI && ACPI
+	---help---
+	  With this option you can enable support for AMD IOMMU hardware in
+	  your system. An IOMMU is a hardware component which provides
+	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
+	  can isolate the the DMA memory of different devices and protect the
+	  system from misbehaving device drivers or hardware.
+
+	  You can find out if your system has an AMD IOMMU if you look into
+	  your BIOS for an option to enable it or if you have an IVRS ACPI
+	  table.
+
+config AMD_IOMMU_STATS
+	bool "Export AMD IOMMU statistics to debugfs"
+	depends on AMD_IOMMU
+	select DEBUG_FS
+	---help---
+	  This option enables code in the AMD IOMMU driver to collect various
+	  statistics about whats happening in the driver and exports that
+	  information to userspace via debugfs.
+	  If unsure, say N.
+
+# Intel IOMMU support
+config DMAR
+	bool "Support for DMA Remapping Devices"
+	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+	select IOMMU_API
+	help
+	  DMA remapping (DMAR) devices support enables independent address
+	  translations for Direct Memory Access (DMA) from devices.
+	  These DMA remapping devices are reported via ACPI tables
+	  and include PCI device scope covered by these DMA
+	  remapping devices.
+
+config DMAR_DEFAULT_ON
+	def_bool y
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel.
+
+config DMAR_BROKEN_GFX_WA
+	bool "Workaround broken graphics drivers (going away soon)"
+	depends on DMAR && BROKEN && X86
+	---help---
+	  Current Graphics drivers tend to use physical address
+	  for DMA and avoid using DMA APIs. Setting this config
+	  option permits the IOMMU driver to set a unity map for
+	  all the OS-visible memory. Hence the driver can continue
+	  to use physical addresses for DMA, at least until this
+	  option is removed in the 2.6.32 kernel.
+
+config DMAR_FLOPPY_WA
+	def_bool y
+	depends on DMAR && X86
+	---help---
+	  Floppy disk drivers are known to bypass DMA API calls
+	  thereby failing to work when IOMMU is enabled. This
+	  workaround will setup a 1:1 mapping for the first
+	  16MiB to make floppy (an ISA device) work.
+
+config INTR_REMAP
+	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+	---help---
+	  Supports Interrupt remapping for IO-APIC and MSI devices.
+	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
+
+endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
new file mode 100644
index 000000000000..4d4d77df7cac
--- /dev/null
+++ b/drivers/iommu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
diff --git a/arch/x86/kernel/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7c3a95e54ec5..a14f8dc23462 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -27,13 +27,15 @@
 #include <linux/iommu-helper.h>
 #include <linux/iommu.h>
 #include <linux/delay.h>
+#include <linux/amd-iommu.h>
+#include <asm/msidef.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -45,6 +47,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/* List of all available dev_data structures */
+static LIST_HEAD(dev_data_list);
+static DEFINE_SPINLOCK(dev_data_list_lock);
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -68,6 +74,67 @@ static void update_domain(struct protection_domain *domain);
  *
  ****************************************************************************/
 
+static struct iommu_dev_data *alloc_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data)
+		return NULL;
+
+	dev_data->devid = devid;
+	atomic_set(&dev_data->bind, 0);
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static void free_dev_data(struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_del(&dev_data->dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	kfree(dev_data);
+}
+
+static struct iommu_dev_data *search_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+		if (dev_data->devid == devid)
+			goto out_unlock;
+	}
+
+	dev_data = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static struct iommu_dev_data *find_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+
+	dev_data = search_dev_data(devid);
+
+	if (dev_data == NULL)
+		dev_data = alloc_dev_data(devid);
+
+	return dev_data;
+}
+
 static inline u16 get_device_id(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
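[Editor's note: the helpers added above implement a lookup-or-allocate pattern over a global, lock-protected list, so that the dev_data created for a device ID can be found again and reused instead of being reallocated (find_dev_data() is used by iommu_init_device() in the next hunk, and the comment added to iommu_uninit_device() further down explains why entries are kept across unplug). The following stand-alone user-space sketch is not kernel code — a plain mutex and a hypothetical struct name stand in for the kernel primitives, and the search and allocation are folded into one critical section — but it shows the same idea in isolation.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Simplified stand-in for struct iommu_dev_data: one entry per device ID. */
struct dev_entry {
	uint16_t devid;
	struct dev_entry *next;
};

static struct dev_entry *dev_list;
static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look the ID up first; only allocate (and link) a new entry on a miss. */
static struct dev_entry *find_dev_entry(uint16_t devid)
{
	struct dev_entry *e;

	pthread_mutex_lock(&dev_list_lock);
	for (e = dev_list; e; e = e->next)
		if (e->devid == devid)
			goto out_unlock;

	e = calloc(1, sizeof(*e));
	if (e) {
		e->devid = devid;
		e->next  = dev_list;
		dev_list = e;
	}
out_unlock:
	pthread_mutex_unlock(&dev_list_lock);
	return e;
}

int main(void)
{
	struct dev_entry *a = find_dev_entry(0x0010);
	struct dev_entry *b = find_dev_entry(0x0010);	/* "re-plug": same entry comes back */

	printf("same entry reused: %s\n", a == b ? "yes" : "no");
	return 0;
}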
@@ -138,33 +205,31 @@ static bool check_device(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
-	struct pci_dev *pdev;
-	u16 devid, alias;
+	u16 alias;
 
 	if (dev->archdata.iommu)
 		return 0;
 
-	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	dev_data = find_dev_data(get_device_id(dev));
 	if (!dev_data)
 		return -ENOMEM;
 
-	dev_data->dev = dev;
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;
 
-	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
-	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
-	if (pdev)
-		dev_data->alias = &pdev->dev;
-	else {
-		kfree(dev_data);
-		return -ENOTSUPP;
-	}
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+				dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}
 
-	atomic_set(&dev_data->bind, 0);
-
 	dev->archdata.iommu = dev_data;
 
-
 	return 0;
 }
 
@@ -184,11 +249,16 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-	kfree(dev->archdata.iommu);
+	/*
+	 * Nothing to do here - we keep dev_data around for unplugged devices
+	 * and reuse it when the device is re-plugged - not doing so would
+	 * introduce a ton of races.
+	 */
 }
 
 void __init amd_iommu_uninit_devices(void)
 {
+	struct iommu_dev_data *dev_data, *n;
 	struct pci_dev *pdev = NULL;
 
 	for_each_pci_dev(pdev) {
@@ -198,6 +268,10 @@ void __init amd_iommu_uninit_devices(void)
 
 		iommu_uninit_device(&pdev->dev);
 	}
+
+	/* Free all of our dev_data structures */
+	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
+		free_dev_data(dev_data);
 }
 
 int __init amd_iommu_init_devices(void)
@@ -654,19 +728,17 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
 /*
  * Command send function for flushing on-device TLB
  */
-static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+			      u64 address, size_t size)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
-	u16 devid;
 	int qdep;
 
-	qdep = pci_ats_queue_depth(pdev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	qdep = dev_data->ats.qdep;
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
-	build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
+	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
 
 	return iommu_queue_command(iommu, &cmd);
 }
@@ -674,23 +746,19 @@ static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
 /*
  * Command send function for invalidating a device table entry
  */
-static int device_flush_dte(struct device *dev)
+static int device_flush_dte(struct iommu_dev_data *dev_data)
 {
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	u16 devid;
 	int ret;
 
-	pdev = to_pci_dev(dev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
-	ret = iommu_flush_dte(iommu, devid);
+	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (ret)
 		return ret;
 
-	if (pci_ats_enabled(pdev))
-		ret = device_flush_iotlb(dev, 0, ~0UL);
+	if (dev_data->ats.enabled)
+		ret = device_flush_iotlb(dev_data, 0, ~0UL);
 
 	return ret;
 }
@@ -721,12 +789,11 @@ static void __domain_flush_pages(struct protection_domain *domain,
 	}
 
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
 
-		if (!pci_ats_enabled(pdev))
+		if (!dev_data->ats.enabled)
 			continue;
 
-		ret |= device_flush_iotlb(dev_data->dev, address, size);
+		ret |= device_flush_iotlb(dev_data, address, size);
 	}
 
 	WARN_ON(ret);
@@ -778,7 +845,7 @@ static void domain_flush_devices(struct protection_domain *domain)
 	spin_lock_irqsave(&domain->lock, flags);
 
 	list_for_each_entry(dev_data, &domain->dev_list, list)
-		device_flush_dte(dev_data->dev);
+		device_flush_dte(dev_data);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1136,7 +1203,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 {
 	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
 	struct amd_iommu *iommu;
-	unsigned long i;
+	unsigned long i, old_size;
 
 #ifdef CONFIG_IOMMU_STRESS
 	populate = false;
@@ -1172,8 +1239,21 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 		}
 	}
 
+	old_size = dma_dom->aperture_size;
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
+	/* Reserve address range used for MSI messages */
+	if (old_size < MSI_ADDR_BASE_LO &&
+	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
+		unsigned long spage;
+		int pages;
+
+		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
+		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
+
+		dma_ops_reserve_addresses(dma_dom, spage, pages);
+	}
+
 	/* Initialize the exclusion range if necessary */
 	for_each_iommu(iommu) {
 		if (iommu->exclusion_start &&
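[Editor's note: the hunk above, from "iommu/amd: Don't use MSI address range for DMA addresses", carves the MSI message window out of the DMA aperture the first time the aperture grows past it, so DMA addresses are never handed out from that range. On x86 the window starts at MSI_ADDR_BASE_LO = 0xfee00000, and the 0x10000-byte range works out to 16 pages of 4 KiB starting at page frame 0xfee00. The small stand-alone C program below only reproduces that arithmetic for illustration — the helper is a local reimplementation equivalent in result to the kernel's iommu_num_pages(), with the constants hard-coded.]

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* x86 MSI messages are memory writes targeting this physical window. */
#define MSI_ADDR_BASE_LO	0xfee00000UL

/* Number of pages spanned by [addr, addr + len), like iommu_num_pages(). */
static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = ((addr + len + PAGE_SIZE - 1) & PAGE_MASK) -
			     (addr & PAGE_MASK);
	return size >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long pages = num_pages(MSI_ADDR_BASE_LO, 0x10000);
	unsigned long spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;

	/* Expect: reserve 16 pages starting at page frame 0xfee00. */
	printf("reserve %lu pages starting at pfn 0x%lx\n", pages, spage);
	return 0;
}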
@@ -1526,44 +1606,33 @@ static void clear_dte_entry(u16 devid)
 	amd_iommu_apply_erratum_63(devid);
 }
 
-static void do_attach(struct device *dev, struct protection_domain *domain)
+static void do_attach(struct iommu_dev_data *dev_data,
+		      struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	bool ats = false;
-	u16 devid;
+	bool ats;
 
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
-	pdev = to_pci_dev(dev);
-
-	if (amd_iommu_iotlb_sup)
-		ats = pci_ats_enabled(pdev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
+	ats = dev_data->ats.enabled;
 
 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
-	set_dte_entry(devid, domain, ats);
+	set_dte_entry(dev_data->devid, domain, ats);
 
 	/* Do reference counting */
 	domain->dev_iommu[iommu->index] += 1;
 	domain->dev_cnt += 1;
 
 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }
 
-static void do_detach(struct device *dev)
+static void do_detach(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	u16 devid;
 
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -1572,52 +1641,46 @@ static void do_detach(struct device *dev)
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
-	clear_dte_entry(devid);
+	clear_dte_entry(dev_data->devid);
 
 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }
 
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static int __attach_device(struct device *dev,
+static int __attach_device(struct iommu_dev_data *dev_data,
 			   struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data, *alias_data;
 	int ret;
 
-	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-
-	if (!alias_data)
-		return -EINVAL;
-
 	/* lock domain */
 	spin_lock(&domain->lock);
 
-	/* Some sanity checks */
-	ret = -EBUSY;
-	if (alias_data->domain != NULL &&
-	    alias_data->domain != domain)
-		goto out_unlock;
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
 
-	if (dev_data->domain != NULL &&
-	    dev_data->domain != domain)
-		goto out_unlock;
+		/* Some sanity checks */
+		ret = -EBUSY;
+		if (alias_data->domain != NULL &&
+		    alias_data->domain != domain)
+			goto out_unlock;
 
-	/* Do real assignment */
-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
+		if (dev_data->domain != NULL &&
+		    dev_data->domain != domain)
+			goto out_unlock;
+
+		/* Do real assignment */
 		if (alias_data->domain == NULL)
-			do_attach(dev_data->alias, domain);
+			do_attach(alias_data, domain);
 
 		atomic_inc(&alias_data->bind);
 	}
 
 	if (dev_data->domain == NULL)
-		do_attach(dev, domain);
+		do_attach(dev_data, domain);
 
 	atomic_inc(&dev_data->bind);
 
@@ -1639,14 +1702,19 @@ static int attach_device(struct device *dev,
 			   struct protection_domain *domain)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;
 
-	if (amd_iommu_iotlb_sup)
-		pci_enable_ats(pdev, PAGE_SHIFT);
+	dev_data = get_dev_data(dev);
+
+	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+	}
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	ret = __attach_device(dev, domain);
+	ret = __attach_device(dev_data, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
@@ -1662,10 +1730,8 @@ static int attach_device(struct device *dev,
 /*
  * Removes a device from a protection domain (unlocked)
  */
-static void __detach_device(struct device *dev)
+static void __detach_device(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data = get_dev_data(dev);
-	struct iommu_dev_data *alias_data;
 	struct protection_domain *domain;
 	unsigned long flags;
 
@@ -1675,14 +1741,15 @@ static void __detach_device(struct device *dev)
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
 		if (atomic_dec_and_test(&alias_data->bind))
-			do_detach(dev_data->alias);
+			do_detach(alias_data);
 	}
 
 	if (atomic_dec_and_test(&dev_data->bind))
-		do_detach(dev);
+		do_detach(dev_data);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -1693,7 +1760,7 @@ static void __detach_device(struct device *dev)
 	 */
 	if (iommu_pass_through &&
 	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev, pt_domain);
+		__attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -1701,16 +1768,20 @@ static void __detach_device(struct device *dev)
  */
 static void detach_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 
+	dev_data = get_dev_data(dev);
+
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__detach_device(dev);
+	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
-		pci_disable_ats(pdev);
+	if (dev_data->ats.enabled) {
+		pci_disable_ats(to_pci_dev(dev));
+		dev_data->ats.enabled = false;
+	}
 }
 
 /*
@@ -1719,26 +1790,25 @@ static void detach_device(struct device *dev)
  */
 static struct protection_domain *domain_for_device(struct device *dev)
 {
-	struct protection_domain *dom;
-	struct iommu_dev_data *dev_data, *alias_data;
+	struct iommu_dev_data *dev_data;
+	struct protection_domain *dom = NULL;
 	unsigned long flags;
-	u16 devid;
 
-	devid = get_device_id(dev);
 	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-	if (!alias_data)
-		return NULL;
 
-	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	dom = dev_data->domain;
-	if (dom == NULL &&
-	    alias_data->domain != NULL) {
-		__attach_device(dev, alias_data->domain);
-		dom = alias_data->domain;
-	}
+	if (dev_data->domain)
+		return dev_data->domain;
+
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
 
-	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
+		if (alias_data->domain != NULL) {
+			__attach_device(dev_data, alias_data->domain);
+			dom = alias_data->domain;
+		}
+		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	}
 
 	return dom;
 }
@@ -1798,7 +1868,6 @@ static int device_change_notifier(struct notifier_block *nb,
 		goto out;
 	}
 
-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);
 
 out:
@@ -1858,11 +1927,8 @@ static void update_device_table(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
-		u16 devid = get_device_id(dev_data->dev);
-		set_dte_entry(devid, domain, pci_ats_enabled(pdev));
-	}
+	list_for_each_entry(dev_data, &domain->dev_list, list)
+		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
 }
 
 static void update_domain(struct protection_domain *domain)
@@ -2497,9 +2563,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-		struct device *dev = dev_data->dev;
-
-		__detach_device(dev);
+		__detach_device(dev_data);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2605,7 +2669,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return;
 
-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);
 }
 
@@ -2616,16 +2679,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	int ret;
-	u16 devid;
 
 	if (!check_device(dev))
 		return -EINVAL;
 
 	dev_data = dev->archdata.iommu;
 
-	devid = get_device_id(dev);
-
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	if (!iommu)
 		return -EINVAL;
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bfc8453bd98d..82d2410f4205 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -24,14 +24,16 @@
 #include <linux/syscore_ops.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
+#include <linux/amd-iommu.h>
 #include <asm/pci-direct.h>
-#include <asm/amd_iommu_proto.h>
-#include <asm/amd_iommu_types.h>
-#include <asm/amd_iommu.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/x86_init.h>
 #include <asm/iommu_table.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
 /*
  * definitions for the ACPI scanning code
  */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 55d95eb789b3..7ffaa64410b0 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -19,7 +19,7 @@
 #ifndef _ASM_X86_AMD_IOMMU_PROTO_H
 #define _ASM_X86_AMD_IOMMU_PROTO_H
 
-#include <asm/amd_iommu_types.h>
+#include "amd_iommu_types.h"
 
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 4c9982995414..5b9c5075e81a 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -310,10 +310,15 @@ struct protection_domain {
  */
 struct iommu_dev_data {
 	struct list_head list;            /* For domain->dev_list */
-	struct device *dev;               /* Device this data belong to */
-	struct device *alias;             /* The Alias Device */
+	struct list_head dev_data_list;   /* For global dev_data_list */
+	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;                    /* Domain attach reverent count */
+	u16 devid;                        /* PCI Device ID */
+	struct {
+		bool enabled;
+		int qdep;
+	} ats;                            /* ATS state */
 };
 
 /*
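[Editor's note: with devid and the ats sub-struct cached in iommu_dev_data, the attach and flush paths in the amd_iommu.c hunks above no longer query PCI config space (pci_ats_enabled()/pci_ats_queue_depth()) on every operation — the ATS capability is probed once in attach_device() and the cached values are consumed afterwards. The stand-alone user-space sketch below only models that design choice; the struct, function names, and the stubbed "PCI query" are hypothetical stand-ins, not the kernel code.]

#include <stdbool.h>
#include <stdio.h>

/* Simplified mirror of the new per-device state. */
struct dev_state {
	unsigned short devid;
	struct {
		bool enabled;
		int  qdep;
	} ats;
};

/* Stub standing in for the one-time PCI config-space query. */
static int query_ats_queue_depth(unsigned short devid)
{
	(void)devid;
	return 32;	/* pretend the capability reports a queue depth of 32 */
}

/* "attach": query the hardware once and cache the result. */
static void attach(struct dev_state *dev)
{
	dev->ats.enabled = true;
	dev->ats.qdep    = query_ats_queue_depth(dev->devid);
}

/* "flush": the hot path only reads the cached state. */
static void flush(const struct dev_state *dev)
{
	if (!dev->ats.enabled)
		return;
	printf("flush IOTLB of device %#x with queue depth %d\n",
	       dev->devid, dev->ats.qdep);
}

int main(void)
{
	struct dev_state dev = { .devid = 0x0010 };

	attach(&dev);
	flush(&dev);
	return 0;
}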
diff --git a/drivers/pci/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/iommu/dmar.c
diff --git a/drivers/pci/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f02c34d26d1b..c621c98c99da 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -42,7 +42,6 @@
 #include <linux/pci-ats.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
-#include "pci.h"
 
 #define ROOT_SIZE	VTD_PAGE_SIZE
 #define CONTEXT_SIZE	VTD_PAGE_SIZE
diff --git a/drivers/pci/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 3607faf28a4d..1a89d4a2cadf 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -13,7 +13,6 @@
 #include "intr_remapping.h"
 #include <acpi/acpi.h>
 #include <asm/pci-direct.h>
-#include "pci.h"
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
diff --git a/drivers/pci/intr_remapping.h b/drivers/iommu/intr_remapping.h
index 5662fecfee60..5662fecfee60 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/iommu/intr_remapping.h
diff --git a/drivers/base/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3ce..6e6b6a11b3ce 100644
--- a/drivers/base/iommu.c
+++ b/drivers/iommu/iommu.c
diff --git a/drivers/pci/iova.c b/drivers/iommu/iova.c
index c5c274ab5c5a..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/iommu/iova.c
diff --git a/arch/arm/mach-msm/iommu.c b/drivers/iommu/msm_iommu.c
index 1a584e077c61..1a584e077c61 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/drivers/iommu/msm_iommu.c
diff --git a/arch/arm/mach-msm/iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 8e8fb079852d..8e8fb079852d 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 631f73027608..6fadae3ad134 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,11 +29,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
 # Build the Hypertransport interrupt support
 obj-$(CONFIG_HT_IRQ) += htirq.o
 
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
 obj-$(CONFIG_PCI_IOV) += iov.o
 
 #
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3a39bf1f1e2c..c8cee764b0de 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -186,8 +186,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
 	return NULL;
 }
 
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
 /* PCI slot sysfs helper code */
 #define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
 
diff --git a/arch/x86/include/asm/amd_iommu.h b/include/linux/amd-iommu.h
index a6863a2dec1f..a6863a2dec1f 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/include/linux/amd-iommu.h
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0a2ba4098996..9940319d6f9d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,6 +19,8 @@
 #ifndef __LINUX_IOMMU_H
 #define __LINUX_IOMMU_H
 
+#include <linux/errno.h>
+
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
 #define IOMMU_CACHE	(4) /* DMA cache coherency */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2d292182dde5..4e4203a96312 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1617,5 +1617,16 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 #endif /* CONFIG_OF */
 
+/**
+ * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
+ * @pdev: the PCI device
+ *
+ * if the device is PCIE, return NULL
+ * if the device isn't connected to a PCIe bridge (that is its parent is a
+ * legacy PCI bridge and the bridge is directly connected to bus 0), return its
+ * parent
+ */
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */