author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:11:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:11:53 -0400
commit     982653009b883ef1529089e3e6f1ae2fee41cbe2
tree       eec3b1fe947d442ee204a2d648133bc5223e5c59
parent     37d96c28ecf0af1215bb6bbf580dbb1fabb5a6ec
parent     c020570138f5d9cb1fc0a853f9cf9e641178b5c5
Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, ioapic: Consolidate the explicit EOI code
x86, ioapic: Restore the mask bit correctly in eoi_ioapic_irq()
x86, kdump, ioapic: Reset remote-IRR in clear_IO_APIC
iommu: Rename the DMAR and INTR_REMAP config options
x86, ioapic: Define irq_remap_modify_chip_defaults()
x86, msi, intr-remap: Use the ioapic set affinity routine
iommu: Cleanup ifdefs in detect_intel_iommu()
iommu: No need to set dmar_disabled in check_zero_address()
iommu: Move IOMMU specific code to intel-iommu.c
intr_remap: Call dmar_dev_scope_init() explicitly
x86, x2apic: Enable the bios request for x2apic optout
27 files changed, 474 insertions, 405 deletions
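
The bulk of this series is a mechanical rename of the Intel IOMMU and interrupt-remapping config symbols: CONFIG_DMAR becomes CONFIG_INTEL_IOMMU, CONFIG_INTR_REMAP becomes CONFIG_IRQ_REMAP, and the shared ACPI DMAR table parsing moves under a new hidden CONFIG_DMAR_TABLE symbol that both of them select. A minimal, hypothetical sketch of the effect on conditionally compiled code (the functions below are placeholders for illustration, not kernel APIs) looks like this:

#ifdef CONFIG_INTEL_IOMMU		/* was: CONFIG_DMAR */
static void example_init_dma_remapping(void)
{
	/* VT-d (DMA remapping) specific setup would go here. */
}
#endif

#ifdef CONFIG_IRQ_REMAP			/* was: CONFIG_INTR_REMAP */
static void example_init_irq_remapping(void)
{
	/* Interrupt-remapping specific setup would go here. */
}
#endif
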
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 2af94a23a6a4..a8ba119a4d53 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1020,10 +1020,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1020 | has the capability. With this option, super page will | 1020 | has the capability. With this option, super page will |
1021 | not be supported. | 1021 | not be supported. |
1022 | intremap= [X86-64, Intel-IOMMU] | 1022 | intremap= [X86-64, Intel-IOMMU] |
1023 | Format: { on (default) | off | nosid } | ||
1024 | on enable Interrupt Remapping (default) | 1023 | on enable Interrupt Remapping (default) |
1025 | off disable Interrupt Remapping | 1024 | off disable Interrupt Remapping |
1026 | nosid disable Source ID checking | 1025 | nosid disable Source ID checking |
1026 | no_x2apic_optout | ||
1027 | BIOS x2APIC opt-out request will be ignored | ||
1027 | 1028 | ||
1028 | inttest= [IA-64] | 1029 | inttest= [IA-64] |
1029 | 1030 | ||
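
For reference, the new flag documented above is passed as a value of the existing intremap= boot option; for example (illustrative command line only):

	intremap=no_x2apic_optout

which leaves interrupt remapping enabled but makes the kernel ignore a BIOS request to opt out of x2APIC.
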
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 0e5cd1405e0e..43ab1cd097a5 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
@@ -234,4 +234,4 @@ CONFIG_CRYPTO_MD5=y | |||
234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
235 | CONFIG_CRC_T10DIF=y | 235 | CONFIG_CRC_T10DIF=y |
236 | CONFIG_MISC_DEVICES=y | 236 | CONFIG_MISC_DEVICES=y |
237 | CONFIG_DMAR=y | 237 | CONFIG_INTEL_IOMMU=y |
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile index 2f7caddf093e..ae16ec4f6308 100644 --- a/arch/ia64/dig/Makefile +++ b/arch/ia64/dig/Makefile | |||
@@ -6,7 +6,7 @@ | |||
6 | # | 6 | # |
7 | 7 | ||
8 | obj-y := setup.o | 8 | obj-y := setup.o |
9 | ifeq ($(CONFIG_DMAR), y) | 9 | ifeq ($(CONFIG_INTEL_IOMMU), y) |
10 | obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o | 10 | obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o |
11 | else | 11 | else |
12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h index d66d446b127c..d05e78f6db94 100644 --- a/arch/ia64/include/asm/device.h +++ b/arch/ia64/include/asm/device.h | |||
@@ -10,7 +10,7 @@ struct dev_archdata { | |||
10 | #ifdef CONFIG_ACPI | 10 | #ifdef CONFIG_ACPI |
11 | void *acpi_handle; | 11 | void *acpi_handle; |
12 | #endif | 12 | #endif |
13 | #ifdef CONFIG_DMAR | 13 | #ifdef CONFIG_INTEL_IOMMU |
14 | void *iommu; /* hook for IOMMU specific extension */ | 14 | void *iommu; /* hook for IOMMU specific extension */ |
15 | #endif | 15 | #endif |
16 | }; | 16 | }; |
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h index 745e095fe82e..105c93b00b1b 100644 --- a/arch/ia64/include/asm/iommu.h +++ b/arch/ia64/include/asm/iommu.h | |||
@@ -7,12 +7,14 @@ | |||
7 | 7 | ||
8 | extern void pci_iommu_shutdown(void); | 8 | extern void pci_iommu_shutdown(void); |
9 | extern void no_iommu_init(void); | 9 | extern void no_iommu_init(void); |
10 | #ifdef CONFIG_INTEL_IOMMU | ||
10 | extern int force_iommu, no_iommu; | 11 | extern int force_iommu, no_iommu; |
11 | extern int iommu_detected; | ||
12 | #ifdef CONFIG_DMAR | ||
13 | extern int iommu_pass_through; | 12 | extern int iommu_pass_through; |
13 | extern int iommu_detected; | ||
14 | #else | 14 | #else |
15 | #define iommu_pass_through (0) | 15 | #define iommu_pass_through (0) |
16 | #define no_iommu (1) | ||
17 | #define iommu_detected (0) | ||
16 | #endif | 18 | #endif |
17 | extern void iommu_dma_init(void); | 19 | extern void iommu_dma_init(void); |
18 | extern void machvec_init(const char *name); | 20 | extern void machvec_init(const char *name); |
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 73b5f785e70c..127dd7be346a 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h | |||
@@ -139,7 +139,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
139 | return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14); | 139 | return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14); |
140 | } | 140 | } |
141 | 141 | ||
142 | #ifdef CONFIG_DMAR | 142 | #ifdef CONFIG_INTEL_IOMMU |
143 | extern void pci_iommu_alloc(void); | 143 | extern void pci_iommu_alloc(void); |
144 | #endif | 144 | #endif |
145 | #endif /* _ASM_IA64_PCI_H */ | 145 | #endif /* _ASM_IA64_PCI_H */ |
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 395c2f216dd8..d959c84904be 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -43,7 +43,7 @@ obj-$(CONFIG_IA64_ESI) += esi.o | |||
43 | ifneq ($(CONFIG_IA64_ESI),) | 43 | ifneq ($(CONFIG_IA64_ESI),) |
44 | obj-y += esi_stub.o # must be in kernel proper | 44 | obj-y += esi_stub.o # must be in kernel proper |
45 | endif | 45 | endif |
46 | obj-$(CONFIG_DMAR) += pci-dma.o | 46 | obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o |
47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
48 | 48 | ||
49 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | 49 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 3be485a300b1..bfb4d01e0e51 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -88,7 +88,7 @@ acpi_get_sysname(void) | |||
88 | struct acpi_table_rsdp *rsdp; | 88 | struct acpi_table_rsdp *rsdp; |
89 | struct acpi_table_xsdt *xsdt; | 89 | struct acpi_table_xsdt *xsdt; |
90 | struct acpi_table_header *hdr; | 90 | struct acpi_table_header *hdr; |
91 | #ifdef CONFIG_DMAR | 91 | #ifdef CONFIG_INTEL_IOMMU |
92 | u64 i, nentries; | 92 | u64 i, nentries; |
93 | #endif | 93 | #endif |
94 | 94 | ||
@@ -125,7 +125,7 @@ acpi_get_sysname(void) | |||
125 | return "xen"; | 125 | return "xen"; |
126 | } | 126 | } |
127 | 127 | ||
128 | #ifdef CONFIG_DMAR | 128 | #ifdef CONFIG_INTEL_IOMMU |
129 | /* Look for Intel IOMMU */ | 129 | /* Look for Intel IOMMU */ |
130 | nentries = (hdr->length - sizeof(*hdr)) / | 130 | nentries = (hdr->length - sizeof(*hdr)) / |
131 | sizeof(xsdt->table_offset_entry[0]); | 131 | sizeof(xsdt->table_offset_entry[0]); |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 009df5434a7a..94e0db72d4a6 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -131,7 +131,7 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
131 | return ia64_teardown_msi_irq(irq); | 131 | return ia64_teardown_msi_irq(irq); |
132 | } | 132 | } |
133 | 133 | ||
134 | #ifdef CONFIG_DMAR | 134 | #ifdef CONFIG_INTEL_IOMMU |
135 | #ifdef CONFIG_SMP | 135 | #ifdef CONFIG_SMP |
136 | static int dmar_msi_set_affinity(struct irq_data *data, | 136 | static int dmar_msi_set_affinity(struct irq_data *data, |
137 | const struct cpumask *mask, bool force) | 137 | const struct cpumask *mask, bool force) |
@@ -210,5 +210,5 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
210 | "edge"); | 210 | "edge"); |
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
213 | #endif /* CONFIG_DMAR */ | 213 | #endif /* CONFIG_INTEL_IOMMU */ |
214 | 214 | ||
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index f6b1ff0aea76..c16162c70860 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/system.h> | 15 | #include <asm/system.h> |
16 | 16 | ||
17 | #ifdef CONFIG_DMAR | 17 | #ifdef CONFIG_INTEL_IOMMU |
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | 20 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9a4a267a8a55..9037289617ac 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -130,7 +130,7 @@ config SBUS | |||
130 | bool | 130 | bool |
131 | 131 | ||
132 | config NEED_DMA_MAP_STATE | 132 | config NEED_DMA_MAP_STATE |
133 | def_bool (X86_64 || DMAR || DMA_API_DEBUG) | 133 | def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG) |
134 | 134 | ||
135 | config NEED_SG_DMA_LENGTH | 135 | config NEED_SG_DMA_LENGTH |
136 | def_bool y | 136 | def_bool y |
@@ -220,7 +220,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC | |||
220 | 220 | ||
221 | config HAVE_INTEL_TXT | 221 | config HAVE_INTEL_TXT |
222 | def_bool y | 222 | def_bool y |
223 | depends on EXPERIMENTAL && DMAR && ACPI | 223 | depends on EXPERIMENTAL && INTEL_IOMMU && ACPI |
224 | 224 | ||
225 | config X86_32_SMP | 225 | config X86_32_SMP |
226 | def_bool y | 226 | def_bool y |
@@ -287,7 +287,7 @@ config SMP | |||
287 | 287 | ||
288 | config X86_X2APIC | 288 | config X86_X2APIC |
289 | bool "Support x2apic" | 289 | bool "Support x2apic" |
290 | depends on X86_LOCAL_APIC && X86_64 && INTR_REMAP | 290 | depends on X86_LOCAL_APIC && X86_64 && IRQ_REMAP |
291 | ---help--- | 291 | ---help--- |
292 | This enables x2apic support on CPUs that have this feature. | 292 | This enables x2apic support on CPUs that have this feature. |
293 | 293 | ||
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 22a0dc8e51dd..058a35b8286c 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -67,8 +67,8 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | |||
67 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y | 67 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y |
68 | CONFIG_X86_ACPI_CPUFREQ=y | 68 | CONFIG_X86_ACPI_CPUFREQ=y |
69 | CONFIG_PCI_MMCONFIG=y | 69 | CONFIG_PCI_MMCONFIG=y |
70 | CONFIG_DMAR=y | 70 | CONFIG_INTEL_IOMMU=y |
71 | # CONFIG_DMAR_DEFAULT_ON is not set | 71 | # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set |
72 | CONFIG_PCIEPORTBUS=y | 72 | CONFIG_PCIEPORTBUS=y |
73 | CONFIG_PCCARD=y | 73 | CONFIG_PCCARD=y |
74 | CONFIG_YENTA=y | 74 | CONFIG_YENTA=y |
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 029f230ab637..63a2a03d7d51 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -8,7 +8,7 @@ struct dev_archdata { | |||
8 | #ifdef CONFIG_X86_64 | 8 | #ifdef CONFIG_X86_64 |
9 | struct dma_map_ops *dma_ops; | 9 | struct dma_map_ops *dma_ops; |
10 | #endif | 10 | #endif |
11 | #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) | 11 | #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) |
12 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
13 | #endif | 13 | #endif |
14 | }; | 14 | }; |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 09199052060f..eb92a6ed2be7 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -119,7 +119,7 @@ struct irq_cfg { | |||
119 | cpumask_var_t old_domain; | 119 | cpumask_var_t old_domain; |
120 | u8 vector; | 120 | u8 vector; |
121 | u8 move_in_progress : 1; | 121 | u8 move_in_progress : 1; |
122 | #ifdef CONFIG_INTR_REMAP | 122 | #ifdef CONFIG_IRQ_REMAP |
123 | struct irq_2_iommu irq_2_iommu; | 123 | struct irq_2_iommu irq_2_iommu; |
124 | #endif | 124 | #endif |
125 | }; | 125 | }; |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 1c23360fb2d8..47d99934580f 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
@@ -3,7 +3,8 @@ | |||
3 | 3 | ||
4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) | 4 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) |
5 | 5 | ||
6 | #ifdef CONFIG_INTR_REMAP | 6 | #ifdef CONFIG_IRQ_REMAP |
7 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip); | ||
7 | static inline void prepare_irte(struct irte *irte, int vector, | 8 | static inline void prepare_irte(struct irte *irte, int vector, |
8 | unsigned int dest) | 9 | unsigned int dest) |
9 | { | 10 | { |
@@ -36,6 +37,9 @@ static inline bool irq_remapped(struct irq_cfg *cfg) | |||
36 | { | 37 | { |
37 | return false; | 38 | return false; |
38 | } | 39 | } |
40 | static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
41 | { | ||
42 | } | ||
39 | #endif | 43 | #endif |
40 | 44 | ||
41 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ | 45 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 52fa56399a50..a2fd72e0ab35 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1437,27 +1437,21 @@ void enable_x2apic(void) | |||
1437 | 1437 | ||
1438 | int __init enable_IR(void) | 1438 | int __init enable_IR(void) |
1439 | { | 1439 | { |
1440 | #ifdef CONFIG_INTR_REMAP | 1440 | #ifdef CONFIG_IRQ_REMAP |
1441 | if (!intr_remapping_supported()) { | 1441 | if (!intr_remapping_supported()) { |
1442 | pr_debug("intr-remapping not supported\n"); | 1442 | pr_debug("intr-remapping not supported\n"); |
1443 | return 0; | 1443 | return -1; |
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | if (!x2apic_preenabled && skip_ioapic_setup) { | 1446 | if (!x2apic_preenabled && skip_ioapic_setup) { |
1447 | pr_info("Skipped enabling intr-remap because of skipping " | 1447 | pr_info("Skipped enabling intr-remap because of skipping " |
1448 | "io-apic setup\n"); | 1448 | "io-apic setup\n"); |
1449 | return 0; | 1449 | return -1; |
1450 | } | 1450 | } |
1451 | 1451 | ||
1452 | if (enable_intr_remapping(x2apic_supported())) | 1452 | return enable_intr_remapping(); |
1453 | return 0; | ||
1454 | |||
1455 | pr_info("Enabled Interrupt-remapping\n"); | ||
1456 | |||
1457 | return 1; | ||
1458 | |||
1459 | #endif | 1453 | #endif |
1460 | return 0; | 1454 | return -1; |
1461 | } | 1455 | } |
1462 | 1456 | ||
1463 | void __init enable_IR_x2apic(void) | 1457 | void __init enable_IR_x2apic(void) |
@@ -1481,11 +1475,11 @@ void __init enable_IR_x2apic(void) | |||
1481 | mask_ioapic_entries(); | 1475 | mask_ioapic_entries(); |
1482 | 1476 | ||
1483 | if (dmar_table_init_ret) | 1477 | if (dmar_table_init_ret) |
1484 | ret = 0; | 1478 | ret = -1; |
1485 | else | 1479 | else |
1486 | ret = enable_IR(); | 1480 | ret = enable_IR(); |
1487 | 1481 | ||
1488 | if (!ret) { | 1482 | if (ret < 0) { |
1489 | /* IR is required if there is APIC ID > 255 even when running | 1483 | /* IR is required if there is APIC ID > 255 even when running |
1490 | * under KVM | 1484 | * under KVM |
1491 | */ | 1485 | */ |
@@ -1499,6 +1493,9 @@ void __init enable_IR_x2apic(void) | |||
1499 | x2apic_force_phys(); | 1493 | x2apic_force_phys(); |
1500 | } | 1494 | } |
1501 | 1495 | ||
1496 | if (ret == IRQ_REMAP_XAPIC_MODE) | ||
1497 | goto nox2apic; | ||
1498 | |||
1502 | x2apic_enabled = 1; | 1499 | x2apic_enabled = 1; |
1503 | 1500 | ||
1504 | if (x2apic_supported() && !x2apic_mode) { | 1501 | if (x2apic_supported() && !x2apic_mode) { |
@@ -1508,19 +1505,21 @@ void __init enable_IR_x2apic(void) | |||
1508 | } | 1505 | } |
1509 | 1506 | ||
1510 | nox2apic: | 1507 | nox2apic: |
1511 | if (!ret) /* IR enabling failed */ | 1508 | if (ret < 0) /* IR enabling failed */ |
1512 | restore_ioapic_entries(); | 1509 | restore_ioapic_entries(); |
1513 | legacy_pic->restore_mask(); | 1510 | legacy_pic->restore_mask(); |
1514 | local_irq_restore(flags); | 1511 | local_irq_restore(flags); |
1515 | 1512 | ||
1516 | out: | 1513 | out: |
1517 | if (x2apic_enabled) | 1514 | if (x2apic_enabled || !x2apic_supported()) |
1518 | return; | 1515 | return; |
1519 | 1516 | ||
1520 | if (x2apic_preenabled) | 1517 | if (x2apic_preenabled) |
1521 | panic("x2apic: enabled by BIOS but kernel init failed."); | 1518 | panic("x2apic: enabled by BIOS but kernel init failed."); |
1522 | else if (cpu_has_x2apic) | 1519 | else if (ret == IRQ_REMAP_XAPIC_MODE) |
1523 | pr_info("Not enabling x2apic, Intr-remapping init failed.\n"); | 1520 | pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n"); |
1521 | else if (ret < 0) | ||
1522 | pr_info("x2apic not enabled, IRQ remapping init failed\n"); | ||
1524 | } | 1523 | } |
1525 | 1524 | ||
1526 | #ifdef CONFIG_X86_64 | 1525 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8eb863e27ea6..229e19f3eb57 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -394,13 +394,21 @@ union entry_union { | |||
394 | struct IO_APIC_route_entry entry; | 394 | struct IO_APIC_route_entry entry; |
395 | }; | 395 | }; |
396 | 396 | ||
397 | static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) | ||
398 | { | ||
399 | union entry_union eu; | ||
400 | |||
401 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); | ||
402 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); | ||
403 | return eu.entry; | ||
404 | } | ||
405 | |||
397 | static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) | 406 | static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) |
398 | { | 407 | { |
399 | union entry_union eu; | 408 | union entry_union eu; |
400 | unsigned long flags; | 409 | unsigned long flags; |
401 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 410 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
402 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); | 411 | eu.entry = __ioapic_read_entry(apic, pin); |
403 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); | ||
404 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 412 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
405 | return eu.entry; | 413 | return eu.entry; |
406 | } | 414 | } |
@@ -529,18 +537,6 @@ static void io_apic_modify_irq(struct irq_cfg *cfg, | |||
529 | __io_apic_modify_irq(entry, mask_and, mask_or, final); | 537 | __io_apic_modify_irq(entry, mask_and, mask_or, final); |
530 | } | 538 | } |
531 | 539 | ||
532 | static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry) | ||
533 | { | ||
534 | __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER, | ||
535 | IO_APIC_REDIR_MASKED, NULL); | ||
536 | } | ||
537 | |||
538 | static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) | ||
539 | { | ||
540 | __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED, | ||
541 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | ||
542 | } | ||
543 | |||
544 | static void io_apic_sync(struct irq_pin_list *entry) | 540 | static void io_apic_sync(struct irq_pin_list *entry) |
545 | { | 541 | { |
546 | /* | 542 | /* |
@@ -585,6 +581,66 @@ static void unmask_ioapic_irq(struct irq_data *data) | |||
585 | unmask_ioapic(data->chip_data); | 581 | unmask_ioapic(data->chip_data); |
586 | } | 582 | } |
587 | 583 | ||
584 | /* | ||
585 | * IO-APIC versions below 0x20 don't support EOI register. | ||
586 | * For the record, here is the information about various versions: | ||
587 | * 0Xh 82489DX | ||
588 | * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant | ||
589 | * 2Xh I/O(x)APIC which is PCI 2.2 Compliant | ||
590 | * 30h-FFh Reserved | ||
591 | * | ||
592 | * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic | ||
593 | * version as 0x2. This is an error with documentation and these ICH chips | ||
594 | * use io-apic's of version 0x20. | ||
595 | * | ||
596 | * For IO-APIC's with EOI register, we use that to do an explicit EOI. | ||
597 | * Otherwise, we simulate the EOI message manually by changing the trigger | ||
598 | * mode to edge and then back to level, with RTE being masked during this. | ||
599 | */ | ||
600 | static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) | ||
601 | { | ||
602 | if (mpc_ioapic_ver(apic) >= 0x20) { | ||
603 | /* | ||
604 | * Intr-remapping uses pin number as the virtual vector | ||
605 | * in the RTE. Actual vector is programmed in | ||
606 | * intr-remapping table entry. Hence for the io-apic | ||
607 | * EOI we use the pin number. | ||
608 | */ | ||
609 | if (cfg && irq_remapped(cfg)) | ||
610 | io_apic_eoi(apic, pin); | ||
611 | else | ||
612 | io_apic_eoi(apic, vector); | ||
613 | } else { | ||
614 | struct IO_APIC_route_entry entry, entry1; | ||
615 | |||
616 | entry = entry1 = __ioapic_read_entry(apic, pin); | ||
617 | |||
618 | /* | ||
619 | * Mask the entry and change the trigger mode to edge. | ||
620 | */ | ||
621 | entry1.mask = 1; | ||
622 | entry1.trigger = IOAPIC_EDGE; | ||
623 | |||
624 | __ioapic_write_entry(apic, pin, entry1); | ||
625 | |||
626 | /* | ||
627 | * Restore the previous level triggered entry. | ||
628 | */ | ||
629 | __ioapic_write_entry(apic, pin, entry); | ||
630 | } | ||
631 | } | ||
632 | |||
633 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
634 | { | ||
635 | struct irq_pin_list *entry; | ||
636 | unsigned long flags; | ||
637 | |||
638 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
639 | for_each_irq_pin(entry, cfg->irq_2_pin) | ||
640 | __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); | ||
641 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
642 | } | ||
643 | |||
588 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 644 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
589 | { | 645 | { |
590 | struct IO_APIC_route_entry entry; | 646 | struct IO_APIC_route_entry entry; |
@@ -593,10 +649,44 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | |||
593 | entry = ioapic_read_entry(apic, pin); | 649 | entry = ioapic_read_entry(apic, pin); |
594 | if (entry.delivery_mode == dest_SMI) | 650 | if (entry.delivery_mode == dest_SMI) |
595 | return; | 651 | return; |
652 | |||
653 | /* | ||
654 | * Make sure the entry is masked and re-read the contents to check | ||
655 | * if it is a level triggered pin and if the remote-IRR is set. | ||
656 | */ | ||
657 | if (!entry.mask) { | ||
658 | entry.mask = 1; | ||
659 | ioapic_write_entry(apic, pin, entry); | ||
660 | entry = ioapic_read_entry(apic, pin); | ||
661 | } | ||
662 | |||
663 | if (entry.irr) { | ||
664 | unsigned long flags; | ||
665 | |||
666 | /* | ||
667 | * Make sure the trigger mode is set to level. Explicit EOI | ||
668 | * doesn't clear the remote-IRR if the trigger mode is not | ||
669 | * set to level. | ||
670 | */ | ||
671 | if (!entry.trigger) { | ||
672 | entry.trigger = IOAPIC_LEVEL; | ||
673 | ioapic_write_entry(apic, pin, entry); | ||
674 | } | ||
675 | |||
676 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
677 | __eoi_ioapic_pin(apic, pin, entry.vector, NULL); | ||
678 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
679 | } | ||
680 | |||
596 | /* | 681 | /* |
597 | * Disable it in the IO-APIC irq-routing table: | 682 | * Clear the rest of the bits in the IO-APIC RTE except for the mask |
683 | * bit. | ||
598 | */ | 684 | */ |
599 | ioapic_mask_entry(apic, pin); | 685 | ioapic_mask_entry(apic, pin); |
686 | entry = ioapic_read_entry(apic, pin); | ||
687 | if (entry.irr) | ||
688 | printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n", | ||
689 | mpc_ioapic_id(apic), pin); | ||
600 | } | 690 | } |
601 | 691 | ||
602 | static void clear_IO_APIC (void) | 692 | static void clear_IO_APIC (void) |
@@ -1202,7 +1292,6 @@ void __setup_vector_irq(int cpu) | |||
1202 | } | 1292 | } |
1203 | 1293 | ||
1204 | static struct irq_chip ioapic_chip; | 1294 | static struct irq_chip ioapic_chip; |
1205 | static struct irq_chip ir_ioapic_chip; | ||
1206 | 1295 | ||
1207 | #ifdef CONFIG_X86_32 | 1296 | #ifdef CONFIG_X86_32 |
1208 | static inline int IO_APIC_irq_trigger(int irq) | 1297 | static inline int IO_APIC_irq_trigger(int irq) |
@@ -1246,7 +1335,7 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, | |||
1246 | 1335 | ||
1247 | if (irq_remapped(cfg)) { | 1336 | if (irq_remapped(cfg)) { |
1248 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 1337 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
1249 | chip = &ir_ioapic_chip; | 1338 | irq_remap_modify_chip_defaults(chip); |
1250 | fasteoi = trigger != 0; | 1339 | fasteoi = trigger != 0; |
1251 | } | 1340 | } |
1252 | 1341 | ||
@@ -2255,7 +2344,7 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
2255 | return ret; | 2344 | return ret; |
2256 | } | 2345 | } |
2257 | 2346 | ||
2258 | #ifdef CONFIG_INTR_REMAP | 2347 | #ifdef CONFIG_IRQ_REMAP |
2259 | 2348 | ||
2260 | /* | 2349 | /* |
2261 | * Migrate the IO-APIC irq in the presence of intr-remapping. | 2350 | * Migrate the IO-APIC irq in the presence of intr-remapping. |
@@ -2267,6 +2356,9 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
2267 | * updated vector information), by using a virtual vector (io-apic pin number). | 2356 | * updated vector information), by using a virtual vector (io-apic pin number). |
2268 | * Real vector that is used for interrupting cpu will be coming from | 2357 | * Real vector that is used for interrupting cpu will be coming from |
2269 | * the interrupt-remapping table entry. | 2358 | * the interrupt-remapping table entry. |
2359 | * | ||
2360 | * As the migration is a simple atomic update of IRTE, the same mechanism | ||
2361 | * is used to migrate MSI irq's in the presence of interrupt-remapping. | ||
2270 | */ | 2362 | */ |
2271 | static int | 2363 | static int |
2272 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | 2364 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
@@ -2291,10 +2383,16 @@ ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
2291 | irte.dest_id = IRTE_DEST(dest); | 2383 | irte.dest_id = IRTE_DEST(dest); |
2292 | 2384 | ||
2293 | /* | 2385 | /* |
2294 | * Modified the IRTE and flushes the Interrupt entry cache. | 2386 | * Atomically updates the IRTE with the new destination, vector |
2387 | * and flushes the interrupt entry cache. | ||
2295 | */ | 2388 | */ |
2296 | modify_irte(irq, &irte); | 2389 | modify_irte(irq, &irte); |
2297 | 2390 | ||
2391 | /* | ||
2392 | * After this point, all the interrupts will start arriving | ||
2393 | * at the new destination. So, time to cleanup the previous | ||
2394 | * vector allocation. | ||
2395 | */ | ||
2298 | if (cfg->move_in_progress) | 2396 | if (cfg->move_in_progress) |
2299 | send_cleanup_vector(cfg); | 2397 | send_cleanup_vector(cfg); |
2300 | 2398 | ||
@@ -2407,48 +2505,6 @@ static void ack_apic_edge(struct irq_data *data) | |||
2407 | 2505 | ||
2408 | atomic_t irq_mis_count; | 2506 | atomic_t irq_mis_count; |
2409 | 2507 | ||
2410 | /* | ||
2411 | * IO-APIC versions below 0x20 don't support EOI register. | ||
2412 | * For the record, here is the information about various versions: | ||
2413 | * 0Xh 82489DX | ||
2414 | * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant | ||
2415 | * 2Xh I/O(x)APIC which is PCI 2.2 Compliant | ||
2416 | * 30h-FFh Reserved | ||
2417 | * | ||
2418 | * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic | ||
2419 | * version as 0x2. This is an error with documentation and these ICH chips | ||
2420 | * use io-apic's of version 0x20. | ||
2421 | * | ||
2422 | * For IO-APIC's with EOI register, we use that to do an explicit EOI. | ||
2423 | * Otherwise, we simulate the EOI message manually by changing the trigger | ||
2424 | * mode to edge and then back to level, with RTE being masked during this. | ||
2425 | */ | ||
2426 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
2427 | { | ||
2428 | struct irq_pin_list *entry; | ||
2429 | unsigned long flags; | ||
2430 | |||
2431 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2432 | for_each_irq_pin(entry, cfg->irq_2_pin) { | ||
2433 | if (mpc_ioapic_ver(entry->apic) >= 0x20) { | ||
2434 | /* | ||
2435 | * Intr-remapping uses pin number as the virtual vector | ||
2436 | * in the RTE. Actual vector is programmed in | ||
2437 | * intr-remapping table entry. Hence for the io-apic | ||
2438 | * EOI we use the pin number. | ||
2439 | */ | ||
2440 | if (irq_remapped(cfg)) | ||
2441 | io_apic_eoi(entry->apic, entry->pin); | ||
2442 | else | ||
2443 | io_apic_eoi(entry->apic, cfg->vector); | ||
2444 | } else { | ||
2445 | __mask_and_edge_IO_APIC_irq(entry); | ||
2446 | __unmask_and_level_IO_APIC_irq(entry); | ||
2447 | } | ||
2448 | } | ||
2449 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2450 | } | ||
2451 | |||
2452 | static void ack_apic_level(struct irq_data *data) | 2508 | static void ack_apic_level(struct irq_data *data) |
2453 | { | 2509 | { |
2454 | struct irq_cfg *cfg = data->chip_data; | 2510 | struct irq_cfg *cfg = data->chip_data; |
@@ -2552,7 +2608,7 @@ static void ack_apic_level(struct irq_data *data) | |||
2552 | } | 2608 | } |
2553 | } | 2609 | } |
2554 | 2610 | ||
2555 | #ifdef CONFIG_INTR_REMAP | 2611 | #ifdef CONFIG_IRQ_REMAP |
2556 | static void ir_ack_apic_edge(struct irq_data *data) | 2612 | static void ir_ack_apic_edge(struct irq_data *data) |
2557 | { | 2613 | { |
2558 | ack_APIC_irq(); | 2614 | ack_APIC_irq(); |
@@ -2563,7 +2619,23 @@ static void ir_ack_apic_level(struct irq_data *data) | |||
2563 | ack_APIC_irq(); | 2619 | ack_APIC_irq(); |
2564 | eoi_ioapic_irq(data->irq, data->chip_data); | 2620 | eoi_ioapic_irq(data->irq, data->chip_data); |
2565 | } | 2621 | } |
2566 | #endif /* CONFIG_INTR_REMAP */ | 2622 | |
2623 | static void ir_print_prefix(struct irq_data *data, struct seq_file *p) | ||
2624 | { | ||
2625 | seq_printf(p, " IR-%s", data->chip->name); | ||
2626 | } | ||
2627 | |||
2628 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
2629 | { | ||
2630 | chip->irq_print_chip = ir_print_prefix; | ||
2631 | chip->irq_ack = ir_ack_apic_edge; | ||
2632 | chip->irq_eoi = ir_ack_apic_level; | ||
2633 | |||
2634 | #ifdef CONFIG_SMP | ||
2635 | chip->irq_set_affinity = ir_ioapic_set_affinity; | ||
2636 | #endif | ||
2637 | } | ||
2638 | #endif /* CONFIG_IRQ_REMAP */ | ||
2567 | 2639 | ||
2568 | static struct irq_chip ioapic_chip __read_mostly = { | 2640 | static struct irq_chip ioapic_chip __read_mostly = { |
2569 | .name = "IO-APIC", | 2641 | .name = "IO-APIC", |
@@ -2578,21 +2650,6 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
2578 | .irq_retrigger = ioapic_retrigger_irq, | 2650 | .irq_retrigger = ioapic_retrigger_irq, |
2579 | }; | 2651 | }; |
2580 | 2652 | ||
2581 | static struct irq_chip ir_ioapic_chip __read_mostly = { | ||
2582 | .name = "IR-IO-APIC", | ||
2583 | .irq_startup = startup_ioapic_irq, | ||
2584 | .irq_mask = mask_ioapic_irq, | ||
2585 | .irq_unmask = unmask_ioapic_irq, | ||
2586 | #ifdef CONFIG_INTR_REMAP | ||
2587 | .irq_ack = ir_ack_apic_edge, | ||
2588 | .irq_eoi = ir_ack_apic_level, | ||
2589 | #ifdef CONFIG_SMP | ||
2590 | .irq_set_affinity = ir_ioapic_set_affinity, | ||
2591 | #endif | ||
2592 | #endif | ||
2593 | .irq_retrigger = ioapic_retrigger_irq, | ||
2594 | }; | ||
2595 | |||
2596 | static inline void init_IO_APIC_traps(void) | 2653 | static inline void init_IO_APIC_traps(void) |
2597 | { | 2654 | { |
2598 | struct irq_cfg *cfg; | 2655 | struct irq_cfg *cfg; |
@@ -3144,45 +3201,6 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | |||
3144 | 3201 | ||
3145 | return 0; | 3202 | return 0; |
3146 | } | 3203 | } |
3147 | #ifdef CONFIG_INTR_REMAP | ||
3148 | /* | ||
3149 | * Migrate the MSI irq to another cpumask. This migration is | ||
3150 | * done in the process context using interrupt-remapping hardware. | ||
3151 | */ | ||
3152 | static int | ||
3153 | ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
3154 | bool force) | ||
3155 | { | ||
3156 | struct irq_cfg *cfg = data->chip_data; | ||
3157 | unsigned int dest, irq = data->irq; | ||
3158 | struct irte irte; | ||
3159 | |||
3160 | if (get_irte(irq, &irte)) | ||
3161 | return -1; | ||
3162 | |||
3163 | if (__ioapic_set_affinity(data, mask, &dest)) | ||
3164 | return -1; | ||
3165 | |||
3166 | irte.vector = cfg->vector; | ||
3167 | irte.dest_id = IRTE_DEST(dest); | ||
3168 | |||
3169 | /* | ||
3170 | * atomically update the IRTE with the new destination and vector. | ||
3171 | */ | ||
3172 | modify_irte(irq, &irte); | ||
3173 | |||
3174 | /* | ||
3175 | * After this point, all the interrupts will start arriving | ||
3176 | * at the new destination. So, time to cleanup the previous | ||
3177 | * vector allocation. | ||
3178 | */ | ||
3179 | if (cfg->move_in_progress) | ||
3180 | send_cleanup_vector(cfg); | ||
3181 | |||
3182 | return 0; | ||
3183 | } | ||
3184 | |||
3185 | #endif | ||
3186 | #endif /* CONFIG_SMP */ | 3204 | #endif /* CONFIG_SMP */ |
3187 | 3205 | ||
3188 | /* | 3206 | /* |
@@ -3200,19 +3218,6 @@ static struct irq_chip msi_chip = { | |||
3200 | .irq_retrigger = ioapic_retrigger_irq, | 3218 | .irq_retrigger = ioapic_retrigger_irq, |
3201 | }; | 3219 | }; |
3202 | 3220 | ||
3203 | static struct irq_chip msi_ir_chip = { | ||
3204 | .name = "IR-PCI-MSI", | ||
3205 | .irq_unmask = unmask_msi_irq, | ||
3206 | .irq_mask = mask_msi_irq, | ||
3207 | #ifdef CONFIG_INTR_REMAP | ||
3208 | .irq_ack = ir_ack_apic_edge, | ||
3209 | #ifdef CONFIG_SMP | ||
3210 | .irq_set_affinity = ir_msi_set_affinity, | ||
3211 | #endif | ||
3212 | #endif | ||
3213 | .irq_retrigger = ioapic_retrigger_irq, | ||
3214 | }; | ||
3215 | |||
3216 | /* | 3221 | /* |
3217 | * Map the PCI dev to the corresponding remapping hardware unit | 3222 | * Map the PCI dev to the corresponding remapping hardware unit |
3218 | * and allocate 'nvec' consecutive interrupt-remapping table entries | 3223 | * and allocate 'nvec' consecutive interrupt-remapping table entries |
@@ -3255,7 +3260,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3255 | 3260 | ||
3256 | if (irq_remapped(irq_get_chip_data(irq))) { | 3261 | if (irq_remapped(irq_get_chip_data(irq))) { |
3257 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3262 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3258 | chip = &msi_ir_chip; | 3263 | irq_remap_modify_chip_defaults(chip); |
3259 | } | 3264 | } |
3260 | 3265 | ||
3261 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3266 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
@@ -3328,7 +3333,7 @@ void native_teardown_msi_irq(unsigned int irq) | |||
3328 | destroy_irq(irq); | 3333 | destroy_irq(irq); |
3329 | } | 3334 | } |
3330 | 3335 | ||
3331 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) | 3336 | #ifdef CONFIG_DMAR_TABLE |
3332 | #ifdef CONFIG_SMP | 3337 | #ifdef CONFIG_SMP |
3333 | static int | 3338 | static int |
3334 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | 3339 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, |
@@ -3409,19 +3414,6 @@ static int hpet_msi_set_affinity(struct irq_data *data, | |||
3409 | 3414 | ||
3410 | #endif /* CONFIG_SMP */ | 3415 | #endif /* CONFIG_SMP */ |
3411 | 3416 | ||
3412 | static struct irq_chip ir_hpet_msi_type = { | ||
3413 | .name = "IR-HPET_MSI", | ||
3414 | .irq_unmask = hpet_msi_unmask, | ||
3415 | .irq_mask = hpet_msi_mask, | ||
3416 | #ifdef CONFIG_INTR_REMAP | ||
3417 | .irq_ack = ir_ack_apic_edge, | ||
3418 | #ifdef CONFIG_SMP | ||
3419 | .irq_set_affinity = ir_msi_set_affinity, | ||
3420 | #endif | ||
3421 | #endif | ||
3422 | .irq_retrigger = ioapic_retrigger_irq, | ||
3423 | }; | ||
3424 | |||
3425 | static struct irq_chip hpet_msi_type = { | 3417 | static struct irq_chip hpet_msi_type = { |
3426 | .name = "HPET_MSI", | 3418 | .name = "HPET_MSI", |
3427 | .irq_unmask = hpet_msi_unmask, | 3419 | .irq_unmask = hpet_msi_unmask, |
@@ -3458,7 +3450,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
3458 | hpet_msi_write(irq_get_handler_data(irq), &msg); | 3450 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
3459 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3451 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3460 | if (irq_remapped(irq_get_chip_data(irq))) | 3452 | if (irq_remapped(irq_get_chip_data(irq))) |
3461 | chip = &ir_hpet_msi_type; | 3453 | irq_remap_modify_chip_defaults(chip); |
3462 | 3454 | ||
3463 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3455 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
3464 | return 0; | 3456 | return 0; |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 85151019dde1..2774ac1086d3 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -30,10 +30,10 @@ | |||
30 | /* | 30 | /* |
31 | * If we have Intel graphics, we're not going to have anything other than | 31 | * If we have Intel graphics, we're not going to have anything other than |
32 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | 32 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
33 | * on the Intel IOMMU support (CONFIG_DMAR). | 33 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
34 | * Only newer chipsets need to bother with this, of course. | 34 | * Only newer chipsets need to bother with this, of course. |
35 | */ | 35 | */ |
36 | #ifdef CONFIG_DMAR | 36 | #ifdef CONFIG_INTEL_IOMMU |
37 | #define USE_PCI_DMA_API 1 | 37 | #define USE_PCI_DMA_API 1 |
38 | #else | 38 | #else |
39 | #define USE_PCI_DMA_API 0 | 39 | #define USE_PCI_DMA_API 0 |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b57b3fa492f3..7d7eaa15e773 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -59,10 +59,14 @@ config AMD_IOMMU_STATS | |||
59 | If unsure, say N. | 59 | If unsure, say N. |
60 | 60 | ||
61 | # Intel IOMMU support | 61 | # Intel IOMMU support |
62 | config DMAR | 62 | config DMAR_TABLE |
63 | bool "Support for DMA Remapping Devices" | 63 | bool |
64 | |||
65 | config INTEL_IOMMU | ||
66 | bool "Support for Intel IOMMU using DMA Remapping Devices" | ||
64 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) | 67 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) |
65 | select IOMMU_API | 68 | select IOMMU_API |
69 | select DMAR_TABLE | ||
66 | help | 70 | help |
67 | DMA remapping (DMAR) devices support enables independent address | 71 | DMA remapping (DMAR) devices support enables independent address |
68 | translations for Direct Memory Access (DMA) from devices. | 72 | translations for Direct Memory Access (DMA) from devices. |
@@ -70,18 +74,18 @@ config DMAR | |||
70 | and include PCI device scope covered by these DMA | 74 | and include PCI device scope covered by these DMA |
71 | remapping devices. | 75 | remapping devices. |
72 | 76 | ||
73 | config DMAR_DEFAULT_ON | 77 | config INTEL_IOMMU_DEFAULT_ON |
74 | def_bool y | 78 | def_bool y |
75 | prompt "Enable DMA Remapping Devices by default" | 79 | prompt "Enable Intel DMA Remapping Devices by default" |
76 | depends on DMAR | 80 | depends on INTEL_IOMMU |
77 | help | 81 | help |
78 | Selecting this option will enable a DMAR device at boot time if | 82 | Selecting this option will enable a DMAR device at boot time if |
79 | one is found. If this option is not selected, DMAR support can | 83 | one is found. If this option is not selected, DMAR support can |
80 | be enabled by passing intel_iommu=on to the kernel. | 84 | be enabled by passing intel_iommu=on to the kernel. |
81 | 85 | ||
82 | config DMAR_BROKEN_GFX_WA | 86 | config INTEL_IOMMU_BROKEN_GFX_WA |
83 | bool "Workaround broken graphics drivers (going away soon)" | 87 | bool "Workaround broken graphics drivers (going away soon)" |
84 | depends on DMAR && BROKEN && X86 | 88 | depends on INTEL_IOMMU && BROKEN && X86 |
85 | ---help--- | 89 | ---help--- |
86 | Current Graphics drivers tend to use physical address | 90 | Current Graphics drivers tend to use physical address |
87 | for DMA and avoid using DMA APIs. Setting this config | 91 | for DMA and avoid using DMA APIs. Setting this config |
@@ -90,18 +94,19 @@ config DMAR_BROKEN_GFX_WA | |||
90 | to use physical addresses for DMA, at least until this | 94 | to use physical addresses for DMA, at least until this |
91 | option is removed in the 2.6.32 kernel. | 95 | option is removed in the 2.6.32 kernel. |
92 | 96 | ||
93 | config DMAR_FLOPPY_WA | 97 | config INTEL_IOMMU_FLOPPY_WA |
94 | def_bool y | 98 | def_bool y |
95 | depends on DMAR && X86 | 99 | depends on INTEL_IOMMU && X86 |
96 | ---help--- | 100 | ---help--- |
97 | Floppy disk drivers are known to bypass DMA API calls | 101 | Floppy disk drivers are known to bypass DMA API calls |
98 | thereby failing to work when IOMMU is enabled. This | 102 | thereby failing to work when IOMMU is enabled. This |
99 | workaround will setup a 1:1 mapping for the first | 103 | workaround will setup a 1:1 mapping for the first |
100 | 16MiB to make floppy (an ISA device) work. | 104 | 16MiB to make floppy (an ISA device) work. |
101 | 105 | ||
102 | config INTR_REMAP | 106 | config IRQ_REMAP |
103 | bool "Support for Interrupt Remapping (EXPERIMENTAL)" | 107 | bool "Support for Interrupt Remapping (EXPERIMENTAL)" |
104 | depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL | 108 | depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL |
109 | select DMAR_TABLE | ||
105 | ---help--- | 110 | ---help--- |
106 | Supports Interrupt remapping for IO-APIC and MSI devices. | 111 | Supports Interrupt remapping for IO-APIC and MSI devices. |
107 | To use x2apic mode in the CPU's which support x2APIC enhancements or | 112 | To use x2apic mode in the CPU's which support x2APIC enhancements or |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 4d4d77df7cac..6394994a2b9d 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | obj-$(CONFIG_IOMMU_API) += iommu.o | 1 | obj-$(CONFIG_IOMMU_API) += iommu.o |
2 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o | 2 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o |
3 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o | 3 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o |
4 | obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o | 4 | obj-$(CONFIG_DMAR_TABLE) += dmar.o |
5 | obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o | 5 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o |
6 | obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o | ||
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 6dcc7e2d54de..587e8f2d38d8 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -46,7 +46,7 @@ | |||
46 | */ | 46 | */ |
47 | LIST_HEAD(dmar_drhd_units); | 47 | LIST_HEAD(dmar_drhd_units); |
48 | 48 | ||
49 | static struct acpi_table_header * __initdata dmar_tbl; | 49 | struct acpi_table_header * __initdata dmar_tbl; |
50 | static acpi_size dmar_tbl_size; | 50 | static acpi_size dmar_tbl_size; |
51 | 51 | ||
52 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | 52 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) |
@@ -118,8 +118,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, | |||
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
120 | 120 | ||
121 | static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, | 121 | int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, |
122 | struct pci_dev ***devices, u16 segment) | 122 | struct pci_dev ***devices, u16 segment) |
123 | { | 123 | { |
124 | struct acpi_dmar_device_scope *scope; | 124 | struct acpi_dmar_device_scope *scope; |
125 | void * tmp = start; | 125 | void * tmp = start; |
@@ -217,133 +217,6 @@ static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) | |||
217 | return ret; | 217 | return ret; |
218 | } | 218 | } |
219 | 219 | ||
220 | #ifdef CONFIG_DMAR | ||
221 | LIST_HEAD(dmar_rmrr_units); | ||
222 | |||
223 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
224 | { | ||
225 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
226 | } | ||
227 | |||
228 | |||
229 | static int __init | ||
230 | dmar_parse_one_rmrr(struct acpi_dmar_header *header) | ||
231 | { | ||
232 | struct acpi_dmar_reserved_memory *rmrr; | ||
233 | struct dmar_rmrr_unit *rmrru; | ||
234 | |||
235 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); | ||
236 | if (!rmrru) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | rmrru->hdr = header; | ||
240 | rmrr = (struct acpi_dmar_reserved_memory *)header; | ||
241 | rmrru->base_address = rmrr->base_address; | ||
242 | rmrru->end_address = rmrr->end_address; | ||
243 | |||
244 | dmar_register_rmrr_unit(rmrru); | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int __init | ||
249 | rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) | ||
250 | { | ||
251 | struct acpi_dmar_reserved_memory *rmrr; | ||
252 | int ret; | ||
253 | |||
254 | rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; | ||
255 | ret = dmar_parse_dev_scope((void *)(rmrr + 1), | ||
256 | ((void *)rmrr) + rmrr->header.length, | ||
257 | &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); | ||
258 | |||
259 | if (ret || (rmrru->devices_cnt == 0)) { | ||
260 | list_del(&rmrru->list); | ||
261 | kfree(rmrru); | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | static LIST_HEAD(dmar_atsr_units); | ||
267 | |||
268 | static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | ||
269 | { | ||
270 | struct acpi_dmar_atsr *atsr; | ||
271 | struct dmar_atsr_unit *atsru; | ||
272 | |||
273 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); | ||
274 | atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); | ||
275 | if (!atsru) | ||
276 | return -ENOMEM; | ||
277 | |||
278 | atsru->hdr = hdr; | ||
279 | atsru->include_all = atsr->flags & 0x1; | ||
280 | |||
281 | list_add(&atsru->list, &dmar_atsr_units); | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) | ||
287 | { | ||
288 | int rc; | ||
289 | struct acpi_dmar_atsr *atsr; | ||
290 | |||
291 | if (atsru->include_all) | ||
292 | return 0; | ||
293 | |||
294 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
295 | rc = dmar_parse_dev_scope((void *)(atsr + 1), | ||
296 | (void *)atsr + atsr->header.length, | ||
297 | &atsru->devices_cnt, &atsru->devices, | ||
298 | atsr->segment); | ||
299 | if (rc || !atsru->devices_cnt) { | ||
300 | list_del(&atsru->list); | ||
301 | kfree(atsru); | ||
302 | } | ||
303 | |||
304 | return rc; | ||
305 | } | ||
306 | |||
307 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) | ||
308 | { | ||
309 | int i; | ||
310 | struct pci_bus *bus; | ||
311 | struct acpi_dmar_atsr *atsr; | ||
312 | struct dmar_atsr_unit *atsru; | ||
313 | |||
314 | dev = pci_physfn(dev); | ||
315 | |||
316 | list_for_each_entry(atsru, &dmar_atsr_units, list) { | ||
317 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
318 | if (atsr->segment == pci_domain_nr(dev->bus)) | ||
319 | goto found; | ||
320 | } | ||
321 | |||
322 | return 0; | ||
323 | |||
324 | found: | ||
325 | for (bus = dev->bus; bus; bus = bus->parent) { | ||
326 | struct pci_dev *bridge = bus->self; | ||
327 | |||
328 | if (!bridge || !pci_is_pcie(bridge) || | ||
329 | bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | ||
330 | return 0; | ||
331 | |||
332 | if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { | ||
333 | for (i = 0; i < atsru->devices_cnt; i++) | ||
334 | if (atsru->devices[i] == bridge) | ||
335 | return 1; | ||
336 | break; | ||
337 | } | ||
338 | } | ||
339 | |||
340 | if (atsru->include_all) | ||
341 | return 1; | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | #endif | ||
346 | |||
347 | #ifdef CONFIG_ACPI_NUMA | 220 | #ifdef CONFIG_ACPI_NUMA |
348 | static int __init | 221 | static int __init |
349 | dmar_parse_one_rhsa(struct acpi_dmar_header *header) | 222 | dmar_parse_one_rhsa(struct acpi_dmar_header *header) |
@@ -484,14 +357,10 @@ parse_dmar_table(void) | |||
484 | ret = dmar_parse_one_drhd(entry_header); | 357 | ret = dmar_parse_one_drhd(entry_header); |
485 | break; | 358 | break; |
486 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 359 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
487 | #ifdef CONFIG_DMAR | ||
488 | ret = dmar_parse_one_rmrr(entry_header); | 360 | ret = dmar_parse_one_rmrr(entry_header); |
489 | #endif | ||
490 | break; | 361 | break; |
491 | case ACPI_DMAR_TYPE_ATSR: | 362 | case ACPI_DMAR_TYPE_ATSR: |
492 | #ifdef CONFIG_DMAR | ||
493 | ret = dmar_parse_one_atsr(entry_header); | 363 | ret = dmar_parse_one_atsr(entry_header); |
494 | #endif | ||
495 | break; | 364 | break; |
496 | case ACPI_DMAR_HARDWARE_AFFINITY: | 365 | case ACPI_DMAR_HARDWARE_AFFINITY: |
497 | #ifdef CONFIG_ACPI_NUMA | 366 | #ifdef CONFIG_ACPI_NUMA |
@@ -557,34 +426,31 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) | |||
557 | 426 | ||
558 | int __init dmar_dev_scope_init(void) | 427 | int __init dmar_dev_scope_init(void) |
559 | { | 428 | { |
429 | static int dmar_dev_scope_initialized; | ||
560 | struct dmar_drhd_unit *drhd, *drhd_n; | 430 | struct dmar_drhd_unit *drhd, *drhd_n; |
561 | int ret = -ENODEV; | 431 | int ret = -ENODEV; |
562 | 432 | ||
433 | if (dmar_dev_scope_initialized) | ||
434 | return dmar_dev_scope_initialized; | ||
435 | |||
436 | if (list_empty(&dmar_drhd_units)) | ||
437 | goto fail; | ||
438 | |||
563 | list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { | 439 | list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { |
564 | ret = dmar_parse_dev(drhd); | 440 | ret = dmar_parse_dev(drhd); |
565 | if (ret) | 441 | if (ret) |
566 | return ret; | 442 | goto fail; |
567 | } | 443 | } |
568 | 444 | ||
569 | #ifdef CONFIG_DMAR | 445 | ret = dmar_parse_rmrr_atsr_dev(); |
570 | { | 446 | if (ret) |
571 | struct dmar_rmrr_unit *rmrr, *rmrr_n; | 447 | goto fail; |
572 | struct dmar_atsr_unit *atsr, *atsr_n; | ||
573 | |||
574 | list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { | ||
575 | ret = rmrr_parse_dev(rmrr); | ||
576 | if (ret) | ||
577 | return ret; | ||
578 | } | ||
579 | 448 | ||
580 | list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) { | 449 | dmar_dev_scope_initialized = 1; |
581 | ret = atsr_parse_dev(atsr); | 450 | return 0; |
582 | if (ret) | ||
583 | return ret; | ||
584 | } | ||
585 | } | ||
586 | #endif | ||
587 | 451 | ||
452 | fail: | ||
453 | dmar_dev_scope_initialized = ret; | ||
588 | return ret; | 454 | return ret; |
589 | } | 455 | } |
590 | 456 | ||
@@ -611,14 +477,6 @@ int __init dmar_table_init(void) | |||
611 | return -ENODEV; | 477 | return -ENODEV; |
612 | } | 478 | } |
613 | 479 | ||
614 | #ifdef CONFIG_DMAR | ||
615 | if (list_empty(&dmar_rmrr_units)) | ||
616 | printk(KERN_INFO PREFIX "No RMRR found\n"); | ||
617 | |||
618 | if (list_empty(&dmar_atsr_units)) | ||
619 | printk(KERN_INFO PREFIX "No ATSR found\n"); | ||
620 | #endif | ||
621 | |||
622 | return 0; | 480 | return 0; |
623 | } | 481 | } |
624 | 482 | ||
@@ -682,9 +540,6 @@ int __init check_zero_address(void) | |||
682 | return 1; | 540 | return 1; |
683 | 541 | ||
684 | failed: | 542 | failed: |
685 | #ifdef CONFIG_DMAR | ||
686 | dmar_disabled = 1; | ||
687 | #endif | ||
688 | return 0; | 543 | return 0; |
689 | } | 544 | } |
690 | 545 | ||
@@ -696,22 +551,21 @@ int __init detect_intel_iommu(void) | |||
696 | if (ret) | 551 | if (ret) |
697 | ret = check_zero_address(); | 552 | ret = check_zero_address(); |
698 | { | 553 | { |
699 | #ifdef CONFIG_INTR_REMAP | ||
700 | struct acpi_table_dmar *dmar; | 554 | struct acpi_table_dmar *dmar; |
701 | 555 | ||
702 | dmar = (struct acpi_table_dmar *) dmar_tbl; | 556 | dmar = (struct acpi_table_dmar *) dmar_tbl; |
703 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) | 557 | |
558 | if (ret && intr_remapping_enabled && cpu_has_x2apic && | ||
559 | dmar->flags & 0x1) | ||
704 | printk(KERN_INFO | 560 | printk(KERN_INFO |
705 | "Queued invalidation will be enabled to support " | 561 | "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); |
706 | "x2apic and Intr-remapping.\n"); | 562 | |
707 | #endif | ||
708 | #ifdef CONFIG_DMAR | ||
709 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { | 563 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { |
710 | iommu_detected = 1; | 564 | iommu_detected = 1; |
711 | /* Make sure ACS will be enabled */ | 565 | /* Make sure ACS will be enabled */ |
712 | pci_request_acs(); | 566 | pci_request_acs(); |
713 | } | 567 | } |
714 | #endif | 568 | |
715 | #ifdef CONFIG_X86 | 569 | #ifdef CONFIG_X86 |
716 | if (ret) | 570 | if (ret) |
717 | x86_init.iommu.iommu_init = intel_iommu_init; | 571 | x86_init.iommu.iommu_init = intel_iommu_init; |
@@ -758,7 +612,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
758 | goto err_unmap; | 612 | goto err_unmap; |
759 | } | 613 | } |
760 | 614 | ||
761 | #ifdef CONFIG_DMAR | ||
762 | agaw = iommu_calculate_agaw(iommu); | 615 | agaw = iommu_calculate_agaw(iommu); |
763 | if (agaw < 0) { | 616 | if (agaw < 0) { |
764 | printk(KERN_ERR | 617 | printk(KERN_ERR |
@@ -773,7 +626,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
773 | iommu->seq_id); | 626 | iommu->seq_id); |
774 | goto err_unmap; | 627 | goto err_unmap; |
775 | } | 628 | } |
776 | #endif | ||
777 | iommu->agaw = agaw; | 629 | iommu->agaw = agaw; |
778 | iommu->msagaw = msagaw; | 630 | iommu->msagaw = msagaw; |
779 | 631 | ||
@@ -817,9 +669,7 @@ void free_iommu(struct intel_iommu *iommu) | |||
817 | if (!iommu) | 669 | if (!iommu) |
818 | return; | 670 | return; |
819 | 671 | ||
820 | #ifdef CONFIG_DMAR | ||
821 | free_dmar_iommu(iommu); | 672 | free_dmar_iommu(iommu); |
822 | #endif | ||
823 | 673 | ||
824 | if (iommu->reg) | 674 | if (iommu->reg) |
825 | iounmap(iommu->reg); | 675 | iounmap(iommu->reg); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a88f3cbb100b..f28d933c7927 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -398,11 +398,11 @@ static long list_size; | |||
398 | 398 | ||
399 | static void domain_remove_dev_info(struct dmar_domain *domain); | 399 | static void domain_remove_dev_info(struct dmar_domain *domain); |
400 | 400 | ||
401 | #ifdef CONFIG_DMAR_DEFAULT_ON | 401 | #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON |
402 | int dmar_disabled = 0; | 402 | int dmar_disabled = 0; |
403 | #else | 403 | #else |
404 | int dmar_disabled = 1; | 404 | int dmar_disabled = 1; |
405 | #endif /*CONFIG_DMAR_DEFAULT_ON*/ | 405 | #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/ |
406 | 406 | ||
407 | static int dmar_map_gfx = 1; | 407 | static int dmar_map_gfx = 1; |
408 | static int dmar_forcedac; | 408 | static int dmar_forcedac; |
@@ -2157,7 +2157,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, | |||
2157 | rmrr->end_address); | 2157 | rmrr->end_address); |
2158 | } | 2158 | } |
2159 | 2159 | ||
2160 | #ifdef CONFIG_DMAR_FLOPPY_WA | 2160 | #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA |
2161 | static inline void iommu_prepare_isa(void) | 2161 | static inline void iommu_prepare_isa(void) |
2162 | { | 2162 | { |
2163 | struct pci_dev *pdev; | 2163 | struct pci_dev *pdev; |
@@ -2180,7 +2180,7 @@ static inline void iommu_prepare_isa(void) | |||
2180 | { | 2180 | { |
2181 | return; | 2181 | return; |
2182 | } | 2182 | } |
2183 | #endif /* !CONFIG_DMAR_FLPY_WA */ | 2183 | #endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */ |
2184 | 2184 | ||
2185 | static int md_domain_init(struct dmar_domain *domain, int guest_width); | 2185 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
2186 | 2186 | ||
@@ -2491,7 +2491,7 @@ static int __init init_dmars(void) | |||
2491 | if (iommu_pass_through) | 2491 | if (iommu_pass_through) |
2492 | iommu_identity_mapping |= IDENTMAP_ALL; | 2492 | iommu_identity_mapping |= IDENTMAP_ALL; |
2493 | 2493 | ||
2494 | #ifdef CONFIG_DMAR_BROKEN_GFX_WA | 2494 | #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA |
2495 | iommu_identity_mapping |= IDENTMAP_GFX; | 2495 | iommu_identity_mapping |= IDENTMAP_GFX; |
2496 | #endif | 2496 | #endif |
2497 | 2497 | ||
@@ -3399,6 +3399,151 @@ static void __init init_iommu_pm_ops(void) | |||
3399 | static inline void init_iommu_pm_ops(void) {} | 3399 | static inline void init_iommu_pm_ops(void) {} |
3400 | #endif /* CONFIG_PM */ | 3400 | #endif /* CONFIG_PM */ |
3401 | 3401 | ||
3402 | LIST_HEAD(dmar_rmrr_units); | ||
3403 | |||
3404 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
3405 | { | ||
3406 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
3407 | } | ||
3408 | |||
3409 | |||
3410 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) | ||
3411 | { | ||
3412 | struct acpi_dmar_reserved_memory *rmrr; | ||
3413 | struct dmar_rmrr_unit *rmrru; | ||
3414 | |||
3415 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); | ||
3416 | if (!rmrru) | ||
3417 | return -ENOMEM; | ||
3418 | |||
3419 | rmrru->hdr = header; | ||
3420 | rmrr = (struct acpi_dmar_reserved_memory *)header; | ||
3421 | rmrru->base_address = rmrr->base_address; | ||
3422 | rmrru->end_address = rmrr->end_address; | ||
3423 | |||
3424 | dmar_register_rmrr_unit(rmrru); | ||
3425 | return 0; | ||
3426 | } | ||
3427 | |||
3428 | static int __init | ||
3429 | rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) | ||
3430 | { | ||
3431 | struct acpi_dmar_reserved_memory *rmrr; | ||
3432 | int ret; | ||
3433 | |||
3434 | rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; | ||
3435 | ret = dmar_parse_dev_scope((void *)(rmrr + 1), | ||
3436 | ((void *)rmrr) + rmrr->header.length, | ||
3437 | &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); | ||
3438 | |||
3439 | if (ret || (rmrru->devices_cnt == 0)) { | ||
3440 | list_del(&rmrru->list); | ||
3441 | kfree(rmrru); | ||
3442 | } | ||
3443 | return ret; | ||
3444 | } | ||
3445 | |||
3446 | static LIST_HEAD(dmar_atsr_units); | ||
3447 | |||
3448 | int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) | ||
3449 | { | ||
3450 | struct acpi_dmar_atsr *atsr; | ||
3451 | struct dmar_atsr_unit *atsru; | ||
3452 | |||
3453 | atsr = container_of(hdr, struct acpi_dmar_atsr, header); | ||
3454 | atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); | ||
3455 | if (!atsru) | ||
3456 | return -ENOMEM; | ||
3457 | |||
3458 | atsru->hdr = hdr; | ||
3459 | atsru->include_all = atsr->flags & 0x1; | ||
3460 | |||
3461 | list_add(&atsru->list, &dmar_atsr_units); | ||
3462 | |||
3463 | return 0; | ||
3464 | } | ||
3465 | |||
3466 | static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) | ||
3467 | { | ||
3468 | int rc; | ||
3469 | struct acpi_dmar_atsr *atsr; | ||
3470 | |||
3471 | if (atsru->include_all) | ||
3472 | return 0; | ||
3473 | |||
3474 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3475 | rc = dmar_parse_dev_scope((void *)(atsr + 1), | ||
3476 | (void *)atsr + atsr->header.length, | ||
3477 | &atsru->devices_cnt, &atsru->devices, | ||
3478 | atsr->segment); | ||
3479 | if (rc || !atsru->devices_cnt) { | ||
3480 | list_del(&atsru->list); | ||
3481 | kfree(atsru); | ||
3482 | } | ||
3483 | |||
3484 | return rc; | ||
3485 | } | ||
3486 | |||
3487 | int dmar_find_matched_atsr_unit(struct pci_dev *dev) | ||
3488 | { | ||
3489 | int i; | ||
3490 | struct pci_bus *bus; | ||
3491 | struct acpi_dmar_atsr *atsr; | ||
3492 | struct dmar_atsr_unit *atsru; | ||
3493 | |||
3494 | dev = pci_physfn(dev); | ||
3495 | |||
3496 | list_for_each_entry(atsru, &dmar_atsr_units, list) { | ||
3497 | atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); | ||
3498 | if (atsr->segment == pci_domain_nr(dev->bus)) | ||
3499 | goto found; | ||
3500 | } | ||
3501 | |||
3502 | return 0; | ||
3503 | |||
3504 | found: | ||
3505 | for (bus = dev->bus; bus; bus = bus->parent) { | ||
3506 | struct pci_dev *bridge = bus->self; | ||
3507 | |||
3508 | if (!bridge || !pci_is_pcie(bridge) || | ||
3509 | bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | ||
3510 | return 0; | ||
3511 | |||
3512 | if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { | ||
3513 | for (i = 0; i < atsru->devices_cnt; i++) | ||
3514 | if (atsru->devices[i] == bridge) | ||
3515 | return 1; | ||
3516 | break; | ||
3517 | } | ||
3518 | } | ||
3519 | |||
3520 | if (atsru->include_all) | ||
3521 | return 1; | ||
3522 | |||
3523 | return 0; | ||
3524 | } | ||
3525 | |||
3526 | int dmar_parse_rmrr_atsr_dev(void) | ||
3527 | { | ||
3528 | struct dmar_rmrr_unit *rmrr, *rmrr_n; | ||
3529 | struct dmar_atsr_unit *atsr, *atsr_n; | ||
3530 | int ret = 0; | ||
3531 | |||
3532 | list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { | ||
3533 | ret = rmrr_parse_dev(rmrr); | ||
3534 | if (ret) | ||
3535 | return ret; | ||
3536 | } | ||
3537 | |||
3538 | list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) { | ||
3539 | ret = atsr_parse_dev(atsr); | ||
3540 | if (ret) | ||
3541 | return ret; | ||
3542 | } | ||
3543 | |||
3544 | return ret; | ||
3545 | } | ||
3546 | |||
3402 | /* | 3547 | /* |
3403 | * Here we only respond to action of unbound device from driver. | 3548 | * Here we only respond to action of unbound device from driver. |
3404 | * | 3549 | * |
@@ -3448,16 +3593,12 @@ int __init intel_iommu_init(void) | |||
3448 | return -ENODEV; | 3593 | return -ENODEV; |
3449 | } | 3594 | } |
3450 | 3595 | ||
3451 | if (dmar_dev_scope_init()) { | 3596 | if (dmar_dev_scope_init() < 0) { |
3452 | if (force_on) | 3597 | if (force_on) |
3453 | panic("tboot: Failed to initialize DMAR device scope\n"); | 3598 | panic("tboot: Failed to initialize DMAR device scope\n"); |
3454 | return -ENODEV; | 3599 | return -ENODEV; |
3455 | } | 3600 | } |
3456 | 3601 | ||
3457 | /* | ||
3458 | * Check the need for DMA-remapping initialization now. | ||
3459 | * Above initialization will also be used by Interrupt-remapping. | ||
3460 | */ | ||
3461 | if (no_iommu || dmar_disabled) | 3602 | if (no_iommu || dmar_disabled) |
3462 | return -ENODEV; | 3603 | return -ENODEV; |
3463 | 3604 | ||
@@ -3467,6 +3608,12 @@ int __init intel_iommu_init(void) | |||
3467 | return -ENODEV; | 3608 | return -ENODEV; |
3468 | } | 3609 | } |
3469 | 3610 | ||
3611 | if (list_empty(&dmar_rmrr_units)) | ||
3612 | printk(KERN_INFO "DMAR: No RMRR found\n"); | ||
3613 | |||
3614 | if (list_empty(&dmar_atsr_units)) | ||
3615 | printk(KERN_INFO "DMAR: No ATSR found\n"); | ||
3616 | |||
3470 | if (dmar_init_reserved_ranges()) { | 3617 | if (dmar_init_reserved_ranges()) { |
3471 | if (force_on) | 3618 | if (force_on) |
3472 | panic("tboot: Failed to reserve iommu ranges\n"); | 3619 | panic("tboot: Failed to reserve iommu ranges\n"); |
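Note on how the relocated parsers get invoked: dmar_parse_one_rmrr() and dmar_parse_one_atsr() now live in intel-iommu.c, but they are still driven from the generic ACPI DMAR table walk in dmar.c, which steps through the packed sub-tables and dispatches on each header's type. The standalone sketch below only illustrates that dispatch idea; the struct, enum values and walk_dmar_table() are simplified placeholders, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for an ACPI DMAR sub-table header and its types. */
struct dmar_sub_header {
	uint16_t type;
	uint16_t length;
};

enum { DMAR_SUB_DRHD, DMAR_SUB_RMRR, DMAR_SUB_ATSR };

static int parse_one_rmrr(struct dmar_sub_header *h)
{
	printf("RMRR entry, %u bytes\n", (unsigned)h->length);
	return 0;
}

static int parse_one_atsr(struct dmar_sub_header *h)
{
	printf("ATSR entry, %u bytes\n", (unsigned)h->length);
	return 0;
}

/* Walk the packed sub-tables and dispatch on type, roughly as the DMAR
 * table parser does; unknown types are simply skipped here. */
static int walk_dmar_table(void *start, void *end)
{
	struct dmar_sub_header *h = start;

	while ((void *)h < end) {
		switch (h->type) {
		case DMAR_SUB_RMRR:
			parse_one_rmrr(h);
			break;
		case DMAR_SUB_ATSR:
			parse_one_atsr(h);
			break;
		default:
			break;
		}
		h = (void *)((char *)h + h->length);
	}
	return 0;
}

int main(void)
{
	struct dmar_sub_header table[] = {
		{ DMAR_SUB_RMRR, sizeof(struct dmar_sub_header) },
		{ DMAR_SUB_ATSR, sizeof(struct dmar_sub_header) },
	};
	return walk_dmar_table(table, table + 2);
}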
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c index 1a89d4a2cadf..cfb0dd4bf0b6 100644 --- a/drivers/iommu/intr_remapping.c +++ b/drivers/iommu/intr_remapping.c | |||
@@ -21,6 +21,7 @@ int intr_remapping_enabled; | |||
21 | 21 | ||
22 | static int disable_intremap; | 22 | static int disable_intremap; |
23 | static int disable_sourceid_checking; | 23 | static int disable_sourceid_checking; |
24 | static int no_x2apic_optout; | ||
24 | 25 | ||
25 | static __init int setup_nointremap(char *str) | 26 | static __init int setup_nointremap(char *str) |
26 | { | 27 | { |
@@ -34,12 +35,20 @@ static __init int setup_intremap(char *str) | |||
34 | if (!str) | 35 | if (!str) |
35 | return -EINVAL; | 36 | return -EINVAL; |
36 | 37 | ||
37 | if (!strncmp(str, "on", 2)) | 38 | while (*str) { |
38 | disable_intremap = 0; | 39 | if (!strncmp(str, "on", 2)) |
39 | else if (!strncmp(str, "off", 3)) | 40 | disable_intremap = 0; |
40 | disable_intremap = 1; | 41 | else if (!strncmp(str, "off", 3)) |
41 | else if (!strncmp(str, "nosid", 5)) | 42 | disable_intremap = 1; |
42 | disable_sourceid_checking = 1; | 43 | else if (!strncmp(str, "nosid", 5)) |
44 | disable_sourceid_checking = 1; | ||
45 | else if (!strncmp(str, "no_x2apic_optout", 16)) | ||
46 | no_x2apic_optout = 1; | ||
47 | |||
48 | str += strcspn(str, ","); | ||
49 | while (*str == ',') | ||
50 | str++; | ||
51 | } | ||
43 | 52 | ||
44 | return 0; | 53 | return 0; |
45 | } | 54 | } |
@@ -501,6 +510,15 @@ end: | |||
501 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 510 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
502 | } | 511 | } |
503 | 512 | ||
513 | static int __init dmar_x2apic_optout(void) | ||
514 | { | ||
515 | struct acpi_table_dmar *dmar; | ||
516 | dmar = (struct acpi_table_dmar *)dmar_tbl; | ||
517 | if (!dmar || no_x2apic_optout) | ||
518 | return 0; | ||
519 | return dmar->flags & DMAR_X2APIC_OPT_OUT; | ||
520 | } | ||
521 | |||
504 | int __init intr_remapping_supported(void) | 522 | int __init intr_remapping_supported(void) |
505 | { | 523 | { |
506 | struct dmar_drhd_unit *drhd; | 524 | struct dmar_drhd_unit *drhd; |
@@ -521,16 +539,25 @@ int __init intr_remapping_supported(void) | |||
521 | return 1; | 539 | return 1; |
522 | } | 540 | } |
523 | 541 | ||
524 | int __init enable_intr_remapping(int eim) | 542 | int __init enable_intr_remapping(void) |
525 | { | 543 | { |
526 | struct dmar_drhd_unit *drhd; | 544 | struct dmar_drhd_unit *drhd; |
527 | int setup = 0; | 545 | int setup = 0; |
546 | int eim = 0; | ||
528 | 547 | ||
529 | if (parse_ioapics_under_ir() != 1) { | 548 | if (parse_ioapics_under_ir() != 1) { |
530 | printk(KERN_INFO "Not enabling interrupt remapping\n"); | 549 | printk(KERN_INFO "Not enabling interrupt remapping\n"); |
531 | return -1; | 550 | return -1; |
532 | } | 551 | } |
533 | 552 | ||
553 | if (x2apic_supported()) { | ||
554 | eim = !dmar_x2apic_optout(); | ||
555 | WARN(!eim, KERN_WARNING | ||
556 | "Your BIOS is broken and requested that x2apic be disabled\n" | ||
557 | "This will leave your machine vulnerable to irq-injection attacks\n" | ||
558 | "Use 'intremap=no_x2apic_optout' to override BIOS request\n"); | ||
559 | } | ||
560 | |||
534 | for_each_drhd_unit(drhd) { | 561 | for_each_drhd_unit(drhd) { |
535 | struct intel_iommu *iommu = drhd->iommu; | 562 | struct intel_iommu *iommu = drhd->iommu; |
536 | 563 | ||
@@ -606,8 +633,9 @@ int __init enable_intr_remapping(int eim) | |||
606 | goto error; | 633 | goto error; |
607 | 634 | ||
608 | intr_remapping_enabled = 1; | 635 | intr_remapping_enabled = 1; |
636 | pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic"); | ||
609 | 637 | ||
610 | return 0; | 638 | return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; |
611 | 639 | ||
612 | error: | 640 | error: |
613 | /* | 641 | /* |
@@ -745,6 +773,15 @@ int __init parse_ioapics_under_ir(void) | |||
745 | return ir_supported; | 773 | return ir_supported; |
746 | } | 774 | } |
747 | 775 | ||
776 | int ir_dev_scope_init(void) | ||
777 | { | ||
778 | if (!intr_remapping_enabled) | ||
779 | return 0; | ||
780 | |||
781 | return dmar_dev_scope_init(); | ||
782 | } | ||
783 | rootfs_initcall(ir_dev_scope_init); | ||
784 | |||
748 | void disable_intr_remapping(void) | 785 | void disable_intr_remapping(void) |
749 | { | 786 | { |
750 | struct dmar_drhd_unit *drhd; | 787 | struct dmar_drhd_unit *drhd; |
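With the setup_intremap() rework shown above, several intremap= options can be combined on the kernel command line, e.g. intremap=nosid,no_x2apic_optout. Below is a small user-space sketch of the same token-scanning loop; the option names and flag variables mirror the patch, while the parse_intremap()/main() harness is purely illustrative.

#include <stdio.h>
#include <string.h>

static int disable_intremap;
static int disable_sourceid_checking;
static int no_x2apic_optout;

/* Same scanning idiom as setup_intremap(): match a known token, then skip
 * past the next comma (if any) and examine the following token. */
static int parse_intremap(const char *str)
{
	if (!str)
		return -1;

	while (*str) {
		if (!strncmp(str, "on", 2))
			disable_intremap = 0;
		else if (!strncmp(str, "off", 3))
			disable_intremap = 1;
		else if (!strncmp(str, "nosid", 5))
			disable_sourceid_checking = 1;
		else if (!strncmp(str, "no_x2apic_optout", 16))
			no_x2apic_optout = 1;

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}

int main(void)
{
	parse_intremap("nosid,no_x2apic_optout");	/* as in intremap=nosid,no_x2apic_optout */
	printf("intremap off=%d nosid=%d no_x2apic_optout=%d\n",
	       disable_intremap, disable_sourceid_checking, no_x2apic_optout);
	return 0;
}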
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1196f61a4ab6..b23856aaf6eb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2788,7 +2788,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_ | |||
2788 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); | 2788 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); |
2789 | #endif /*CONFIG_MMC_RICOH_MMC*/ | 2789 | #endif /*CONFIG_MMC_RICOH_MMC*/ |
2790 | 2790 | ||
2791 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | 2791 | #ifdef CONFIG_DMAR_TABLE |
2792 | #define VTUNCERRMSK_REG 0x1ac | 2792 | #define VTUNCERRMSK_REG 0x1ac |
2793 | #define VTD_MSK_SPEC_ERRORS (1 << 31) | 2793 | #define VTD_MSK_SPEC_ERRORS (1 << 31) |
2794 | /* | 2794 | /* |
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index bbd8661b3473..ef90cbd8e173 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h | |||
@@ -25,11 +25,12 @@ struct intel_iommu; | |||
25 | struct dmar_domain; | 25 | struct dmar_domain; |
26 | struct root_entry; | 26 | struct root_entry; |
27 | 27 | ||
28 | extern void free_dmar_iommu(struct intel_iommu *iommu); | ||
29 | 28 | ||
30 | #ifdef CONFIG_DMAR | 29 | #ifdef CONFIG_INTEL_IOMMU |
30 | extern void free_dmar_iommu(struct intel_iommu *iommu); | ||
31 | extern int iommu_calculate_agaw(struct intel_iommu *iommu); | 31 | extern int iommu_calculate_agaw(struct intel_iommu *iommu); |
32 | extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); | 32 | extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); |
33 | extern int dmar_disabled; | ||
33 | #else | 34 | #else |
34 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) | 35 | static inline int iommu_calculate_agaw(struct intel_iommu *iommu) |
35 | { | 36 | { |
@@ -39,8 +40,11 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) | |||
39 | { | 40 | { |
40 | return 0; | 41 | return 0; |
41 | } | 42 | } |
43 | static inline void free_dmar_iommu(struct intel_iommu *iommu) | ||
44 | { | ||
45 | } | ||
46 | #define dmar_disabled (1) | ||
42 | #endif | 47 | #endif |
43 | 48 | ||
44 | extern int dmar_disabled; | ||
45 | 49 | ||
46 | #endif | 50 | #endif |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 7b776d71d36d..a8b1a847c103 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -26,8 +26,13 @@ | |||
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <linux/irqreturn.h> | 27 | #include <linux/irqreturn.h> |
28 | 28 | ||
29 | /* DMAR Flags */ | ||
30 | #define DMAR_INTR_REMAP 0x1 | ||
31 | #define DMAR_X2APIC_OPT_OUT 0x2 | ||
32 | |||
29 | struct intel_iommu; | 33 | struct intel_iommu; |
30 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | 34 | #ifdef CONFIG_DMAR_TABLE |
35 | extern struct acpi_table_header *dmar_tbl; | ||
31 | struct dmar_drhd_unit { | 36 | struct dmar_drhd_unit { |
32 | struct list_head list; /* list of drhd units */ | 37 | struct list_head list; /* list of drhd units */ |
33 | struct acpi_dmar_header *hdr; /* ACPI header */ | 38 | struct acpi_dmar_header *hdr; /* ACPI header */ |
@@ -76,7 +81,7 @@ static inline int enable_drhd_fault_handling(void) | |||
76 | { | 81 | { |
77 | return -1; | 82 | return -1; |
78 | } | 83 | } |
79 | #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ | 84 | #endif /* !CONFIG_DMAR_TABLE */ |
80 | 85 | ||
81 | struct irte { | 86 | struct irte { |
82 | union { | 87 | union { |
@@ -107,10 +112,10 @@ struct irte { | |||
107 | }; | 112 | }; |
108 | }; | 113 | }; |
109 | 114 | ||
110 | #ifdef CONFIG_INTR_REMAP | 115 | #ifdef CONFIG_IRQ_REMAP |
111 | extern int intr_remapping_enabled; | 116 | extern int intr_remapping_enabled; |
112 | extern int intr_remapping_supported(void); | 117 | extern int intr_remapping_supported(void); |
113 | extern int enable_intr_remapping(int); | 118 | extern int enable_intr_remapping(void); |
114 | extern void disable_intr_remapping(void); | 119 | extern void disable_intr_remapping(void); |
115 | extern int reenable_intr_remapping(int); | 120 | extern int reenable_intr_remapping(int); |
116 | 121 | ||
@@ -177,7 +182,7 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
177 | 182 | ||
178 | #define intr_remapping_enabled (0) | 183 | #define intr_remapping_enabled (0) |
179 | 184 | ||
180 | static inline int enable_intr_remapping(int eim) | 185 | static inline int enable_intr_remapping(void) |
181 | { | 186 | { |
182 | return -1; | 187 | return -1; |
183 | } | 188 | } |
@@ -192,6 +197,11 @@ static inline int reenable_intr_remapping(int eim) | |||
192 | } | 197 | } |
193 | #endif | 198 | #endif |
194 | 199 | ||
200 | enum { | ||
201 | IRQ_REMAP_XAPIC_MODE, | ||
202 | IRQ_REMAP_X2APIC_MODE, | ||
203 | }; | ||
204 | |||
195 | /* Can't use the common MSI interrupt functions | 205 | /* Can't use the common MSI interrupt functions |
196 | * since DMAR is not a pci device | 206 | * since DMAR is not a pci device |
197 | */ | 207 | */ |
@@ -204,7 +214,7 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu); | |||
204 | extern irqreturn_t dmar_fault(int irq, void *dev_id); | 214 | extern irqreturn_t dmar_fault(int irq, void *dev_id); |
205 | extern int arch_setup_dmar_msi(unsigned int irq); | 215 | extern int arch_setup_dmar_msi(unsigned int irq); |
206 | 216 | ||
207 | #ifdef CONFIG_DMAR | 217 | #ifdef CONFIG_INTEL_IOMMU |
208 | extern int iommu_detected, no_iommu; | 218 | extern int iommu_detected, no_iommu; |
209 | extern struct list_head dmar_rmrr_units; | 219 | extern struct list_head dmar_rmrr_units; |
210 | struct dmar_rmrr_unit { | 220 | struct dmar_rmrr_unit { |
@@ -227,9 +237,26 @@ struct dmar_atsr_unit { | |||
227 | u8 include_all:1; /* include all ports */ | 237 | u8 include_all:1; /* include all ports */ |
228 | }; | 238 | }; |
229 | 239 | ||
240 | int dmar_parse_rmrr_atsr_dev(void); | ||
241 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | ||
242 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | ||
243 | extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, | ||
244 | struct pci_dev ***devices, u16 segment); | ||
230 | extern int intel_iommu_init(void); | 245 | extern int intel_iommu_init(void); |
231 | #else /* !CONFIG_DMAR: */ | 246 | #else /* !CONFIG_INTEL_IOMMU: */ |
232 | static inline int intel_iommu_init(void) { return -ENODEV; } | 247 | static inline int intel_iommu_init(void) { return -ENODEV; } |
233 | #endif /* CONFIG_DMAR */ | 248 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) |
249 | { | ||
250 | return 0; | ||
251 | } | ||
252 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | ||
253 | { | ||
254 | return 0; | ||
255 | } | ||
256 | static inline int dmar_parse_rmrr_atsr_dev(void) | ||
257 | { | ||
258 | return 0; | ||
259 | } | ||
260 | #endif /* CONFIG_INTEL_IOMMU */ | ||
234 | 261 | ||
235 | #endif /* __DMAR_H__ */ | 262 | #endif /* __DMAR_H__ */ |
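The dmar.h hunk above follows the usual kernel idiom of pairing real prototypes under CONFIG_INTEL_IOMMU with static-inline stubs in the #else branch, so call sites compile unchanged whether or not the Intel IOMMU code is built. A minimal stand-alone illustration of that idiom follows; CONFIG_FEATURE and feature_init() are generic placeholders, not kernel symbols.

#include <stdio.h>

/* Uncomment to mimic building with the feature enabled (CONFIG_...=y). */
/* #define CONFIG_FEATURE 1 */

#ifdef CONFIG_FEATURE
int feature_init(void)
{
	printf("real implementation ran\n");
	return 0;
}
#else
/* Stub: lets callers compile, and optimizes away when the feature is off. */
static inline int feature_init(void)
{
	return 0;
}
#endif

int main(void)
{
	return feature_init();	/* the call site needs no #ifdef of its own */
}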
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 9310c699a37d..235b8879af45 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -279,7 +279,7 @@ struct q_inval { | |||
279 | int free_cnt; | 279 | int free_cnt; |
280 | }; | 280 | }; |
281 | 281 | ||
282 | #ifdef CONFIG_INTR_REMAP | 282 | #ifdef CONFIG_IRQ_REMAP |
283 | /* 1MB - maximum possible interrupt remapping table size */ | 283 | /* 1MB - maximum possible interrupt remapping table size */ |
284 | #define INTR_REMAP_PAGE_ORDER 8 | 284 | #define INTR_REMAP_PAGE_ORDER 8 |
285 | #define INTR_REMAP_TABLE_REG_SIZE 0xf | 285 | #define INTR_REMAP_TABLE_REG_SIZE 0xf |
@@ -318,7 +318,7 @@ struct intel_iommu { | |||
318 | unsigned int irq; | 318 | unsigned int irq; |
319 | unsigned char name[13]; /* Device Name */ | 319 | unsigned char name[13]; /* Device Name */ |
320 | 320 | ||
321 | #ifdef CONFIG_DMAR | 321 | #ifdef CONFIG_INTEL_IOMMU |
322 | unsigned long *domain_ids; /* bitmap of domains */ | 322 | unsigned long *domain_ids; /* bitmap of domains */ |
323 | struct dmar_domain **domains; /* ptr to domains */ | 323 | struct dmar_domain **domains; /* ptr to domains */ |
324 | spinlock_t lock; /* protect context, domain ids */ | 324 | spinlock_t lock; /* protect context, domain ids */ |
@@ -329,7 +329,7 @@ struct intel_iommu { | |||
329 | struct q_inval *qi; /* Queued invalidation info */ | 329 | struct q_inval *qi; /* Queued invalidation info */ |
330 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ | 330 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ |
331 | 331 | ||
332 | #ifdef CONFIG_INTR_REMAP | 332 | #ifdef CONFIG_IRQ_REMAP |
333 | struct ir_table *ir_table; /* Interrupt remapping info */ | 333 | struct ir_table *ir_table; /* Interrupt remapping info */ |
334 | #endif | 334 | #endif |
335 | int node; | 335 | int node; |