Diffstat (limited to 'drivers/pci/dmar.c')
 -rw-r--r--  drivers/pci/dmar.c | 34
 1 files changed, 19 insertions, 15 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 0a19708074c2..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -36,6 +36,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
+#include <asm/iommu_table.h>
 
 #define PREFIX "DMAR: "
 
@@ -687,7 +688,7 @@ failed:
 	return 0;
 }
 
-void __init detect_intel_iommu(void)
+int __init detect_intel_iommu(void)
 {
 	int ret;
 
@@ -697,12 +698,7 @@ void __init detect_intel_iommu(void)
 	{
 #ifdef CONFIG_INTR_REMAP
 		struct acpi_table_dmar *dmar;
-		/*
-		 * for now we will disable dma-remapping when interrupt
-		 * remapping is enabled.
-		 * When support for queued invalidation for IOTLB invalidation
-		 * is added, we will not need this any more.
-		 */
+
 		dmar = (struct acpi_table_dmar *) dmar_tbl;
 		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
 			printk(KERN_INFO
@@ -723,6 +719,8 @@ void __init detect_intel_iommu(void)
 	}
 	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
+
+	return ret ? 1 : -ENODEV;
 }
 
 
@@ -1221,9 +1219,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 	}
 }
 
-void dmar_msi_unmask(unsigned int irq)
+void dmar_msi_unmask(struct irq_data *data)
 {
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 	unsigned long flag;
 
 	/* unmask it */
@@ -1234,10 +1232,10 @@ void dmar_msi_unmask(unsigned int irq)
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-void dmar_msi_mask(unsigned int irq)
+void dmar_msi_mask(struct irq_data *data)
 {
 	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
 	/* mask it */
 	spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1249,7 +1247,7 @@ void dmar_msi_mask(unsigned int irq)
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
 {
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
 	spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1261,7 +1259,7 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
 {
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
 	spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1379,12 +1377,12 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 		return -EINVAL;
 	}
 
-	set_irq_data(irq, iommu);
+	irq_set_handler_data(irq, iommu);
 	iommu->irq = irq;
 
 	ret = arch_setup_dmar_msi(irq);
 	if (ret) {
-		set_irq_data(irq, NULL);
+		irq_set_handler_data(irq, NULL);
 		iommu->irq = 0;
 		destroy_irq(irq);
 		return ret;
@@ -1414,6 +1412,11 @@ int __init enable_drhd_fault_handling(void)
 			       (unsigned long long)drhd->reg_base_addr, ret);
 			return -1;
 		}
+
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(iommu->irq, iommu);
 	}
 
 	return 0;
@@ -1455,3 +1458,4 @@ int __init dmar_ir_support(void)
 		return 0;
 	return dmar->flags & 0x1;
 }
+IOMMU_INIT_POST(detect_intel_iommu);
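
The change of detect_intel_iommu() from void to int, together with the new IOMMU_INIT_POST(detect_intel_iommu) registration at the end of the file, lets the x86 IOMMU initialization table decide which backend to bring up: the detect routine now reports a positive value when a DMAR table is found and -ENODEV otherwise. The sketch below is a simplified, self-contained user-space model of that table-driven pattern; the entry layout, the iommu_table_model[] array, and the pci_iommu_alloc_model() walker are illustrative names only, not the kernel's actual iommu_table machinery.

/*
 * Simplified user-space model of table-driven IOMMU detection.
 * It only illustrates why detect_intel_iommu() now returns int;
 * the struct layout and the walker below are NOT the kernel's
 * real iommu_table implementation.
 */
#include <errno.h>
#include <stdio.h>

struct iommu_table_entry_model {
	const char *name;
	int (*detect)(void);	/* > 0: hardware found, < 0: -errno */
	void (*init)(void);	/* init routine for the detected backend */
};

/* Stand-in for detect_intel_iommu(): pretend a DMAR table was found. */
static int detect_intel_iommu_model(void)
{
	int dmar_table_present = 1;	/* hypothetical probe result */

	return dmar_table_present ? 1 : -ENODEV;
}

static void intel_iommu_init_model(void)
{
	printf("initializing Intel IOMMU backend\n");
}

static struct iommu_table_entry_model iommu_table_model[] = {
	{ "intel-iommu", detect_intel_iommu_model, intel_iommu_init_model },
};

/* Model of a pci_iommu_alloc()-style walk over the detection table. */
static void pci_iommu_alloc_model(void)
{
	size_t i;

	for (i = 0; i < sizeof(iommu_table_model) / sizeof(iommu_table_model[0]); i++) {
		if (iommu_table_model[i].detect() > 0) {
			iommu_table_model[i].init();
			return;
		}
	}
	printf("no IOMMU detected\n");
}

int main(void)
{
	pci_iommu_alloc_model();
	return 0;
}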
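
The dmar_msi_unmask()/dmar_msi_mask() hunks follow the genirq conversion from irq-number-based chip callbacks to struct irq_data based ones: the IOMMU pointer that dmar_set_interrupt() stores with irq_set_handler_data() is now read back through irq_data_get_irq_handler_data(data) instead of get_irq_data(irq). The snippet below is a minimal user-space model of that handler-data round trip with mocked types; only the calling convention mirrors the kernel, and the *_model names are hypothetical.

/*
 * Minimal model of the genirq handler-data round trip used in this
 * diff. struct irq_data_model and the lookup table are mocked for
 * illustration and are not the kernel's genirq implementation.
 */
#include <stdio.h>

struct irq_data_model {
	unsigned int irq;
	void *handler_data;	/* what irq_set_handler_data() stored */
};

struct intel_iommu_model {
	const char *name;
};

/* Old style: the callback got a bare irq number and had to look data up. */
static void dmar_msi_mask_old(unsigned int irq, struct irq_data_model *table)
{
	struct intel_iommu_model *iommu = table[irq].handler_data;

	printf("old: mask DMAR MSI on %s (irq %u)\n", iommu->name, irq);
}

/* New style: the callback receives irq_data and reads handler data directly. */
static void dmar_msi_mask_new(struct irq_data_model *data)
{
	struct intel_iommu_model *iommu = data->handler_data;

	printf("new: mask DMAR MSI on %s (irq %u)\n", iommu->name, data->irq);
}

int main(void)
{
	struct intel_iommu_model iommu = { .name = "dmar0" };
	struct irq_data_model table[2] = {
		[1] = { .irq = 1, .handler_data = &iommu },	/* models irq_set_handler_data(1, &iommu) */
	};

	dmar_msi_mask_old(1, table);
	dmar_msi_mask_new(&table[1]);
	return 0;
}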