-rw-r--r--  Documentation/Intel-IOMMU.txt |  17
-rw-r--r--  arch/x86/kernel/io_apic_64.c  |  59
-rw-r--r--  drivers/pci/intel-iommu.c     | 194
-rw-r--r--  include/linux/dmar.h          |  12
4 files changed, 281 insertions(+), 1 deletion(-)
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index cbb4dbaef761..aba7722c2935 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -63,6 +63,15 @@ Interrupt ranges are not address translated, (0xfee00000 - 0xfeefffff).
 The same is true for peer to peer transactions. Hence we reserve the
 address from PCI MMIO ranges so they are not allocated for IOVA addresses.
 
+
+Fault reporting
+---------------
+When errors are reported, the DMA engine signals via an interrupt. The fault
+reason and the device that caused the fault are printed on the console.
+
+See below for a sample.
+
+
 Boot Message Sample
 -------------------
 
@@ -85,6 +94,14 @@ When DMAR is enabled for use, you will notice..
 
 PCI-DMA: Using DMAR IOMMU
 
+Fault reporting
+---------------
+
+DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
+DMAR:[fault reason 05] PTE Write access is not set
+DMAR:[DMA Write] Request device [00:02.0] fault addr 6df084000
+DMAR:[fault reason 05] PTE Write access is not set
+
 TBD
 ----
 
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b3c2d268d708..953328b55a30 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -31,6 +31,7 @@
 #include <linux/sysdev.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
+#include <linux/dmar.h>
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
 #endif
@@ -2031,8 +2032,64 @@ void arch_teardown_msi_irq(unsigned int irq)
 	destroy_irq(irq);
 }
 
-#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	struct msi_msg msg;
+	unsigned int dest;
+	cpumask_t tmp;
+
+	cpus_and(tmp, mask, cpu_online_map);
+	if (cpus_empty(tmp))
+		return;
+
+	if (assign_irq_vector(irq, mask))
+		return;
+
+	cpus_and(tmp, cfg->domain, mask);
+	dest = cpu_mask_to_apicid(tmp);
+
+	dmar_msi_read(irq, &msg);
+
+	msg.data &= ~MSI_DATA_VECTOR_MASK;
+	msg.data |= MSI_DATA_VECTOR(cfg->vector);
+	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+	dmar_msi_write(irq, &msg);
+	irq_desc[irq].affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+	.name = "DMAR_MSI",
+	.unmask = dmar_msi_unmask,
+	.mask = dmar_msi_mask,
+	.ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+	.set_affinity = dmar_msi_set_affinity,
+#endif
+	.retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+	int ret;
+	struct msi_msg msg;
+
+	ret = msi_compose_msg(NULL, irq, &msg);
+	if (ret < 0)
+		return ret;
+	dmar_msi_write(irq, &msg);
+	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+		"edge");
+	return 0;
+}
+#endif
 
+#endif /* CONFIG_PCI_MSI */
 /*
  * Hypertransport interrupt support
  */
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cb24defdb6d9..358dd406fe21 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -743,6 +743,196 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	return 0;
 }
 
+/* iommu interrupt handling. Most of it is MSI-like. */
+
+static char *fault_reason_strings[] =
+{
+	"Software",
+	"Present bit in root entry is clear",
+	"Present bit in context entry is clear",
+	"Invalid context entry",
+	"Access beyond MGAW",
+	"PTE Write access is not set",
+	"PTE Read access is not set",
+	"Next page table ptr is invalid",
+	"Root table address invalid",
+	"Context table ptr is invalid",
+	"non-zero reserved fields in RTP",
+	"non-zero reserved fields in CTP",
+	"non-zero reserved fields in PTE",
+	"Unknown"
+};
+#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
+
+char *dmar_get_fault_reason(u8 fault_reason)
+{
+	if (fault_reason > MAX_FAULT_REASON_IDX)
+		return fault_reason_strings[MAX_FAULT_REASON_IDX];
+	else
+		return fault_reason_strings[fault_reason];
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	/* unmask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(0, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+	unsigned long flag;
+	struct intel_iommu *iommu = get_irq_data(irq);
+
+	/* mask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
+		u8 fault_reason, u16 source_id, u64 addr)
+{
+	char *reason;
+
+	reason = dmar_get_fault_reason(fault_reason);
+
+	printk(KERN_ERR
+		"DMAR:[%s] Request device [%02x:%02x.%d] "
+		"fault addr %llx \n"
+		"DMAR:[fault reason %02d] %s\n",
+		(type ? "DMA Read" : "DMA Write"),
+		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+	return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+static irqreturn_t iommu_page_fault(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+	int reg, fault_index;
+	u32 fault_status;
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/* TBD: ignore advanced fault log currently */
+	if (!(fault_status & DMA_FSTS_PPF))
+		goto clear_overflow;
+
+	fault_index = dma_fsts_fault_record_index(fault_status);
+	reg = cap_fault_reg_offset(iommu->cap);
+	while (1) {
+		u8 fault_reason;
+		u16 source_id;
+		u64 guest_addr;
+		int type;
+		u32 data;
+
+		/* highest 32 bits */
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 12);
+		if (!(data & DMA_FRCD_F))
+			break;
+
+		fault_reason = dma_frcd_fault_reason(data);
+		type = dma_frcd_type(data);
+
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 8);
+		source_id = dma_frcd_source_id(data);
+
+		guest_addr = dmar_readq(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN);
+		guest_addr = dma_frcd_page_addr(guest_addr);
+		/* clear the fault */
+		writel(DMA_FRCD_F, iommu->reg + reg +
+			fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+		iommu_page_fault_do_one(iommu, type, fault_reason,
+				source_id, guest_addr);
+
+		fault_index++;
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
+			fault_index = 0;
+		spin_lock_irqsave(&iommu->register_lock, flag);
+	}
+clear_overflow:
+	/* clear primary fault overflow */
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	if (fault_status & DMA_FSTS_PFO)
+		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	return IRQ_HANDLED;
+}
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+	int irq, ret;
+
+	irq = create_irq();
+	if (!irq) {
+		printk(KERN_ERR "IOMMU: no free vectors\n");
+		return -EINVAL;
+	}
+
+	set_irq_data(irq, iommu);
+	iommu->irq = irq;
+
+	ret = arch_setup_dmar_msi(irq);
+	if (ret) {
+		set_irq_data(irq, NULL);
+		iommu->irq = 0;
+		destroy_irq(irq);
+		return ret;
+	}
+
+	/* Clear any faults that are already pending */
+	iommu_page_fault(irq, iommu);
+
+	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
+	if (ret)
+		printk(KERN_ERR "IOMMU: can't request irq\n");
+	return ret;
+}
+
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
 	unsigned long ndomains;
@@ -1490,6 +1680,10 @@ int __init init_dmars(void)
 
 		iommu_flush_write_buffer(iommu);
 
+		ret = dmar_set_interrupt(iommu);
+		if (ret)
+			goto error;
+
 		iommu_set_root_entry(iommu);
 
 		iommu_flush_context_global(iommu, 0);
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 7d683dc8ed1e..ffb6439cb5e6 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -28,6 +28,18 @@
 #ifdef CONFIG_DMAR
 struct intel_iommu;
 
+extern char *dmar_get_fault_reason(u8 fault_reason);
+
+/* Can't use the common MSI interrupt functions
+ * since DMAR is not a pci device
+ */
+extern void dmar_msi_unmask(unsigned int irq);
+extern void dmar_msi_mask(unsigned int irq);
+extern void dmar_msi_read(int irq, struct msi_msg *msg);
+extern void dmar_msi_write(int irq, struct msi_msg *msg);
+extern int dmar_set_interrupt(struct intel_iommu *iommu);
+extern int arch_setup_dmar_msi(unsigned int irq);
+
 /* Intel IOMMU detection and initialization functions */
 extern void detect_intel_iommu(void);
 extern int intel_iommu_init(void);