author		Ingo Molnar <mingo@elte.hu>	2009-03-30 17:53:32 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-30 17:53:32 -0400
commit		65fb0d23fcddd8697c871047b700c78817bdaa43 (patch)
tree		119e6e5f276622c4c862f6c9b6d795264ba1603a /drivers/pci
parent		8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
parent		dfbbe89e197a77f2c8046a51c74e33e35f878080 (diff)

Merge branch 'linus' into cpumask-for-linus

Conflicts:
	arch/x86/kernel/cpu/common.c
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/dmar.c			296
-rw-r--r--	drivers/pci/hotplug/cpqphp_sysfs.c	  3
-rw-r--r--	drivers/pci/intel-iommu.c		270
-rw-r--r--	drivers/pci/intr_remapping.c		112
4 files changed, 440 insertions(+), 241 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 5f333403c2ea..d313039e2fdf 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
 #include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -509,6 +511,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		return -ENOMEM;
 
 	iommu->seq_id = iommu_allocated++;
+	sprintf(iommu->name, "dmar%d", iommu->seq_id);
 
 	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
 	if (!iommu->reg) {
@@ -751,6 +754,42 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 }
 
 /*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+	unsigned long flags;
+	u32 sts;
+	cycles_t start_time = get_cycles();
+
+	if (!ecap_qis(iommu->ecap))
+		return;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	if (!(sts & DMA_GSTS_QIES))
+		goto end;
+
+	/*
+	 * Give the HW a chance to complete any pending invalidation requests.
+	 */
+	while ((readl(iommu->reg + DMAR_IQT_REG) !=
+		readl(iommu->reg + DMAR_IQH_REG)) &&
+		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+		cpu_relax();
+
+	iommu->gcmd &= ~DMA_GCMD_QIE;
+
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+		      !(sts & DMA_GSTS_QIES), sts);
+end:
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
@@ -770,20 +809,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	if (iommu->qi)
 		return 0;
 
-	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
 	if (!iommu->qi)
 		return -ENOMEM;
 
 	qi = iommu->qi;
 
-	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
 	if (!qi->desc) {
 		kfree(qi);
 		iommu->qi = 0;
 		return -ENOMEM;
 	}
 
-	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
@@ -812,3 +851,254 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/* IOMMU interrupt handling. Most of it is MSI-like. */
+
+enum faulttype {
+	DMA_REMAP,
+	INTR_REMAP,
+	UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
+{
+	"Software",
+	"Present bit in root entry is clear",
+	"Present bit in context entry is clear",
+	"Invalid context entry",
+	"Access beyond MGAW",
+	"PTE Write access is not set",
+	"PTE Read access is not set",
+	"Next page table ptr is invalid",
+	"Root table address invalid",
+	"Context table ptr is invalid",
+	"non-zero reserved fields in RTP",
+	"non-zero reserved fields in CTP",
+	"non-zero reserved fields in PTE",
+};
+
+static const char *intr_remap_fault_reasons[] =
+{
+	"Detected reserved fields in the decoded interrupt-remapped request",
+	"Interrupt index exceeded the interrupt-remapping table size",
+	"Present field in the IRTE entry is clear",
+	"Error accessing interrupt-remapping table pointed by IRTA_REG",
+	"Detected reserved fields in the IRTE entry",
+	"Blocked a compatibility format interrupt request",
+	"Blocked an interrupt request due to source-id verification failure",
+};
+
+#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+{
+	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
+				     ARRAY_SIZE(intr_remap_fault_reasons))) {
+		*fault_type = INTR_REMAP;
+		return intr_remap_fault_reasons[fault_reason - 0x20];
+	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+		*fault_type = DMA_REMAP;
+		return dma_remap_fault_reasons[fault_reason];
+	} else {
+		*fault_type = UNKNOWN;
+		return "Unknown";
+	}
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	/* unmask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(0, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+	unsigned long flag;
+	struct intel_iommu *iommu = get_irq_data(irq);
+
+	/* mask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+		u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+	const char *reason;
+	int fault_type;
+
+	reason = dmar_get_fault_reason(fault_reason, &fault_type);
+
+	if (fault_type == INTR_REMAP)
+		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
+		       "fault index %llx\n"
+		       "INTR-REMAP:[fault reason %02d] %s\n",
+		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		       PCI_FUNC(source_id & 0xFF), addr >> 48,
+		       fault_reason, reason);
+	else
+		printk(KERN_ERR
+		       "DMAR:[%s] Request device [%02x:%02x.%d] "
+		       "fault addr %llx\n"
+		       "DMAR:[fault reason %02d] %s\n",
+		       (type ? "DMA Read" : "DMA Write"),
+		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+	return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+	int reg, fault_index;
+	u32 fault_status;
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	if (fault_status)
+		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+		       fault_status);
+
+	/* TBD: ignore advanced fault log currently */
+	if (!(fault_status & DMA_FSTS_PPF))
+		goto clear_rest;
+
+	fault_index = dma_fsts_fault_record_index(fault_status);
+	reg = cap_fault_reg_offset(iommu->cap);
+	while (1) {
+		u8 fault_reason;
+		u16 source_id;
+		u64 guest_addr;
+		int type;
+		u32 data;
+
+		/* highest 32 bits */
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 12);
+		if (!(data & DMA_FRCD_F))
+			break;
+
+		fault_reason = dma_frcd_fault_reason(data);
+		type = dma_frcd_type(data);
+
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 8);
+		source_id = dma_frcd_source_id(data);
+
+		guest_addr = dmar_readq(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN);
+		guest_addr = dma_frcd_page_addr(guest_addr);
+		/* clear the fault */
+		writel(DMA_FRCD_F, iommu->reg + reg +
+			fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+		dmar_fault_do_one(iommu, type, fault_reason,
+				source_id, guest_addr);
+
+		fault_index++;
+		if (fault_index > cap_num_fault_regs(iommu->cap))
+			fault_index = 0;
+		spin_lock_irqsave(&iommu->register_lock, flag);
+	}
+clear_rest:
+	/* clear all the other faults */
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+	int irq, ret;
+
+	/*
+	 * Check if the fault interrupt is already initialized.
+	 */
+	if (iommu->irq)
+		return 0;
+
+	irq = create_irq();
+	if (!irq) {
+		printk(KERN_ERR "IOMMU: no free vectors\n");
+		return -EINVAL;
+	}
+
+	set_irq_data(irq, iommu);
+	iommu->irq = irq;
+
+	ret = arch_setup_dmar_msi(irq);
+	if (ret) {
+		set_irq_data(irq, NULL);
+		iommu->irq = 0;
+		destroy_irq(irq);
+		return 0;
+	}
+
+	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+	if (ret)
+		printk(KERN_ERR "IOMMU: can't request irq\n");
+	return ret;
+}
+
+int __init enable_drhd_fault_handling(void)
+{
+	struct dmar_drhd_unit *drhd;
+
+	/*
+	 * Enable fault control interrupt.
+	 */
+	for_each_drhd_unit(drhd) {
+		int ret;
+		struct intel_iommu *iommu = drhd->iommu;
+		ret = dmar_set_interrupt(iommu);
+
+		if (ret) {
+			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
+			       "interrupt, ret %d\n",
+			       (unsigned long long)drhd->reg_base_addr, ret);
+			return -1;
+		}
+	}
+
+	return 0;
+}
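
dmar_disable_qi() above combines two polling idioms: an open-coded wait for the invalidation queue to drain (queue head catching up with the tail before the timeout expires), and the IOMMU_WAIT_OP() helper to wait for the status bit to clear once the enable bit is dropped. For reference, a sketch of what IOMMU_WAIT_OP() does, paraphrased from the include/linux/intel-iommu.h of this period (an approximation, not a verbatim copy):

/*
 * Sketch of the IOMMU_WAIT_OP() polling idiom used by dmar_disable_qi()
 * above -- paraphrased from the 2.6.29-era include/linux/intel-iommu.h,
 * not a verbatim copy.  It re-reads a register until `cond' holds,
 * giving up (by panicking) after DMAR_OPERATION_TIMEOUT cycles.
 */
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op((iommu)->reg + (offset));			\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time)) \
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)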
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index a13abf55d784..8450f4a6568a 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -225,7 +225,8 @@ void cpqhp_shutdown_debugfs(void)
 
 void cpqhp_create_debugfs_files(struct controller *ctrl)
 {
-	ctrl->dentry = debugfs_create_file(ctrl->pci_dev->dev.bus_id, S_IRUGO, root, ctrl, &debug_ops);
+	ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
+					   S_IRUGO, root, ctrl, &debug_ops);
 }
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
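
The one-line cpqphp change is part of the tree-wide removal of struct device's bus_id field: code is expected to go through the dev_name() accessor instead of reading dev.bus_id directly. For orientation, dev_name() at this point is approximately the following inline (paraphrased from include/linux/device.h; treat the body as an assumption, not a quote):

/*
 * Approximate shape of dev_name(), the accessor that replaces direct
 * dev->bus_id reads (paraphrased from include/linux/device.h of this
 * era; an assumption, not a verbatim quote).
 */
static inline const char *dev_name(const struct device *dev)
{
	/* Use the init name until the kobject becomes available */
	if (dev->init_name)
		return dev->init_name;

	return kobject_name(&dev->kobj);
}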
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..49402c399232 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1004,194 +1004,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	return 0;
 }
 
-/* iommu interrupt handling. Most stuff are MSI-like. */
-
-static const char *fault_reason_strings[] =
-{
-	"Software",
-	"Present bit in root entry is clear",
-	"Present bit in context entry is clear",
-	"Invalid context entry",
-	"Access beyond MGAW",
-	"PTE Write access is not set",
-	"PTE Read access is not set",
-	"Next page table ptr is invalid",
-	"Root table address invalid",
-	"Context table ptr is invalid",
-	"non-zero reserved fields in RTP",
-	"non-zero reserved fields in CTP",
-	"non-zero reserved fields in PTE",
-};
-#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason)
-{
-	if (fault_reason > MAX_FAULT_REASON_IDX)
-		return "Unknown";
-	else
-		return fault_reason_strings[fault_reason];
-}
-
-void dmar_msi_unmask(unsigned int irq)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(0, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(unsigned int irq)
-{
-	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
-
-	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
-	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
-	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
-	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
-	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, unsigned long long addr)
-{
-	const char *reason;
-
-	reason = dmar_get_fault_reason(fault_reason);
-
-	printk(KERN_ERR
-		"DMAR:[%s] Request device [%02x:%02x.%d] "
-		"fault addr %llx \n"
-		"DMAR:[fault reason %02d] %s\n",
-		(type ? "DMA Read" : "DMA Write"),
-		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
-		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
-	return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t iommu_page_fault(int irq, void *dev_id)
-{
-	struct intel_iommu *iommu = dev_id;
-	int reg, fault_index;
-	u32 fault_status;
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-
-	/* TBD: ignore advanced fault log currently */
-	if (!(fault_status & DMA_FSTS_PPF))
-		goto clear_overflow;
-
-	fault_index = dma_fsts_fault_record_index(fault_status);
-	reg = cap_fault_reg_offset(iommu->cap);
-	while (1) {
-		u8 fault_reason;
-		u16 source_id;
-		u64 guest_addr;
-		int type;
-		u32 data;
-
-		/* highest 32 bits */
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 12);
-		if (!(data & DMA_FRCD_F))
-			break;
-
-		fault_reason = dma_frcd_fault_reason(data);
-		type = dma_frcd_type(data);
-
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 8);
-		source_id = dma_frcd_source_id(data);
-
-		guest_addr = dmar_readq(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN);
-		guest_addr = dma_frcd_page_addr(guest_addr);
-		/* clear the fault */
-		writel(DMA_FRCD_F, iommu->reg + reg +
-			fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-		iommu_page_fault_do_one(iommu, type, fault_reason,
-				source_id, guest_addr);
-
-		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
-			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
-	}
-clear_overflow:
-	/* clear primary fault overflow */
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status & DMA_FSTS_PFO)
-		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
-
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-	return IRQ_HANDLED;
-}
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
-	int irq, ret;
-
-	irq = create_irq();
-	if (!irq) {
-		printk(KERN_ERR "IOMMU: no free vectors\n");
-		return -EINVAL;
-	}
-
-	set_irq_data(irq, iommu);
-	iommu->irq = irq;
-
-	ret = arch_setup_dmar_msi(irq);
-	if (ret) {
-		set_irq_data(irq, NULL);
-		iommu->irq = 0;
-		destroy_irq(irq);
-		return 0;
-	}
-
-	/* Force fault register is cleared */
-	iommu_page_fault(irq, iommu);
-
-	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
-	if (ret)
-		printk(KERN_ERR "IOMMU: can't request irq\n");
-	return ret;
-}
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
@@ -1987,7 +1799,7 @@ static int __init init_dmars(void)
 	struct dmar_rmrr_unit *rmrr;
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
-	int i, ret, unit = 0;
+	int i, ret;
 
 	/*
 	 * for each drhd
@@ -2043,11 +1855,40 @@ static int __init init_dmars(void)
 		}
 	}
 
+	/*
+	 * Start from a sane IOMMU hardware state.
+	 */
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
 
 		iommu = drhd->iommu;
+
+		/*
+		 * If queued invalidation is already initialized by us
+		 * (for example, while enabling interrupt-remapping), then
+		 * things are already rolling from a sane state.
+		 */
+		if (iommu->qi)
+			continue;
+
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(-1, iommu);
+		/*
+		 * Disable queued invalidation if supported and already enabled
+		 * before OS handover.
+		 */
+		dmar_disable_qi(iommu);
+	}
+
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+
 		if (dmar_enable_qi(iommu)) {
 			/*
 			 * Queued Invalidate not enabled, use Register Based
@@ -2109,7 +1950,6 @@ static int __init init_dmars(void)
 		if (drhd->ignored)
 			continue;
 		iommu = drhd->iommu;
-		sprintf (iommu->name, "dmar%d", unit++);
 
 		iommu_flush_write_buffer(iommu);
 
@@ -2284,11 +2124,13 @@ error:
 	return 0;
 }
 
-dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
-			    size_t size, int dir)
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	return __intel_map_single(hwdev, paddr, size, dir,
-				  to_pci_dev(hwdev)->dma_mask);
+	return __intel_map_single(dev, page_to_phys(page) + offset, size,
+				  dir, to_pci_dev(dev)->dma_mask);
 }
 
 static void flush_unmaps(void)
@@ -2352,8 +2194,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			int dir)
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -2397,8 +2240,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	}
 }
 
-void *intel_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags)
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			       int dir)
+{
+	intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
+static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
@@ -2421,8 +2270,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
 	return NULL;
 }
 
-void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			 dma_addr_t dma_handle)
+static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+				dma_addr_t dma_handle)
 {
 	int order;
 
@@ -2435,8 +2284,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
 
-void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-		    int nelems, int dir)
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+			   int nelems, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2493,8 +2343,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-		 int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	void *addr;
 	int i;
@@ -2574,13 +2424,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	return nelems;
 }
 
-static struct dma_mapping_ops intel_dma_ops = {
+static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return !dma_addr;
+}
+
+struct dma_map_ops intel_dma_ops = {
 	.alloc_coherent = intel_alloc_coherent,
 	.free_coherent = intel_free_coherent,
-	.map_single = intel_map_single,
-	.unmap_single = intel_unmap_single,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
+	.map_page = intel_map_page,
+	.unmap_page = intel_unmap_page,
+	.mapping_error = intel_mapping_error,
 };
 
 static inline int iommu_domain_cache_init(void)
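
The intel-iommu.c portion of the diff converts the driver from the old map_single()/unmap_single() hooks of struct dma_mapping_ops to the generic struct dma_map_ops, where single mappings are expressed through ->map_page(). A minimal sketch of how the generic layer then implements dma_map_single() on top of ->map_page() -- assuming the asm-generic wrapper of this period, so the helper names below are an approximation:

/*
 * Sketch: how the generic DMA layer routes dma_map_single() through
 * ->map_page() once a driver exports struct dma_map_ops.  Assumes the
 * 2.6.30-era asm-generic/dma-mapping-common.h wrapper; an
 * approximation, not a verbatim copy.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* decompose the kernel virtual address into page + offset */
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK,
			     size, dir, attrs);
}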
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8e44db040db7..b041a409f4a7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -21,7 +21,7 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-#ifdef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_GENERIC_HARDIRQS
 static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
 {
 	struct irq_2_iommu *iommu;
@@ -117,21 +117,22 @@ int get_irte(int irq, struct irte *entry)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
 	if (!entry)
 		return -1;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -141,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
+	unsigned long flags;
 	int i;
 
 	if (!count)
@@ -170,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -182,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock(&irq_2_ir_lock);
+			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -193,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -203,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -223,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -256,7 +260,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -264,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -277,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 	irq_iommu->sub_handle = 0;
 	irq_2_iommu(irq)->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -289,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -302,11 +308,11 @@ int modify_irte(int irq, struct irte *irte_modified)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	set_64bit((unsigned long *)irte, irte_modified->low);
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -317,11 +323,12 @@ int flush_irte(int irq)
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -330,7 +337,7 @@ int flush_irte(int irq)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
 	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -363,11 +370,12 @@ int free_irte(int irq)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -378,7 +386,7 @@ int free_irte(int irq)
 
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
-			set_64bit((unsigned long *)irte, 0);
+			set_64bit((unsigned long *)(irte + i), 0);
 		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
@@ -387,7 +395,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -439,12 +447,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	struct page *pages;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-					     GFP_KERNEL);
+					     GFP_ATOMIC);
 
 	if (!iommu->ir_table)
 		return -ENOMEM;
 
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
 		printk(KERN_ERR "failed to allocate pages of order %d\n",
@@ -459,11 +467,55 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	return 0;
 }
 
+/*
+ * Disable Interrupt Remapping.
+ */
+static void disable_intr_remapping(struct intel_iommu *iommu)
+{
+	unsigned long flags;
+	u32 sts;
+
+	if (!ecap_ir_support(iommu->ecap))
+		return;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	if (!(sts & DMA_GSTS_IRES))
+		goto end;
+
+	iommu->gcmd &= ~DMA_GCMD_IRE;
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+		      readl, !(sts & DMA_GSTS_IRES), sts);
+
+end:
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
 int __init enable_intr_remapping(int eim)
 {
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	for_each_drhd_unit(drhd) {
+		struct intel_iommu *iommu = drhd->iommu;
+
+		/*
+		 * Clear previous faults.
+		 */
+		dmar_fault(-1, iommu);
+
+		/*
+		 * Disable intr remapping and queued invalidation, if already
+		 * enabled prior to OS handover.
+		 */
+		disable_intr_remapping(iommu);
+
+		dmar_disable_qi(iommu);
+	}
+
 	/*
 	 * check for the Interrupt-remapping support
 	 */
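
The recurring intr_remapping.c change -- every spin_lock(&irq_2_ir_lock) becoming spin_lock_irqsave() -- exists because these paths can now race with the DMAR fault interrupt and can themselves be called with interrupts disabled; a plain spin_lock() on a lock that an interrupt handler may also take can deadlock on the same CPU. A minimal, self-contained illustration of the pattern (hypothetical demo_* names, not kernel code):

/*
 * Minimal illustration of the spin_lock_irqsave() pattern adopted
 * above (hypothetical demo_* names, not kernel code): a lock shared
 * with an interrupt handler must be taken with local interrupts
 * masked in process context, or the handler can preempt the critical
 * section on the same CPU and spin forever on the held lock.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_shared_count;

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&demo_lock);		/* interrupts already off here */
	demo_shared_count++;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static int demo_read_count(void)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(&demo_lock, flags);	/* masks local interrupts */
	val = demo_shared_count;
	spin_unlock_irqrestore(&demo_lock, flags);
	return val;
}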