author    Suresh Siddha <suresh.b.siddha@intel.com>  2009-03-16 20:04:54 -0400
committer H. Peter Anvin <hpa@linux.intel.com>       2009-03-17 18:37:06 -0400
commit    0ac2491f57af5644f88383d28809760902d6f4d7 (patch)
tree      0dcf5875ef83a5bd14cbe37f8b4671a4601cc797 /drivers/pci
parent    4c5502b1c5744b2090414e1b80ca6388d5c46e06 (diff)
x86, dmar: move page fault handling code to dmar.c
Impact: code movement

Move page fault handling code to dmar.c

This will be shared both by DMA-remapping and Intr-remapping code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dmar.c         | 191
-rw-r--r--  drivers/pci/intel-iommu.c  | 188
2 files changed, 191 insertions(+), 188 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 5f333403c2ea..75d34bf2db50 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
 #include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -812,3 +814,192 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/* iommu interrupt handling. Most stuff are MSI-like. */
+
+static const char *fault_reason_strings[] =
+{
+	"Software",
+	"Present bit in root entry is clear",
+	"Present bit in context entry is clear",
+	"Invalid context entry",
+	"Access beyond MGAW",
+	"PTE Write access is not set",
+	"PTE Read access is not set",
+	"Next page table ptr is invalid",
+	"Root table address invalid",
+	"Context table ptr is invalid",
+	"non-zero reserved fields in RTP",
+	"non-zero reserved fields in CTP",
+	"non-zero reserved fields in PTE",
+};
+#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason)
+{
+	if (fault_reason > MAX_FAULT_REASON_IDX)
+		return "Unknown";
+	else
+		return fault_reason_strings[fault_reason];
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	/* unmask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(0, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+	unsigned long flag;
+	struct intel_iommu *iommu = get_irq_data(irq);
+
+	/* mask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+		u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+	const char *reason;
+
+	reason = dmar_get_fault_reason(fault_reason);
+
+	printk(KERN_ERR
+		"DMAR:[%s] Request device [%02x:%02x.%d] "
+		"fault addr %llx \n"
+		"DMAR:[fault reason %02d] %s\n",
+		(type ? "DMA Read" : "DMA Write"),
+		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+	return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+static irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+	int reg, fault_index;
+	u32 fault_status;
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/* TBD: ignore advanced fault log currently */
+	if (!(fault_status & DMA_FSTS_PPF))
+		goto clear_overflow;
+
+	fault_index = dma_fsts_fault_record_index(fault_status);
+	reg = cap_fault_reg_offset(iommu->cap);
+	while (1) {
+		u8 fault_reason;
+		u16 source_id;
+		u64 guest_addr;
+		int type;
+		u32 data;
+
+		/* highest 32 bits */
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 12);
+		if (!(data & DMA_FRCD_F))
+			break;
+
+		fault_reason = dma_frcd_fault_reason(data);
+		type = dma_frcd_type(data);
+
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 8);
+		source_id = dma_frcd_source_id(data);
+
+		guest_addr = dmar_readq(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN);
+		guest_addr = dma_frcd_page_addr(guest_addr);
+		/* clear the fault */
+		writel(DMA_FRCD_F, iommu->reg + reg +
+			fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+		dmar_fault_do_one(iommu, type, fault_reason,
+				source_id, guest_addr);
+
+		fault_index++;
+		if (fault_index > cap_num_fault_regs(iommu->cap))
+			fault_index = 0;
+		spin_lock_irqsave(&iommu->register_lock, flag);
+	}
+clear_overflow:
+	/* clear primary fault overflow */
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	if (fault_status & DMA_FSTS_PFO)
+		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+	int irq, ret;
+
+	irq = create_irq();
+	if (!irq) {
+		printk(KERN_ERR "IOMMU: no free vectors\n");
+		return -EINVAL;
+	}
+
+	set_irq_data(irq, iommu);
+	iommu->irq = irq;
+
+	ret = arch_setup_dmar_msi(irq);
+	if (ret) {
+		set_irq_data(irq, NULL);
+		iommu->irq = 0;
+		destroy_irq(irq);
+		return 0;
+	}
+
+	/* Force fault register is cleared */
+	dmar_fault(irq, iommu);
+
+	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+	if (ret)
+		printk(KERN_ERR "IOMMU: can't request irq\n");
+	return ret;
+}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..4a4ab651b709 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1004,194 +1004,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	return 0;
 }
 
-/* iommu interrupt handling. Most stuff are MSI-like. */
-
-static const char *fault_reason_strings[] =
-{
-	"Software",
-	"Present bit in root entry is clear",
-	"Present bit in context entry is clear",
-	"Invalid context entry",
-	"Access beyond MGAW",
-	"PTE Write access is not set",
-	"PTE Read access is not set",
-	"Next page table ptr is invalid",
-	"Root table address invalid",
-	"Context table ptr is invalid",
-	"non-zero reserved fields in RTP",
-	"non-zero reserved fields in CTP",
-	"non-zero reserved fields in PTE",
-};
-#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason)
-{
-	if (fault_reason > MAX_FAULT_REASON_IDX)
-		return "Unknown";
-	else
-		return fault_reason_strings[fault_reason];
-}
-
-void dmar_msi_unmask(unsigned int irq)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(0, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(unsigned int irq)
-{
-	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
-
-	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
-	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
-	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
-	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
-	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, unsigned long long addr)
-{
-	const char *reason;
-
-	reason = dmar_get_fault_reason(fault_reason);
-
-	printk(KERN_ERR
-		"DMAR:[%s] Request device [%02x:%02x.%d] "
-		"fault addr %llx \n"
-		"DMAR:[fault reason %02d] %s\n",
-		(type ? "DMA Read" : "DMA Write"),
-		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
-		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
-	return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t iommu_page_fault(int irq, void *dev_id)
-{
-	struct intel_iommu *iommu = dev_id;
-	int reg, fault_index;
-	u32 fault_status;
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-
-	/* TBD: ignore advanced fault log currently */
-	if (!(fault_status & DMA_FSTS_PPF))
-		goto clear_overflow;
-
-	fault_index = dma_fsts_fault_record_index(fault_status);
-	reg = cap_fault_reg_offset(iommu->cap);
-	while (1) {
-		u8 fault_reason;
-		u16 source_id;
-		u64 guest_addr;
-		int type;
-		u32 data;
-
-		/* highest 32 bits */
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 12);
-		if (!(data & DMA_FRCD_F))
-			break;
-
-		fault_reason = dma_frcd_fault_reason(data);
-		type = dma_frcd_type(data);
-
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 8);
-		source_id = dma_frcd_source_id(data);
-
-		guest_addr = dmar_readq(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN);
-		guest_addr = dma_frcd_page_addr(guest_addr);
-		/* clear the fault */
-		writel(DMA_FRCD_F, iommu->reg + reg +
-			fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-		iommu_page_fault_do_one(iommu, type, fault_reason,
-				source_id, guest_addr);
-
-		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
-			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
-	}
-clear_overflow:
-	/* clear primary fault overflow */
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status & DMA_FSTS_PFO)
-		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
-
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-	return IRQ_HANDLED;
-}
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
-	int irq, ret;
-
-	irq = create_irq();
-	if (!irq) {
-		printk(KERN_ERR "IOMMU: no free vectors\n");
-		return -EINVAL;
-	}
-
-	set_irq_data(irq, iommu);
-	iommu->irq = irq;
-
-	ret = arch_setup_dmar_msi(irq);
-	if (ret) {
-		set_irq_data(irq, NULL);
-		iommu->irq = 0;
-		destroy_irq(irq);
-		return 0;
-	}
-
-	/* Force fault register is cleared */
-	iommu_page_fault(irq, iommu);
-
-	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
-	if (ret)
-		printk(KERN_ERR "IOMMU: can't request irq\n");
-	return ret;
-}
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
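
A minimal illustration of how the relocated code is meant to be consumed after this move: both the DMA-remapping init path and the upcoming interrupt-remapping code can register the shared fault handler on each IOMMU with a single call to dmar_set_interrupt(). This is a sketch, not part of the patch; the helper name enable_fault_reporting() is hypothetical, and it assumes dmar_set_interrupt() is made visible to callers through a shared header such as include/linux/intel-iommu.h.

/*
 * Hypothetical caller (not part of this commit): enable DMAR fault
 * reporting for one IOMMU.  dmar_set_interrupt() allocates an irq,
 * programs the fault-event MSI registers via arch_setup_dmar_msi()
 * and registers dmar_fault() as the handler.
 */
#include <linux/kernel.h>
#include <linux/intel-iommu.h>

static int enable_fault_reporting(struct intel_iommu *iommu)
{
	int ret;

	ret = dmar_set_interrupt(iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: failed to enable fault reporting\n");

	return ret;
}

Because the handler and the MSI plumbing now live in dmar.c, neither caller has to duplicate the fault-record parsing done in dmar_fault().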