Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c | 270
1 file changed, 63 insertions, 207 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..49402c399232 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1004,194 +1004,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	return 0;
 }
 
-/* iommu interrupt handling. Most stuff are MSI-like. */
-
-static const char *fault_reason_strings[] =
-{
-	"Software",
-	"Present bit in root entry is clear",
-	"Present bit in context entry is clear",
-	"Invalid context entry",
-	"Access beyond MGAW",
-	"PTE Write access is not set",
-	"PTE Read access is not set",
-	"Next page table ptr is invalid",
-	"Root table address invalid",
-	"Context table ptr is invalid",
-	"non-zero reserved fields in RTP",
-	"non-zero reserved fields in CTP",
-	"non-zero reserved fields in PTE",
-};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason)
-{
-	if (fault_reason > MAX_FAULT_REASON_IDX)
-		return "Unknown";
-	else
-		return fault_reason_strings[fault_reason];
-}
-
-void dmar_msi_unmask(unsigned int irq)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(0, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(unsigned int irq)
-{
-	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
-
-	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
-	/* Read a reg to force flush the post write */
-	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
-	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
-	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
-	struct intel_iommu *iommu = get_irq_data(irq);
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
-	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
-	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, unsigned long long addr)
-{
-	const char *reason;
-
-	reason = dmar_get_fault_reason(fault_reason);
-
-	printk(KERN_ERR
-		"DMAR:[%s] Request device [%02x:%02x.%d] "
-		"fault addr %llx \n"
-		"DMAR:[fault reason %02d] %s\n",
-		(type ? "DMA Read" : "DMA Write"),
-		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
-		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
-	return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t iommu_page_fault(int irq, void *dev_id)
-{
-	struct intel_iommu *iommu = dev_id;
-	int reg, fault_index;
-	u32 fault_status;
-	unsigned long flag;
-
-	spin_lock_irqsave(&iommu->register_lock, flag);
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-
-	/* TBD: ignore advanced fault log currently */
-	if (!(fault_status & DMA_FSTS_PPF))
-		goto clear_overflow;
-
-	fault_index = dma_fsts_fault_record_index(fault_status);
-	reg = cap_fault_reg_offset(iommu->cap);
-	while (1) {
-		u8 fault_reason;
-		u16 source_id;
-		u64 guest_addr;
-		int type;
-		u32 data;
-
-		/* highest 32 bits */
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 12);
-		if (!(data & DMA_FRCD_F))
-			break;
-
-		fault_reason = dma_frcd_fault_reason(data);
-		type = dma_frcd_type(data);
-
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 8);
-		source_id = dma_frcd_source_id(data);
-
-		guest_addr = dmar_readq(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN);
-		guest_addr = dma_frcd_page_addr(guest_addr);
-		/* clear the fault */
-		writel(DMA_FRCD_F, iommu->reg + reg +
-			fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-		iommu_page_fault_do_one(iommu, type, fault_reason,
-				source_id, guest_addr);
-
-		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
-			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
-	}
-clear_overflow:
-	/* clear primary fault overflow */
-	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status & DMA_FSTS_PFO)
-		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
-
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
-	return IRQ_HANDLED;
-}
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
-	int irq, ret;
-
-	irq = create_irq();
-	if (!irq) {
-		printk(KERN_ERR "IOMMU: no free vectors\n");
-		return -EINVAL;
-	}
-
-	set_irq_data(irq, iommu);
-	iommu->irq = irq;
-
-	ret = arch_setup_dmar_msi(irq);
-	if (ret) {
-		set_irq_data(irq, NULL);
-		iommu->irq = 0;
-		destroy_irq(irq);
-		return 0;
-	}
-
-	/* Force fault register is cleared */
-	iommu_page_fault(irq, iommu);
-
-	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
-	if (ret)
-		printk(KERN_ERR "IOMMU: can't request irq\n");
-	return ret;
-}
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
@@ -1987,7 +1799,7 @@ static int __init init_dmars(void)
 	struct dmar_rmrr_unit *rmrr;
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
-	int i, ret, unit = 0;
+	int i, ret;
 
 	/*
 	 * for each drhd
@@ -2043,11 +1855,40 @@ static int __init init_dmars(void)
 		}
 	}
 
+	/*
+	 * Start from the sane iommu hardware state.
+	 */
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
 
 		iommu = drhd->iommu;
+
+		/*
+		 * If the queued invalidation is already initialized by us
+		 * (for example, while enabling interrupt-remapping) then
+		 * we got the things already rolling from a sane state.
+		 */
+		if (iommu->qi)
+			continue;
+
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(-1, iommu);
+		/*
+		 * Disable queued invalidation if supported and already enabled
+		 * before OS handover.
+		 */
+		dmar_disable_qi(iommu);
+	}
+
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+
 		if (dmar_enable_qi(iommu)) {
 			/*
 			 * Queued Invalidate not enabled, use Register Based
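The dmar_fault(-1, iommu) call added above relies on the DMAR fault interrupt handler also being callable as a plain function: passing -1 as the irq argument marks a direct, synchronous invocation used only to drain and clear fault records left over from before OS handover. A minimal sketch of that idiom, assuming dmar_fault() keeps the usual irqreturn_t handler signature (illustration only, not code from this patch):

/*
 * Illustration only: call the fault interrupt handler directly as a
 * housekeeping helper.  The handler only uses dev_id, so irq == -1 is
 * safe before request_irq() has been set up.
 */
static void clear_pending_faults(struct intel_iommu *iommu)
{
	dmar_fault(-1, iommu);
}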
@@ -2109,7 +1950,6 @@ static int __init init_dmars(void)
 		if (drhd->ignored)
 			continue;
 		iommu = drhd->iommu;
-		sprintf (iommu->name, "dmar%d", unit++);
 
 		iommu_flush_write_buffer(iommu);
 
@@ -2284,11 +2124,13 @@ error:
 	return 0;
 }
 
-dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
-			    size_t size, int dir)
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	return __intel_map_single(hwdev, paddr, size, dir,
-				  to_pci_dev(hwdev)->dma_mask);
+	return __intel_map_single(dev, page_to_phys(page) + offset, size,
+				  dir, to_pci_dev(dev)->dma_mask);
 }
 
 static void flush_unmaps(void)
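The map_single-style entry point is not lost by this conversion: a directly-mapped kernel buffer can always be expressed as a page plus an offset, which is essentially how the generic DMA code routes dma_map_single() through a ->map_page hook. A minimal sketch of that equivalence, with an illustrative helper name that is not part of this patch:

/*
 * Illustration only: mapping a lowmem kernel-virtual buffer through the
 * new page-based hook.  virt_to_page()/offset_in_page() recover the
 * page + offset pair that intel_map_page() expects.
 */
static dma_addr_t map_buffer_via_page(struct device *dev, void *ptr,
				      size_t size,
				      enum dma_data_direction dir)
{
	return intel_map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			      size, dir, NULL);
}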
@@ -2352,8 +2194,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			int dir)
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -2397,8 +2240,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	}
 }
 
-void *intel_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags)
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			       int dir)
+{
+	intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
+static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
@@ -2421,8 +2270,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
 	return NULL;
 }
 
-void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			 dma_addr_t dma_handle)
+static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+				dma_addr_t dma_handle)
 {
 	int order;
 
@@ -2435,8 +2284,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
 
-void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-		    int nelems, int dir)
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+			   int nelems, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2493,8 +2343,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-		 int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	void *addr;
 	int i;
@@ -2574,13 +2424,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	return nelems;
 }
 
-static struct dma_mapping_ops intel_dma_ops = {
+static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return !dma_addr;
+}
+
+struct dma_map_ops intel_dma_ops = {
 	.alloc_coherent = intel_alloc_coherent,
 	.free_coherent = intel_free_coherent,
-	.map_single = intel_map_single,
-	.unmap_single = intel_unmap_single,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
+	.map_page = intel_map_page,
+	.unmap_page = intel_unmap_page,
+	.mapping_error = intel_mapping_error,
 };
 
 static inline int iommu_domain_cache_init(void)
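With intel_dma_ops expressed as a struct dma_map_ops, ordinary drivers keep using the generic DMA API and end up in the hooks above. A hedged usage sketch follows; the driver-side names (example_map_buffer, buf, len) are placeholders, not part of this patch:

/*
 * Illustration only: a PCI driver mapping one lowmem buffer for device
 * reads.  dma_map_page()/dma_mapping_error() dispatch through the
 * active dma_map_ops, i.e. intel_map_page() and intel_mapping_error()
 * when the Intel IOMMU is in use; the buffer is later released with
 * dma_unmap_page(), which reaches intel_unmap_page().
 */
static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t dma;

	dma = dma_map_page(&pdev->dev, virt_to_page(buf),
			   offset_in_page(buf), len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;

	*out = dma;	/* program this bus address into the device */
	return 0;
}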