author     Joseph Cihula <joseph.cihula@intel.com>       2011-03-21 14:04:24 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>   2011-04-21 08:51:40 -0400
commit     51a63e67da6056c13b5b597dcc9e1b3bd7ceaa55 (patch)
tree       88258f313963c0b67de3ba2f1ad57d4365b86496 /drivers/pci
parent     2fe9723df8e45fd247782adea244a5e653c30bf4 (diff)
intel_iommu: disable all VT-d PMRs when TXT launched
Intel VT-d Protected Memory Regions (PMRs) are supposed to be disabled, on each VT-d engine, after DMA remapping is enabled on the engines. This is because the behavior of having both enabled is not deterministic and because, if TXT has been used to launch the kernel, the PMRs may be programmed to cover memory regions that will be used for DMA.

Under some circumstances (certain quirks detected, lack of multiple devices, etc.), the current code does not set up DMA remapping on some VT-d engines. In such cases it also skips disabling the PMRs. This causes failures when the kernel is launched with TXT (most often this occurs on the graphics engine and results in colored vertical bars on the display).

This patch detects when the kernel has been launched with TXT and then disables the PMRs on all VT-d engines. In some cases where remapping is not being enabled because of possible ACPI DMAR table errors, the VT-d engine addresses may not be correct and thus cannot safely be programmed, even to disable PMRs. Because part of the TXT launch process is the verification of these addresses, it is always safe to disable PMRs once the TXT launch has succeeded, and hence this is only done in such cases.

Signed-off-by: Joseph Cihula <joseph.cihula@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
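For readers unfamiliar with the mechanism being toggled here: per the VT-d specification, PMRs are controlled through the Protected Memory Enable (PMEN) register at offset 0x64 of each engine's register block; disabling them means clearing the Enable Protected Memory bit and waiting for the Protected Region Status bit to drop. The driver's existing iommu_disable_protect_mem_regions() helper does exactly this, additionally holding the engine's register lock and using a timed-wait macro. A minimal standalone sketch (register offsets and bit positions taken from the VT-d spec; function name here is illustrative only):

#include <linux/io.h>
#include <asm/processor.h>

#define DMAR_PMEN_REG	0x64		/* Protected Memory Enable register */
#define DMA_PMEN_EPM	(1U << 31)	/* Enable Protected Memory bit */
#define DMA_PMEN_PRS	(1U << 0)	/* Protected Region Status bit */

/* Sketch: disable the PMRs on one VT-d engine mapped at 'reg'. */
static void pmr_disable_sketch(void __iomem *reg)
{
	u32 pmen = readl(reg + DMAR_PMEN_REG);

	/* Clear the enable bit... */
	writel(pmen & ~DMA_PMEN_EPM, reg + DMAR_PMEN_REG);

	/* ...and wait until hardware reports the regions disabled. */
	while (readl(reg + DMAR_PMEN_REG) & DMA_PMEN_PRS)
		cpu_relax();
}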
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/intel-iommu.c | 38
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5dc5d3e3508e..cdded1e2e660 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 static struct iova_domain reserved_iova_list;
 static struct lock_class_key reserved_rbtree_key;
 
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
 {
 	struct pci_dev *pdev = NULL;
 	struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
 	/* IOAPIC ranges shouldn't be accessed by DMA */
 	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
 		IOVA_PFN(IOAPIC_RANGE_END));
-	if (!iova)
+	if (!iova) {
 		printk(KERN_ERR "Reserve IOAPIC range failed\n");
+		return -ENODEV;
+	}
 
 	/* Reserve all PCI MMIO to avoid peer-to-peer access */
 	for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
 			iova = reserve_iova(&reserved_iova_list,
 					    IOVA_PFN(r->start),
 					    IOVA_PFN(r->end));
-			if (!iova)
+			if (!iova) {
 				printk(KERN_ERR "Reserve iova failed\n");
+				return -ENODEV;
+			}
 		}
 	}
-
+	return 0;
 }
 
 static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
 {
 	struct dmar_drhd_unit *drhd;
 	struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
 	 * enable translation
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
+		if (drhd->ignored) {
+			/*
+			 * we always have to disable PMRs or DMA may fail on
+			 * this device
+			 */
+			if (force_on)
+				iommu_disable_protect_mem_regions(drhd->iommu);
 			continue;
+		}
 		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
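For comparison (context not shown in this hunk): engines that are *not* ignored already get their PMRs disabled at the end of this same loop, right after translation is turned on, so the new branch only covers the previously skipped case. The tail of the loop body in this version of init_dmars() reads approximately as follows (recalled from the kernel of this era, not part of the diff):

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}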
@@ -3303,12 +3314,21 @@ int __init intel_iommu_init(void)
 	if (no_iommu || dmar_disabled)
 		return -ENODEV;
 
-	iommu_init_mempool();
-	dmar_init_reserved_ranges();
+	if (iommu_init_mempool()) {
+		if (force_on)
+			panic("tboot: Failed to initialize iommu memory\n");
+		return -ENODEV;
+	}
+
+	if (dmar_init_reserved_ranges()) {
+		if (force_on)
+			panic("tboot: Failed to reserve iommu ranges\n");
+		return -ENODEV;
+	}
 
 	init_no_remapping_devices();
 
-	ret = init_dmars();
+	ret = init_dmars(force_on);
 	if (ret) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
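Also not visible in this hunk: force_on is a local variable in intel_iommu_init(), set near the top of the function from the tboot code, which reports whether the kernel came up through a measured TXT launch. In this kernel the opening of the function reads roughly as below (a context sketch, with the table setup that follows elided):

int __init intel_iommu_init(void)
{
	int ret = 0;
	int force_on = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	/* ... DMAR table setup, then the checks in the hunk above ... */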