author		Youquan Song <youquan.song@intel.com>	2008-10-16 19:31:56 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-10-17 03:05:01 -0400
commit		a77b67d4023770805141014b8fa9eb5467457817 (patch)
tree		661dda1d33b8892f1e1fa2508565a288712592b9 /drivers/pci/intel-iommu.c
parent		3481f21097cb560392c411377893b5109fbde557 (diff)
dmar: Use queued invalidation interface for IOTLB and context invalidation

If the queued invalidation interface is available and enabled, it is used
instead of the register-based interface. According to the VT-d2
specification, once queued invalidation is enabled, invalidation commands
may be submitted only through the invalidation queue and not through the
command register interface.

Signed-off-by: Youquan Song <youquan.song@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
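The core mechanism of the patch is a small per-IOMMU dispatch table: init_dmars() binds iommu->flush.flush_context and iommu->flush.flush_iotlb either to the register-based routines (__iommu_flush_context, __iommu_flush_iotlb) or to the queued-invalidation ones (qi_flush_context, qi_flush_iotlb), and every flush site then calls through those pointers. The stand-alone C sketch below models only that dispatch pattern under simplified assumptions; fake_iommu, bind_flush_ops, and the reduced signatures are illustrative stand-ins, not the kernel's actual definitions.

/*
 * Minimal user-space model of the dispatch this patch introduces; the types
 * and helpers are simplified stand-ins, NOT the kernel's struct intel_iommu
 * or its real flush routines.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_iommu;

struct iommu_flush_ops {
	int (*flush_context)(struct fake_iommu *iommu, unsigned int did);
	int (*flush_iotlb)(struct fake_iommu *iommu, unsigned int did);
};

struct fake_iommu {
	bool qi_enabled;		/* models the outcome of dmar_enable_qi() */
	struct iommu_flush_ops flush;
};

/* Register-based path (plays the role of __iommu_flush_context/__iommu_flush_iotlb). */
static int reg_flush_context(struct fake_iommu *iommu, unsigned int did)
{
	(void)iommu;
	printf("register-based context-cache flush, domain %u\n", did);
	return 0;
}

static int reg_flush_iotlb(struct fake_iommu *iommu, unsigned int did)
{
	(void)iommu;
	printf("register-based IOTLB flush, domain %u\n", did);
	return 0;
}

/* Queued-invalidation path (plays the role of qi_flush_context/qi_flush_iotlb). */
static int queued_flush_context(struct fake_iommu *iommu, unsigned int did)
{
	(void)iommu;
	printf("queued context-cache invalidation, domain %u\n", did);
	return 0;
}

static int queued_flush_iotlb(struct fake_iommu *iommu, unsigned int did)
{
	(void)iommu;
	printf("queued IOTLB invalidation, domain %u\n", did);
	return 0;
}

/* Bind the callbacks once at init time, the way init_dmars() does per DRHD unit. */
static void bind_flush_ops(struct fake_iommu *iommu)
{
	if (!iommu->qi_enabled) {
		iommu->flush.flush_context = reg_flush_context;
		iommu->flush.flush_iotlb = reg_flush_iotlb;
	} else {
		iommu->flush.flush_context = queued_flush_context;
		iommu->flush.flush_iotlb = queued_flush_iotlb;
	}
}

int main(void)
{
	struct fake_iommu iommu = { .qi_enabled = true };

	bind_flush_ops(&iommu);
	/* Flush sites call through the pointers and never test the interface type. */
	iommu.flush.flush_context(&iommu, 0);
	iommu.flush.flush_iotlb(&iommu, 0);
	return 0;
}

The payoff visible in the diff below is that call sites such as iommu_flush_iotlb_psi(), domain_context_mapping_one(), and flush_unmaps() no longer need to know which invalidation interface the hardware provides.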
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	95
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fc5f2dbf5323..509470419130 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -567,27 +567,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 	return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, source_id, function_mask,
-		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int size_order, u64 type,
@@ -660,20 +639,6 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-		non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int pages, int non_present_entry_flush)
 {
@@ -684,8 +649,9 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 
 	/* Fallback to domain selective flush if no PSI support */
 	if (!cap_pgsel_inv(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						DMA_TLB_DSI_FLUSH,
+						non_present_entry_flush);
 
 	/*
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +660,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	mask = ilog2(__roundup_pow_of_two(pages));
 	/* Fallback to domain selective flush if size is too big */
 	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+			DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-	return __iommu_flush_iotlb(iommu, did, addr, mask,
-		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+					DMA_TLB_PSI_FLUSH,
+					non_present_entry_flush);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1204,11 +1171,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_context_device(iommu, domain->id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+	if (iommu->flush.flush_context(iommu, domain->id,
+		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+		DMA_CCMD_DEVICE_INVL, 1))
 		iommu_flush_write_buffer(iommu);
 	else
-		iommu_flush_iotlb_dsi(iommu, 0, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return 0;
 }
@@ -1310,8 +1279,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
 	clear_context_table(domain->iommu, bus, devfn);
-	iommu_flush_context_global(domain->iommu, 0);
-	iommu_flush_iotlb_global(domain->iommu, 0);
+	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+		DMA_CCMD_GLOBAL_INVL, 0);
+	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+		DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1662,6 +1633,28 @@ int __init init_dmars(void)
 		}
 	}
 
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+		if (dmar_enable_qi(iommu)) {
+			/*
+			 * Queued Invalidate not enabled, use Register Based
+			 * Invalidate
+			 */
+			iommu->flush.flush_context = __iommu_flush_context;
+			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			       "invalidation\n", drhd->reg_base_addr);
+		} else {
+			iommu->flush.flush_context = qi_flush_context;
+			iommu->flush.flush_iotlb = qi_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			       "invalidation\n", drhd->reg_base_addr);
+		}
+	}
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
@@ -1714,9 +1707,10 @@ int __init init_dmars(void)
 
 	iommu_set_root_entry(iommu);
 
-	iommu_flush_context_global(iommu, 0);
-	iommu_flush_iotlb_global(iommu, 0);
-
+	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+		0);
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+		0);
 	iommu_disable_protect_mem_regions(iommu);
 
 	ret = iommu_enable_translation(iommu);
@@ -1891,7 +1885,8 @@ static void flush_unmaps(void)
 			struct intel_iommu *iommu =
 				deferred_flush[i].domain[0]->iommu;
 
-			iommu_flush_iotlb_global(iommu, 0);
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+						 DMA_TLB_GLOBAL_FLUSH, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);