aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/amd_iommu.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2011-04-07 02:16:10 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2011-04-07 05:04:32 -0400
commit7d0c5cc5be73f7ce26fdcca7b8ec2203f661eb93 (patch)
treefbe4ff16580783316f3b2435130865f45387a4c3 /arch/x86/kernel/amd_iommu.c
parentd8c13085775c72e2d46edc54ed0c803c3a944ddb (diff)
x86/amd-iommu: Flush all internal TLBs when IOMMUs are enabled
The old code only flushed a DTE or a domain TLB before it is actually used by the IOMMU driver. While this is efficient and works when done right it is more likely to introduce new bugs when changing code (which happened in the past). This patch adds code to flush all DTEs and all domain TLBs in each IOMMU right after it is enabled (at boot and after resume). This reduces the complexity of the driver and makes it less likely to introduce stale-TLB bugs in the future. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 75
1 file changed, 34 insertions(+), 41 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 3557f223f40..bcf58ea55cf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -539,6 +539,40 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
539 539		return iommu_queue_command(iommu, &cmd);
540 540	}
541 541	
542static void iommu_flush_dte_all(struct amd_iommu *iommu)
543{
544 u32 devid;
545
546 for (devid = 0; devid <= 0xffff; ++devid)
547 iommu_flush_dte(iommu, devid);
548
549 iommu_completion_wait(iommu);
550}
551
552/*
553 * This function uses heavy locking and may disable irqs for some time. But
554 * this is no issue because it is only called during resume.
555 */
556static void iommu_flush_tlb_all(struct amd_iommu *iommu)
557{
558 u32 dom_id;
559
560 for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
561 struct iommu_cmd cmd;
562 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
563 dom_id, 1);
564 iommu_queue_command(iommu, &cmd);
565 }
566
567 iommu_completion_wait(iommu);
568}
569
570void iommu_flush_all_caches(struct amd_iommu *iommu)
571{
572 iommu_flush_dte_all(iommu);
573 iommu_flush_tlb_all(iommu);
574}
575
542 576	/*
543 577	 * Command send function for invalidating a device table entry
544 578	 */
@@ -631,47 +665,6 @@ static void domain_flush_devices(struct protection_domain *domain)
631 spin_unlock_irqrestore(&domain->lock, flags); 665 spin_unlock_irqrestore(&domain->lock, flags);
632} 666}
633 667
634static void iommu_flush_all_domain_devices(void)
635{
636 struct protection_domain *domain;
637 unsigned long flags;
638
639 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
640
641 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
642 domain_flush_devices(domain);
643 domain_flush_complete(domain);
644 }
645
646 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
647}
648
649void amd_iommu_flush_all_devices(void)
650{
651 iommu_flush_all_domain_devices();
652}
653
654/*
655 * This function uses heavy locking and may disable irqs for some time. But
656 * this is no issue because it is only called during resume.
657 */
658void amd_iommu_flush_all_domains(void)
659{
660 struct protection_domain *domain;
661 unsigned long flags;
662
663 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
664
665 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
666 spin_lock(&domain->lock);
667 domain_flush_tlb_pde(domain);
668 domain_flush_complete(domain);
669 spin_unlock(&domain->lock);
670 }
671
672 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
673}
674
675 668	/****************************************************************************
676 669	 *
677 670	 * The functions below are used to create the page table mappings for