author     Joerg Roedel <joerg.roedel@amd.com>    2011-04-07 02:16:10 -0400
committer  Joerg Roedel <joerg.roedel@amd.com>    2011-04-07 05:04:32 -0400
commit     7d0c5cc5be73f7ce26fdcca7b8ec2203f661eb93 (patch)
tree       fbe4ff16580783316f3b2435130865f45387a4c3
parent     d8c13085775c72e2d46edc54ed0c803c3a944ddb (diff)
x86/amd-iommu: Flush all internal TLBs when IOMMUs are enabled
The old code only flushed a DTE or a domain TLB before it was actually
used by the IOMMU driver. While this is efficient and works when done
right, it makes it more likely that new bugs are introduced when the
code is changed (which has happened in the past).

This patch adds code to flush all DTEs and all domain TLBs in each
IOMMU right after it is enabled (at boot and after resume). This
reduces the complexity of the driver and makes it less likely that
stale-TLB bugs are introduced in the future.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
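For context on what the new code actually queues: iommu_flush_tlb_all() below walks every possible domain ID and emits one INVALIDATE_IOMMU_PAGES command with the special all-pages address. The following standalone sketch (not kernel code) models how build_inv_iommu_pages() encodes that command for the pde=1 call used in this patch; the struct, constant names, values, and bit layout are reproduced from the driver's amd_iommu_types.h and the AMD IOMMU spec from memory, so treat them as illustrative rather than authoritative.

/*
 * Standalone model of the INVALIDATE_IOMMU_PAGES command that
 * build_inv_iommu_pages() emits for a whole-domain flush.  Not kernel
 * code; constants mirror amd_iommu_types.h and may not be exact.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct iommu_cmd {
	uint32_t data[4];		/* one 128-bit command buffer slot */
};

#define CMD_INV_IOMMU_PAGES		0x03	/* command opcode */
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7ffffffffffff000ULL
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01	/* S: address is a range */
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02	/* PDE: flush page-dir entries too */

#define CMD_SET_TYPE(cmd, t)	((cmd)->data[1] |= ((uint32_t)(t) << 28))

/* Mirrors the pde=1 call made by iommu_flush_tlb_all() in the patch. */
static void build_inv_all_pages(struct iommu_cmd *cmd, uint16_t dom_id)
{
	uint64_t address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS & ~0xfffULL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= dom_id;			/* domain to invalidate */
	cmd->data[2]  = (uint32_t)address;	/* address, low half */
	cmd->data[3]  = (uint32_t)(address >> 32);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

int main(void)
{
	struct iommu_cmd cmd;

	build_inv_all_pages(&cmd, 42);
	printf("%08x %08x %08x %08x\n",
	       cmd.data[0], cmd.data[1], cmd.data[2], cmd.data[3]);
	return 0;
}

The all-pages address combined with the size bit is the spec's encoding for "invalidate every page in this domain", which is why the patch can flush a domain without tracking what is mapped in it.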
 arch/x86/include/asm/amd_iommu_proto.h |  2 -
 arch/x86/kernel/amd_iommu.c            | 75 ++++++++++-----------
 arch/x86/kernel/amd_iommu_init.c       | 11 ++++-
 3 files changed, 43 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 916bc8111a0..1223c0fe03f 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -24,8 +24,6 @@ struct amd_iommu;
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_apply_erratum_63(u16 devid);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 3557f223f40..bcf58ea55cf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -539,6 +539,40 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 	return iommu_queue_command(iommu, &cmd);
 }
 
+static void iommu_flush_dte_all(struct amd_iommu *iommu)
+{
+	u32 devid;
+
+	for (devid = 0; devid <= 0xffff; ++devid)
+		iommu_flush_dte(iommu, devid);
+
+	iommu_completion_wait(iommu);
+}
+
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
+static void iommu_flush_tlb_all(struct amd_iommu *iommu)
+{
+	u32 dom_id;
+
+	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+		struct iommu_cmd cmd;
+		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      dom_id, 1);
+		iommu_queue_command(iommu, &cmd);
+	}
+
+	iommu_completion_wait(iommu);
+}
+
+void iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+	iommu_flush_dte_all(iommu);
+	iommu_flush_tlb_all(iommu);
+}
+
 /*
  * Command send function for invalidating a device table entry
  */
@@ -631,47 +665,6 @@ static void domain_flush_devices(struct protection_domain *domain)
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-static void iommu_flush_all_domain_devices(void)
-{
-	struct protection_domain *domain;
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-
-	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		domain_flush_devices(domain);
-		domain_flush_complete(domain);
-	}
-
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-void amd_iommu_flush_all_devices(void)
-{
-	iommu_flush_all_domain_devices();
-}
-
-/*
- * This function uses heavy locking and may disable irqs for some time. But
- * this is no issue because it is only called during resume.
- */
-void amd_iommu_flush_all_domains(void)
-{
-	struct protection_domain *domain;
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-
-	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		spin_lock(&domain->lock);
-		domain_flush_tlb_pde(domain);
-		domain_flush_complete(domain);
-		spin_unlock(&domain->lock);
-	}
-
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 246d727b65b..8848dda808e 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -180,6 +180,12 @@ static u32 dev_table_size; /* size of the device table */
 static u32 alias_table_size;	/* size of the alias table */
 static u32 rlookup_table_size;	/* size if the rlookup table */
 
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -1244,6 +1250,7 @@ static void enable_iommus(void)
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
+		iommu_flush_all_caches(iommu);
 	}
 }
 
@@ -1274,8 +1281,8 @@ static void amd_iommu_resume(void)
 	 * we have to flush after the IOMMUs are enabled because a
 	 * disabled IOMMU will never execute the commands we send
 	 */
-	amd_iommu_flush_all_devices();
-	amd_iommu_flush_all_domains();
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
 }
 
 static int amd_iommu_suspend(void)
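Both new flush helpers end with iommu_completion_wait(), which queues a COMPLETION_WAIT command as a barrier: the IOMMU stores a value to a driver-supplied address only after every earlier command has been executed, and the driver polls that location before returning. The standalone sketch below models that command's encoding per the AMD IOMMU specification's COMPLETION_WAIT format as recalled here; the flag name and the semaphore address are illustrative assumptions, not the driver's exact code.

/*
 * Standalone model (not the driver's code) of the COMPLETION_WAIT
 * barrier command.  Layout per the AMD IOMMU spec as recalled here:
 * S flag and StoreAddress[31:3] in dword 0, StoreAddress[51:32] in
 * dword 1, opcode in bits 31:28 of dword 1, StoreData in dwords 2-3.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct iommu_cmd {
	uint32_t data[4];
};

#define CMD_COMPL_WAIT	0x01		/* command opcode */
#define COMPL_WAIT_S	(1u << 0)	/* hypothetical name for the S (store) flag */

#define CMD_SET_TYPE(cmd, t)	((cmd)->data[1] |= ((uint32_t)(t) << 28))

static void build_completion_wait(struct iommu_cmd *cmd, uint64_t sem_paddr)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = ((uint32_t)sem_paddr & ~0x7u) | COMPL_WAIT_S;
	cmd->data[1] = (uint32_t)(sem_paddr >> 32) & 0xfffff;
	cmd->data[2] = 1;	/* StoreData the IOMMU writes when done */
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

int main(void)
{
	struct iommu_cmd cmd;

	/* hypothetical physical address of an 8-byte-aligned semaphore */
	build_completion_wait(&cmd, 0x12345678ULL);
	printf("%08x %08x %08x %08x\n",
	       cmd.data[0], cmd.data[1], cmd.data[2], cmd.data[3]);
	return 0;
}

Because the completion wait drains the whole command queue, issuing it once after the 65536 DTE flushes and once after the 65536 domain flushes is enough; no per-command synchronization is needed in the slow boot/resume path this patch touches.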