 arch/x86/include/asm/amd_iommu_proto.h |  2 --
 arch/x86/kernel/amd_iommu.c            | 75 ++++++++++++++++---------------
 arch/x86/kernel/amd_iommu_init.c       | 11 +++++++++--
 3 files changed, 43 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 916bc8111a01..1223c0fe03f5 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -24,8 +24,6 @@ struct amd_iommu;
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_apply_erratum_63(u16 devid);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
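
This header change removes the two list-walking flush entry points from the driver's interface; the hunks below replace them with a single per-IOMMU helper. The conversion for callers is mechanical, as the resume path at the end of this patch shows. A minimal sketch, assuming for_each_iommu() walks the driver's global IOMMU list as it does elsewhere in this driver:

	struct amd_iommu *iommu;

	/* before: two globals that walk the protection-domain list */
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();

	/* after: one entry point, invoked once per IOMMU */
	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);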
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 3557f223f40b..bcf58ea55cfa 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -539,6 +539,40 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 	return iommu_queue_command(iommu, &cmd);
 }
 
+static void iommu_flush_dte_all(struct amd_iommu *iommu)
+{
+	u32 devid;
+
+	for (devid = 0; devid <= 0xffff; ++devid)
+		iommu_flush_dte(iommu, devid);
+
+	iommu_completion_wait(iommu);
+}
+
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
+static void iommu_flush_tlb_all(struct amd_iommu *iommu)
+{
+	u32 dom_id;
+
+	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+		struct iommu_cmd cmd;
+		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      dom_id, 1);
+		iommu_queue_command(iommu, &cmd);
+	}
+
+	iommu_completion_wait(iommu);
+}
+
+void iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+	iommu_flush_dte_all(iommu);
+	iommu_flush_tlb_all(iommu);
+}
+
 /*
  * Command send function for invalidating a device table entry
  */
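
The two new helpers trade precision for simplicity: rather than flushing only the domains and devices actually in use, they sweep the full 16-bit device-ID and domain-ID spaces, queueing up to 65536 commands each before a single iommu_completion_wait(). The loop counters are deliberately u32: with a u16 counter, the condition devid <= 0xffff could never become false. (The "heavy locking" comment is inherited from the function this replaces; iommu_flush_tlb_all() itself takes no locks.) A standalone sketch of the sweep-and-wait pattern, using stand-in types rather than the real driver API:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for struct amd_iommu; only counts the work queued */
	struct fake_iommu {
		unsigned long queued;
	};

	static void queue_invalidate(struct fake_iommu *iommu, uint32_t id)
	{
		iommu->queued++;  /* the real code builds and queues a command */
	}

	static void completion_wait(struct fake_iommu *iommu)
	{
		printf("%lu commands queued before one wait\n", iommu->queued);
	}

	int main(void)
	{
		struct fake_iommu iommu = { 0 };
		uint32_t id;

		/* 0x10000 iterations; a 16-bit counter would wrap and loop forever */
		for (id = 0; id <= 0xffff; ++id)
			queue_invalidate(&iommu, id);

		completion_wait(&iommu);  /* one wait amortizes all 65536 flushes */
		return 0;
	}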
@@ -631,47 +665,6 @@ static void domain_flush_devices(struct protection_domain *domain)
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-static void iommu_flush_all_domain_devices(void)
-{
-	struct protection_domain *domain;
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-
-	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		domain_flush_devices(domain);
-		domain_flush_complete(domain);
-	}
-
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
-void amd_iommu_flush_all_devices(void)
-{
-	iommu_flush_all_domain_devices();
-}
-
-/*
- * This function uses heavy locking and may disable irqs for some time. But
- * this is no issue because it is only called during resume.
- */
-void amd_iommu_flush_all_domains(void)
-{
-	struct protection_domain *domain;
-	unsigned long flags;
-
-	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
-
-	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		spin_lock(&domain->lock);
-		domain_flush_tlb_pde(domain);
-		domain_flush_complete(domain);
-		spin_unlock(&domain->lock);
-	}
-
-	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
-}
-
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
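
What this hunk deletes is all of the shared-state synchronization: the old global flushes had to take amd_iommu_pd_lock to walk the protection-domain list, plus each domain's own lock, and issued one flush per domain. For contrast, the old and new call patterns side by side (both taken from this patch):

	/* old: global flush, serialized on the shared domain list */
	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		spin_lock(&domain->lock);
		domain_flush_tlb_pde(domain);
		domain_flush_complete(domain);
		spin_unlock(&domain->lock);
	}
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);

	/* new: per-IOMMU ID sweep, no shared driver state touched */
	iommu_flush_all_caches(iommu);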
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 246d727b65b7..8848dda808e2 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -180,6 +180,12 @@ static u32 dev_table_size;	/* size of the device table */
 static u32 alias_table_size;	/* size of the alias table */
 static u32 rlookup_table_size;	/* size if the rlookup table */
 
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
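
Note that the new prototype is declared extern directly in amd_iommu_init.c rather than in a header, which duplicates the signature across translation units. Since the patch already touches amd_iommu_proto.h, an alternative (a style suggestion only, not what the patch does) would be to declare it there, next to the prototypes it replaces:

	/* in arch/x86/include/asm/amd_iommu_proto.h */
	extern void iommu_flush_all_caches(struct amd_iommu *iommu);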
@@ -1244,6 +1250,7 @@ static void enable_iommus(void)
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
+		iommu_flush_all_caches(iommu);
 	}
 }
 
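With this one line, the boot path gets the same guarantee as resume: each IOMMU's device-table and TLB caches are flushed immediately after the hardware is enabled, so no stale translations left over from firmware or a previous kernel can be honored. The hunk shows only the loop body; the surrounding enable_iommus() is presumed to iterate all IOMMUs, roughly:

	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);	/* start from a clean slate */
	}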
@@ -1274,8 +1281,8 @@ static void amd_iommu_resume(void)
 	 * we have to flush after the IOMMUs are enabled because a
 	 * disabled IOMMU will never execute the commands we send
 	 */
-	amd_iommu_flush_all_devices();
-	amd_iommu_flush_all_domains();
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
 }
 
 static int amd_iommu_suspend(void)
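
The resume path now walks the IOMMUs directly instead of going through the removed globals; this assumes a struct amd_iommu *iommu is declared earlier in amd_iommu_resume(), outside this hunk. for_each_iommu() itself is not shown in the patch; it is assumed here to be the driver's usual list helper, roughly:

	/* assumed definition, as in amd_iommu_types.h of this era */
	#define for_each_iommu(iommu) \
		list_for_each_entry((iommu), &amd_iommu_list, list)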