path: root/arch/x86/kernel/amd_iommu.c
author	Joerg Roedel <joerg.roedel@amd.com>	2009-11-20 11:02:44 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-11-27 08:16:28 -0500
commit	09b4280439ef6fdc55f1353a9135034336eb5d26 (patch)
tree	835ac6e2b3c488f8674a7fa6d67a1cb9f415d834 /arch/x86/kernel/amd_iommu.c
parent	e3306664eb307ae4cc93211cd9f12d0dbd49de65 (diff)
x86/amd-iommu: Reimplement flush_all_domains_on_iommu()
This patch reimplements the function flush_all_domains_on_iommu to use the
global protection domain list.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
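For readers skimming the hunk below: instead of scanning the domain-ID bitmap, the new code walks the global protection domain list under amd_iommu_pd_lock and takes each domain's own lock around the flush. The following standalone C program is only a userspace analogue of that locking pattern, not the kernel implementation; pthread mutexes stand in for the kernel spinlocks, and the structures and helper names here are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures. */
struct amd_iommu {
	int index;
};

struct protection_domain {
	int id;
	int dev_iommu[2];                 /* devices attached, per IOMMU index */
	pthread_mutex_t lock;             /* per-domain lock */
	struct protection_domain *next;   /* global list linkage */
};

static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER; /* protects the list */
static struct protection_domain *pd_list;                   /* global domain list */

static void flush_domain(struct amd_iommu *iommu, struct protection_domain *d)
{
	printf("flush all pages of domain %d on iommu %d\n", d->id, iommu->index);
}

/* Walk the global list and flush every domain that has devices on this IOMMU. */
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
	struct protection_domain *d;

	pthread_mutex_lock(&pd_lock);
	for (d = pd_list; d != NULL; d = d->next) {
		if (d->dev_iommu[iommu->index] == 0)
			continue;                 /* no devices behind this IOMMU */
		pthread_mutex_lock(&d->lock);
		flush_domain(iommu, d);
		pthread_mutex_unlock(&d->lock);
	}
	pthread_mutex_unlock(&pd_lock);
}

int main(void)
{
	static struct protection_domain d1 = { .id = 1, .dev_iommu = { 2, 0 },
					       .lock = PTHREAD_MUTEX_INITIALIZER };
	static struct protection_domain d2 = { .id = 2, .dev_iommu = { 0, 3 },
					       .lock = PTHREAD_MUTEX_INITIALIZER };
	struct amd_iommu iommu0 = { .index = 0 };

	d1.next = &d2;
	pd_list = &d1;

	flush_all_domains_on_iommu(&iommu0);  /* flushes only domain 1 */
	return 0;
}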
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	43
1 file changed, 24 insertions, 19 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 5141f5608c5c..a1bd99d390ab 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -499,43 +499,48 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 }
 
 /*
- * This function flushes one domain on one IOMMU
+ * This function flushes all domains that have devices on the given IOMMU
  */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 {
-	struct iommu_cmd cmd;
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	struct protection_domain *domain;
 	unsigned long flags;
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	__iommu_queue_command(iommu, &cmd);
-	__iommu_completion_wait(iommu);
-	__iommu_wait_for_completion(iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-	int i;
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		if (domain->dev_iommu[iommu->index] == 0)
 			continue;
-		flush_domain_on_iommu(iommu, i);
+
+		spin_lock(&domain->lock);
+		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
 
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
 void amd_iommu_flush_all_domains(void)
 {
 	struct protection_domain *domain;
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		spin_lock(&domain->lock);
 		iommu_flush_tlb_pde(domain);
 		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
+
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 static void flush_all_devices_for_iommu(struct amd_iommu *iommu)