 arch/x86/include/asm/amd_iommu_types.h |   1 +
 arch/x86/kernel/amd_iommu.c            | 100 +++++++++++-----------------
 2 files changed, 35 insertions(+), 66 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 93953d1922c4..f92d1b37b877 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -253,6 +253,7 @@ struct protection_domain {
  */
 struct iommu_dev_data {
 	struct list_head list;            /* For domain->dev_list */
+	struct device *dev;               /* Device this data belongs to */
 	struct device *alias;             /* The Alias Device */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;                    /* Domain attach reverent count */
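
The new dev_data->dev back-pointer is what lets the reworked flush paths below walk a domain's dev_list and get from each iommu_dev_data entry back to its struct device. As an illustration only (not part of the patch), a hypothetical debugging helper written against the new field could look like this; domain->dev_list and domain->lock are used the same way the patch itself uses them:

    /* Hypothetical helper: name every device attached to a domain. */
    static void domain_dump_devices(struct protection_domain *domain)
    {
            struct iommu_dev_data *dev_data;
            unsigned long flags;

            spin_lock_irqsave(&domain->lock, flags);
            list_for_each_entry(dev_data, &domain->dev_list, list)
                    dev_printk(KERN_DEBUG, dev_data->dev,
                               "attached to domain %d\n", domain->id);
            spin_unlock_irqrestore(&domain->lock, flags);
    }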
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 41c4ebecced4..0eafca58926f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -146,6 +146,8 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;
 
+	dev_data->dev = dev;
+
 	devid = get_device_id(dev);
 	alias = amd_iommu_alias_table[devid];
 	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
@@ -478,31 +480,21 @@ static void iommu_flush_complete(struct protection_domain *domain)
 /*
  * Command send function for invalidating a device table entry
  */
-static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
-{
-	struct iommu_cmd cmd;
-	int ret;
-
-	BUG_ON(iommu == NULL);
-
-	memset(&cmd, 0, sizeof(cmd));
-	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
-	cmd.data[0] = devid;
-
-	ret = iommu_queue_command(iommu, &cmd);
-
-	return ret;
-}
-
 static int iommu_flush_device(struct device *dev)
 {
 	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
 	u16 devid;
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
 
-	return iommu_queue_inv_dev_entry(iommu, devid);
+	/* Build command */
+	memset(&cmd, 0, sizeof(cmd));
+	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
+	cmd.data[0] = devid;
+
+	return iommu_queue_command(iommu, &cmd);
 }
 
 static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
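
Folding the single-use iommu_queue_inv_dev_entry() into its only caller drops the BUG_ON(iommu == NULL) and the pass-through ret variable; the queued command itself is unchanged. For reference, and assuming CMD_SET_TYPE() ORs the opcode into bits 28-31 of data[1] as amd_iommu_types.h defines it, the inlined sequence is equivalent to this hand-expanded sketch:

    /* Sketch only: the INV_DEV_ENTRY command iommu_flush_device() builds. */
    static void build_inv_dev_entry(struct iommu_cmd *cmd, u16 devid)
    {
            memset(cmd, 0, sizeof(*cmd));
            cmd->data[0] = devid;                    /* bits 0-15: requester ID */
            cmd->data[1] |= CMD_INV_DEV_ENTRY << 28; /* opcode field */
    }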
@@ -592,30 +584,43 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
+
 /*
- * This function flushes all domains that have devices on the given IOMMU
+ * This function flushes the DTEs for all devices in domain
  */
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+static void iommu_flush_domain_devices(struct protection_domain *domain)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	list_for_each_entry(dev_data, &domain->dev_list, list)
+		iommu_flush_device(dev_data->dev);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void iommu_flush_all_domain_devices(void)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 	struct protection_domain *domain;
 	unsigned long flags;
 
 	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		if (domain->dev_iommu[iommu->index] == 0)
-			continue;
-
-		spin_lock(&domain->lock);
-		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+		iommu_flush_domain_devices(domain);
 		iommu_flush_complete(domain);
-		spin_unlock(&domain->lock);
 	}
 
 	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+void amd_iommu_flush_all_devices(void)
+{
+	iommu_flush_all_domain_devices();
+}
+
 /*
  * This function uses heavy locking and may disable irqs for some time. But
  * this is no issue because it is only called during resume.
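
This is the core of the rework: DTE flushes are now driven by each domain's device list rather than by scanning lookup tables. The resulting fan-out, sketched as a call graph (an annotation, not part of the patch):

    amd_iommu_flush_all_devices()
        iommu_flush_all_domain_devices()          /* takes amd_iommu_pd_lock */
            iommu_flush_domain_devices(domain)    /* takes domain->lock */
                iommu_flush_device(dev_data->dev) /* queues CMD_INV_DEV_ENTRY */
            iommu_flush_complete(domain)          /* completion wait, once per domain */

Note that domain->lock still nests inside amd_iommu_pd_lock on this path, the same lock order the old flush_all_domains_on_iommu() used.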
@@ -637,38 +642,6 @@ void amd_iommu_flush_all_domains(void)
 	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
-static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
-{
-	int i;
-
-	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-		if (iommu != amd_iommu_rlookup_table[i])
-			continue;
-
-		iommu_queue_inv_dev_entry(iommu, i);
-		iommu_completion_wait(iommu);
-	}
-}
-
-static void flush_devices_by_domain(struct protection_domain *domain)
-{
-	struct amd_iommu *iommu;
-	int i;
-
-	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-		if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
-		    (amd_iommu_pd_table[i] != domain))
-			continue;
-
-		iommu = amd_iommu_rlookup_table[i];
-		if (!iommu)
-			continue;
-
-		iommu_queue_inv_dev_entry(iommu, i);
-		iommu_completion_wait(iommu);
-	}
-}
-
 static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 {
 	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
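
Both removed helpers scanned the entire requester-ID space (0 through amd_iommu_last_bdf, up to 64K entries) and issued an iommu_completion_wait() for every matching ID. The dev_list-based replacements above visit only devices actually attached to a domain and wait once per domain rather than once per device, so the rework is a behavioural improvement as well as a cleanup.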
@@ -679,17 +652,12 @@ static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 	iommu->reset_in_progress = true;
 
 	amd_iommu_reset_cmd_buffer(iommu);
-	flush_all_devices_for_iommu(iommu);
-	flush_all_domains_on_iommu(iommu);
+	amd_iommu_flush_all_devices();
+	amd_iommu_flush_all_domains();
 
 	iommu->reset_in_progress = false;
 }
 
-void amd_iommu_flush_all_devices(void)
-{
-	flush_devices_by_domain(NULL);
-}
-
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -1692,7 +1660,7 @@ static void update_domain(struct protection_domain *domain)
 		return;
 
 	update_device_table(domain);
-	flush_devices_by_domain(domain);
+	iommu_flush_domain_devices(domain);
 	iommu_flush_tlb_pde(domain);
 
 	domain->updated = false;
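
The ordering in update_domain() is worth noting: update_device_table() rewrites the DTEs first, the new iommu_flush_domain_devices() then invalidates the cached entries, and only afterwards does iommu_flush_tlb_pde() flush the IO/TLB, so translations re-fetched after the flush already see the updated device table.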
