Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c | 31 +++++++++++++++++++--------
1 files changed, 23 insertions, 8 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 68d43beccb7e..5ecfaf29933a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations — the ones with
+ * PASID support on bit 28 — have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                            ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
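
The gating that the new ecs_enabled() macro performs can be read as three independent conditions: the ecs_off kernel parameter has not been given, the hardware advertises extended context support, and the hardware also advertises PASID support at its new location (bit 40, per the comment in the hunk above). A minimal standalone sketch of that check follows; only the PASID-on-bit-40 detail comes from the patch itself, and the ECS bit position below is an illustrative assumption.

        #include <stdbool.h>
        #include <stdint.h>

        /* Hypothetical, self-contained model of the extended capability register.
         * Bit 40 for PASID comes from the comment in the patch; the ECS bit
         * position is an assumption made for this sketch only. */
        #define ECAP_ECS_BIT    24  /* assumed position for Extended Context Support */
        #define ECAP_PASID_BIT  40  /* new-style PASID support bit, per the comment */

        struct fake_iommu {
                uint64_t ecap;          /* snapshot of the extended capability register */
        };

        static int intel_iommu_ecs = 1; /* cleared by the new "ecs_off" option */

        /* Mirrors the spirit of ecs_enabled(): ECS is used only when it is not
         * disabled on the command line AND both capability bits are set. */
        static bool ecs_enabled(const struct fake_iommu *iommu)
        {
                return intel_iommu_ecs &&
                       (iommu->ecap & (1ULL << ECAP_ECS_BIT)) &&
                       (iommu->ecap & (1ULL << ECAP_PASID_BIT));
        }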
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
 			printk(KERN_INFO
 				"Intel-IOMMU: disable supported super page\n");
 			intel_iommu_superpage = 0;
+		} else if (!strncmp(str, "ecs_off", 7)) {
+			printk(KERN_INFO
+				"Intel-IOMMU: disable extended context table support\n");
+			intel_iommu_ecs = 0;
 		}
 
 		str += strcspn(str, ",");
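
The new branch slots into the existing intel_iommu= option parser, which matches each comma-separated token with strncmp() and then advances with strcspn(). A rough standalone sketch of that parsing pattern is below; the option names are the real ones, but the harness around them is illustrative, not the driver's code.

        #include <stdio.h>
        #include <string.h>

        static int intel_iommu_ecs = 1;

        /* Illustrative stand-in for the intel_iommu= parser loop: match a
         * token, then skip to the next comma-separated one with strcspn(). */
        static void parse_intel_iommu_opts(const char *str)
        {
                while (*str) {
                        if (!strncmp(str, "ecs_off", 7)) {
                                printf("Intel-IOMMU: disable extended context table support\n");
                                intel_iommu_ecs = 0;
                        }
                        /* ...other options ("on", "sp_off", "strict", ...) elided... */

                        str += strcspn(str, ",");
                        while (*str == ',')
                                str++;
                }
        }

        int main(void)
        {
                parse_intel_iommu_opts("on,ecs_off"); /* e.g. booting with intel_iommu=on,ecs_off */
                printf("intel_iommu_ecs = %d\n", intel_iommu_ecs);
                return 0;
        }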
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
 	struct context_entry *context;
 	u64 *entry;
 
-	if (ecap_ecs(iommu->ecap)) {
+	if (ecs_enabled(iommu)) {
 		if (devfn >= 0x80) {
 			devfn -= 0x80;
 			entry = &root->hi;
@@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
 	return &context[devfn];
 }
 
+static int iommu_dummy(struct device *dev)
+{
+	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
 	u16 segment = 0;
 	int i;
 
+	if (iommu_dummy(dev))
+		return NULL;
+
 	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		segment = pci_domain_nr(pdev->bus);
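
iommu_dummy() itself is unchanged; it is only moved earlier in the file (and removed from its old location in the final hunk below) so that device_to_iommu() can bail out immediately for devices that were marked as not needing translation. A hedged sketch of that early-exit pattern with stand-in types follows; the real kernel types and the DUMMY_DEVICE_DOMAIN_INFO sentinel are only modelled here.

        #include <stddef.h>
        #include <stdio.h>

        /* Stand-in sentinel: in the real driver, DUMMY_DEVICE_DOMAIN_INFO marks
         * a device whose DMA is not translated by the IOMMU. */
        #define DUMMY_DEVICE_DOMAIN_INFO ((void *)-1)

        struct fake_device {
                void *iommu_private;    /* models dev->archdata.iommu */
        };

        struct fake_intel_iommu;        /* opaque in this sketch */

        static int iommu_dummy(const struct fake_device *dev)
        {
                return dev->iommu_private == DUMMY_DEVICE_DOMAIN_INFO;
        }

        /* Models the new early return: callers of device_to_iommu() must be
         * prepared for a NULL result when the device is a "dummy". */
        static struct fake_intel_iommu *device_to_iommu(const struct fake_device *dev)
        {
                if (iommu_dummy(dev))
                        return NULL;
                /* ...real lookup over DRHD units elided... */
                return NULL;
        }

        int main(void)
        {
                struct fake_device dev = { .iommu_private = DUMMY_DEVICE_DOMAIN_INFO };
                printf("iommu: %p\n", (void *)device_to_iommu(&dev));
                return 0;
        }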
@@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
 		if (context)
 			free_pgtable_page(context);
 
-		if (!ecap_ecs(iommu->ecap))
+		if (!ecs_enabled(iommu))
 			continue;
 
 		context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 	unsigned long flag;
 
 	addr = virt_to_phys(iommu->root_entry);
-	if (ecap_ecs(iommu->ecap))
+	if (ecs_enabled(iommu))
 		addr |= DMA_RTADDR_RTT;
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
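
In the enable path above, the RTT (root table type) flag is now ORed into the root-table address only when extended context tables are actually going to be used. A compressed sketch of how that register value is composed under the same ecs_enabled() condition; the bit position used for DMA_RTADDR_RTT here is an assumption for illustration, not taken from the patch.

        #include <stdbool.h>
        #include <stdint.h>

        #define DMA_RTADDR_RTT (1ULL << 11)   /* assumed bit position, for illustration */

        /* Compose the value programmed into the root-table address register:
         * the physical address of the root table, plus the RTT flag when
         * extended root/context tables are in use. */
        static uint64_t root_table_addr(uint64_t root_entry_phys, bool ecs)
        {
                uint64_t addr = root_entry_phys;

                if (ecs)
                        addr |= DMA_RTADDR_RTT;
                return addr;
        }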
@@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
 	return __get_valid_domain_for_dev(dev);
 }
 
-static int iommu_dummy(struct device *dev)
-{
-	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static int iommu_no_mapping(struct device *dev)
 {