-rw-r--r--	drivers/iommu/intel-iommu.c		| 142
-rw-r--r--	drivers/iommu/intel_irq_remapping.c	|   5
-rw-r--r--	include/linux/intel-iommu.h		|  18
3 files changed, 82 insertions, 83 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a35927cd42e5..68d43beccb7e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -50,6 +50,7 @@
 #define CONTEXT_SIZE		VTD_PAGE_SIZE
 
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
 
@@ -184,32 +185,11 @@ static int force_on = 0;
  * 64-127: Reserved
  */
 struct root_entry {
-	u64	val;
-	u64	rsvd1;
+	u64	lo;
+	u64	hi;
 };
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-	return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-	root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-	root->val &= ~VTD_PAGE_MASK;
-	root->val |= value & VTD_PAGE_MASK;
-}
 
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-	return (struct context_entry *)
-		(root_present(root) ? phys_to_virt(
-		root->val & VTD_PAGE_MASK) :
-		NULL);
-}
 
 /*
  * low 64 bits:
@@ -682,6 +662,40 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
 }
 
+static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
+						       u8 bus, u8 devfn, int alloc)
+{
+	struct root_entry *root = &iommu->root_entry[bus];
+	struct context_entry *context;
+	u64 *entry;
+
+	entry = &root->lo;
+	if (ecap_ecs(iommu->ecap)) {
+		if (devfn >= 0x80) {
+			devfn -= 0x80;
+			entry = &root->hi;
+		}
+		devfn *= 2;
+	}
+	if (*entry & 1)
+		context = phys_to_virt(*entry & VTD_PAGE_MASK);
+	else {
+		unsigned long phy_addr;
+		if (!alloc)
+			return NULL;
+
+		context = alloc_pgtable_page(iommu->node);
+		if (!context)
+			return NULL;
+
+		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
+		phy_addr = virt_to_phys((void *)context);
+		*entry = phy_addr | 1;
+		__iommu_flush_cache(iommu, entry, sizeof(*entry));
+	}
+	return &context[devfn];
+}
+
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
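
The devfn arithmetic in iommu_context_addr() is easiest to check with concrete numbers: extended context entries are twice the size of legacy ones (256 vs. 128 bits), so each half of the root entry covers only half of the devfn space, and the index is doubled to keep addressing in units of the 16-byte struct context_entry. A minimal userspace sketch of the same math, not part of the patch (the example devfn is arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned int devfn = 0x85;	/* hypothetical: slot 0x10, function 5 */
		int ecs = 1;			/* pretend ecap_ecs(iommu->ecap) is set */
		const char *half = "lo";
		unsigned int index = devfn;

		if (ecs) {
			if (devfn >= 0x80) {
				devfn -= 0x80;	/* upper half of the bus... */
				half = "hi";	/* ...lives behind root->hi */
			}
			index = devfn * 2;	/* 256-bit entries, counted in 128-bit slots */
		}
		/* prints: root->hi, context[10] (byte offset 160) */
		printf("root->%s, context[%u] (byte offset %u)\n",
		       half, index, index * 16);
		return 0;
	}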
@@ -741,75 +755,36 @@ static void domain_flush_cache(struct dmar_domain *domain,
 		clflush_cache_range(addr, size);
 }
 
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
-		u8 bus, u8 devfn)
-{
-	struct root_entry *root;
-	struct context_entry *context;
-	unsigned long phy_addr;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	root = &iommu->root_entry[bus];
-	context = get_context_addr_from_root(root);
-	if (!context) {
-		context = (struct context_entry *)
-				alloc_pgtable_page(iommu->node);
-		if (!context) {
-			spin_unlock_irqrestore(&iommu->lock, flags);
-			return NULL;
-		}
-		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
-		phy_addr = virt_to_phys((void *)context);
-		set_root_value(root, phy_addr);
-		set_root_present(root);
-		__iommu_flush_cache(iommu, root, sizeof(*root));
-	}
-	spin_unlock_irqrestore(&iommu->lock, flags);
-	return &context[devfn];
-}
-
 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-	struct root_entry *root;
 	struct context_entry *context;
-	int ret;
+	int ret = 0;
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	root = &iommu->root_entry[bus];
-	context = get_context_addr_from_root(root);
-	if (!context) {
-		ret = 0;
-		goto out;
-	}
-	ret = context_present(&context[devfn]);
-out:
+	context = iommu_context_addr(iommu, bus, devfn, 0);
+	if (context)
+		ret = context_present(context);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return ret;
 }
 
 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
-	struct root_entry *root;
 	struct context_entry *context;
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	root = &iommu->root_entry[bus];
-	context = get_context_addr_from_root(root);
+	context = iommu_context_addr(iommu, bus, devfn, 0);
 	if (context) {
-		context_clear_entry(&context[devfn]);
-		__iommu_flush_cache(iommu, &context[devfn], \
-				    sizeof(*context));
+		context_clear_entry(context);
+		__iommu_flush_cache(iommu, context, sizeof(*context));
 	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void free_context_table(struct intel_iommu *iommu)
 {
-	struct root_entry *root;
 	int i;
 	unsigned long flags;
 	struct context_entry *context;
@@ -819,10 +794,17 @@ static void free_context_table(struct intel_iommu *iommu)
 		goto out;
 	}
 	for (i = 0; i < ROOT_ENTRY_NR; i++) {
-		root = &iommu->root_entry[i];
-		context = get_context_addr_from_root(root);
+		context = iommu_context_addr(iommu, i, 0, 0);
+		if (context)
+			free_pgtable_page(context);
+
+		if (!ecap_ecs(iommu->ecap))
+			continue;
+
+		context = iommu_context_addr(iommu, i, 0x80, 0);
 		if (context)
 			free_pgtable_page(context);
+
 	}
 	free_pgtable_page(iommu->root_entry);
 	iommu->root_entry = NULL;
@@ -1146,14 +1128,16 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 
 static void iommu_set_root_entry(struct intel_iommu *iommu)
 {
-	void *addr;
+	u64 addr;
 	u32 sts;
 	unsigned long flag;
 
-	addr = iommu->root_entry;
+	addr = virt_to_phys(iommu->root_entry);
+	if (ecap_ecs(iommu->ecap))
+		addr |= DMA_RTADDR_RTT;
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
-	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
 
 	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
 
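
For the root pointer write above: DMAR_RTADDR_REG carries the 4KiB-aligned physical address of the root table in its upper bits, which leaves bit 11 (RTT) free to select the extended root-table format. A standalone sketch of composing the register value, with the constant copied from the header change further down (not part of the patch):

	#include <stdint.h>

	#define DMA_RTADDR_RTT (((uint64_t)1) << 11)

	/* Sketch: build the DMAR_RTADDR_REG value.  root_phys comes from
	 * virt_to_phys(iommu->root_entry) and is page-aligned, so the low
	 * 12 bits are zero and bit 11 can carry the root-table-type flag. */
	uint64_t rtaddr_value(uint64_t root_phys, int extended)
	{
		uint64_t val = root_phys;

		if (extended)
			val |= DMA_RTADDR_RTT;
		return val;
	}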
@@ -1800,7 +1784,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
 	       translation != CONTEXT_TT_MULTI_LEVEL);
 
-	context = device_to_context_entry(iommu, bus, devfn);
+	spin_lock_irqsave(&iommu->lock, flags);
+	context = iommu_context_addr(iommu, bus, devfn, 1);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 	if (!context)
 		return -ENOMEM;
 	spin_lock_irqsave(&iommu->lock, flags);
@@ -2564,6 +2550,10 @@ static bool device_has_rmrr(struct device *dev)
  * In both cases we assume that PCI USB devices with RMRRs have them largely
  * for historical reasons and that the RMRR space is not actively used post
  * boot.  This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
  */
 static bool device_is_rmrr_locked(struct device *dev)
 {
@@ -2573,7 +2563,7 @@ static bool device_is_rmrr_locked(struct device *dev)
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
-		if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
+		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
 			return false;
 	}
 
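
The IS_USB_DEVICE()/IS_GFX_DEVICE() tests above rely on the PCI class-code layout: pdev->class packs base class, sub-class and programming interface as 0xBBSSPP, so shifting by 16 isolates the base class while shifting by 8 yields the (base, sub-class) pair that the PCI_CLASS_* constants encode. A standalone illustration, not part of the patch (example class value chosen arbitrarily):

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_BASE_CLASS_DISPLAY	0x03
	#define PCI_CLASS_SERIAL_USB	0x0c03

	int main(void)
	{
		uint32_t class = 0x0c0330;	/* hypothetical xHCI USB controller */

		/* prints: gfx=0 usb=1 */
		printf("gfx=%d usb=%d\n",
		       (class >> 16) == PCI_BASE_CLASS_DISPLAY,
		       (class >> 8) == PCI_CLASS_SERIAL_USB);
		return 0;
	}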
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6c25b3c5b729..5709ae9c3e77 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -637,10 +637,7 @@ static int __init intel_enable_irq_remapping(void)
 	if (x2apic_supported()) {
 		eim = !dmar_x2apic_optout();
 		if (!eim)
-			printk(KERN_WARNING
-				"Your BIOS is broken and requested that x2apic be disabled.\n"
-				"This will slightly decrease performance.\n"
-				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
+			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
 	}
 
 	for_each_iommu(iommu, drhd) {
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a65208a8fe18..796ef9645827 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -115,10 +115,19 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
  * Extended Capability Register
  */
 
-#define ecap_niotlb_iunits(e)	((((e) >> 24) & 0xff) + 1)
+#define ecap_pss(e)		((e >> 35) & 0x1f)
+#define ecap_eafs(e)		((e >> 34) & 0x1)
+#define ecap_nwfs(e)		((e >> 33) & 0x1)
+#define ecap_srs(e)		((e >> 31) & 0x1)
+#define ecap_ers(e)		((e >> 30) & 0x1)
+#define ecap_prs(e)		((e >> 29) & 0x1)
+#define ecap_pasid(e)		((e >> 28) & 0x1)
+#define ecap_dis(e)		((e >> 27) & 0x1)
+#define ecap_nest(e)		((e >> 26) & 0x1)
+#define ecap_mts(e)		((e >> 25) & 0x1)
+#define ecap_ecs(e)		((e >> 24) & 0x1)
 #define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) \
-	(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
 #define ecap_coherent(e)	((e) & 0x1)
 #define ecap_qis(e)		((e) & 0x2)
 #define ecap_pass_through(e)	((e >> 6) & 0x1)
@@ -180,6 +189,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_GSTS_IRES (((u32)1) << 25)
 #define DMA_GSTS_CFIS (((u32)1) << 23)
 
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)
 #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
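
The new single-bit ecap_*() accessors all follow the same shift-and-mask pattern; ECS, for instance, is bit 24 of the Extended Capability Register. A quick standalone decode of a sample value (macros copied from the hunk above; the register value is invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define ecap_ecs(e)	((e >> 24) & 0x1)
	#define ecap_pasid(e)	((e >> 28) & 0x1)
	#define ecap_pss(e)	((e >> 35) & 0x1f)

	int main(void)
	{
		uint64_t ecap = (1ULL << 24) | (1ULL << 28) | (4ULL << 35);

		/* prints: ecs=1 pasid=1 pss=4 */
		printf("ecs=%d pasid=%d pss=%d\n",
		       (int)ecap_ecs(ecap), (int)ecap_pasid(ecap),
		       (int)ecap_pss(ecap));
		return 0;
	}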