author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-24 01:08:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-24 01:08:24 -0400
commit     6a0596583fadd15dca293736114abdea306d3d7c
tree       39ce634c2f9578c548f990215579559ba4dc630d /drivers
parent     15cc91011246fa67f755eb3feea08d57a7692fde
parent     4399c8bf2b9093696fa8160d79712e7346989c46
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
intel-iommu: fix superpage support in pfn_to_dma_pte()
intel-iommu: set iommu_superpage on VM domains to lowest common denominator
intel-iommu: fix return value of iommu_unmap() API
MAINTAINERS: Update VT-d entry for drivers/pci -> drivers/iommu move
intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.
intel-iommu: Workaround IOTLB hang on Ironlake GPU
intel-iommu: Fix AB-BA lockdep report
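
[Annotation: the second hunk below exports `intel_iommu_gfx_mapped`, which gives graphics drivers a way to ask whether VT-d is actively translating the integrated GPU. A minimal consumer sketch, assuming the extern is made visible through some shared header; the helper name here is illustrative, not part of this commit:]

```c
/* Illustrative only: the variable is exported by this commit, but
 * this wrapper is an assumption made for the sketch. */
extern int intel_iommu_gfx_mapped;

static bool gfx_behind_iommu(void)
{
	/* Set to 1 by init_no_remapping_devices() when an IOMMU that
	 * covers only gfx devices is left enabled (dmar_map_gfx). */
	return intel_iommu_gfx_mapped != 0;
}
```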
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/intel-iommu.c  75
1 file changed, 45 insertions(+), 30 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99da..a88f3cbb100b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
         return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+        return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
         return !((unsigned long)pte & ~VTD_PAGE_MASK);
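
[Annotation: the bit layout implied by the checks in this file, gathered for orientation. Bits 0-1 are the read/write bits tested by `dma_pte_present()` above; bit 7 is the large/super-page flag, which a later hunk's old code refers to as `DMA_PTE_LARGE_PAGE`. The macro values are inferred from those checks:]

```c
#define DMA_PTE_READ		(1)        /* tested via (pte->val & 3) */
#define DMA_PTE_WRITE		(2)
#define DMA_PTE_LARGE_PAGE	(1 << 7)   /* tested by dma_pte_superpage() */
```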
@@ -404,6 +409,9 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
 static void domain_update_iommu_superpage(struct dmar_domain *domain)
 {
-        int i, mask = 0xf;
+        struct dmar_drhd_unit *drhd;
+        struct intel_iommu *iommu = NULL;
+        int mask = 0xf;
 
         if (!intel_iommu_superpage) {
                 domain->iommu_superpage = 0;
                 return;
         }
 
-        domain->iommu_superpage = 4; /* 1TiB */
-
-        for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-                mask |= cap_super_page_val(g_iommus[i]->cap);
+        /* set iommu_superpage to the smallest common denominator */
+        for_each_active_iommu(iommu, drhd) {
+                mask &= cap_super_page_val(iommu->cap);
                 if (!mask) {
                         break;
                 }
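
[Annotation: the switch from `|=` to `&=` is the substance of the "lowest common denominator" fix: `cap_super_page_val()` yields a bitmask of superpage sizes a unit supports, so only sizes that every active IOMMU supports may survive the loop. A worked example, with capability values chosen for illustration:]

```c
/* IOMMU A supports two superpage sizes: cap_super_page_val == 0x3
 * IOMMU B supports only one:            cap_super_page_val == 0x1
 *
 * old: mask = 0xf; mask |= 0x3; mask |= 0x1;  -> 0xf (over-promises)
 * new: mask = 0xf; mask &= 0x3; mask &= 0x1;  -> 0x1 (common subset) */
```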
@@ -730,29 +739,23 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-                                      unsigned long pfn, int large_level)
+                                      unsigned long pfn, int target_level)
 {
         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
         struct dma_pte *parent, *pte = NULL;
         int level = agaw_to_level(domain->agaw);
-        int offset, target_level;
+        int offset;
 
         BUG_ON(!domain->pgd);
         BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
         parent = domain->pgd;
 
-        /* Search pte */
-        if (!large_level)
-                target_level = 1;
-        else
-                target_level = large_level;
-
         while (level > 0) {
                 void *tmp_page;
 
                 offset = pfn_level_offset(pfn, level);
                 pte = &parent[offset];
-                if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+                if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                         break;
                 if (level == target_level)
                         break;
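
[Annotation: with the `large_level` special-casing gone, `target_level` reads directly as the level to stop at, and 0 becomes pure lookup. The superpage fix is the new 0 case, which now also stops at a non-present entry instead of walking into (and allocating) page tables below an existing superpage. The convention, as a comment sketch inferred from this hunk:]

```c
/* target_level == 0: lookup mode - walk until hitting an existing
 *                    superpage PTE or a non-present entry;
 * target_level == 1: descend (allocating as needed) to the leaf level;
 * target_level == 2: stop one level up, at a superpage slot; etc.   */
```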
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
                                 unsigned long start_pfn,
                                 unsigned long last_pfn)
 {
         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
         unsigned int large_page = 1;
         struct dma_pte *first_pte, *pte;
+        int order;
 
         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
                            (void *)pte - (void *)first_pte);
 
         } while (start_pfn && start_pfn <= last_pfn);
+
+        order = (large_page - 1) * 9;
+        return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
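
[Annotation: the new return value tells the caller how large the last cleared PTE actually was. Each VT-d page-table level translates 9 bits of address, so `order = (large_page - 1) * 9` converts a level number into a page order. A standalone sketch of the arithmetic, as ordinary userspace C for illustration only:]

```c
#include <stdio.h>

/* A large page at level N covers 4KiB << (9 * (N - 1)), mirroring
 * "order = (large_page - 1) * 9" in dma_pte_clear_range(). */
static unsigned long level_to_order(unsigned int level)
{
	return 9UL * (level - 1);
}

int main(void)
{
	for (unsigned int lvl = 1; lvl <= 3; lvl++)
		printf("level %u -> order %lu (%lu KiB)\n", lvl,
		       level_to_order(lvl),
		       (4096UL << level_to_order(lvl)) >> 10);
	return 0; /* level 1 -> 4KiB, level 2 -> 2MiB, level 3 -> 1GiB */
}
```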
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
                 }
         }
 
-        if (dmar_map_gfx)
-                return;
-
         for_each_drhd_unit(drhd) {
                 int i;
                 if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
 
                 for (i = 0; i < drhd->devices_cnt; i++)
                         if (drhd->devices[i] &&
                             !IS_GFX_DEVICE(drhd->devices[i]))
                                 break;
 
                 if (i < drhd->devices_cnt)
                         continue;
 
-                /* bypass IOMMU if it is just for gfx devices */
-                drhd->ignored = 1;
-                for (i = 0; i < drhd->devices_cnt; i++) {
-                        if (!drhd->devices[i])
-                                continue;
-                        drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+                /* This IOMMU has *only* gfx devices. Either bypass it or
+                   set the gfx_mapped flag, as appropriate */
+                if (dmar_map_gfx) {
+                        intel_iommu_gfx_mapped = 1;
+                } else {
+                        drhd->ignored = 1;
+                        for (i = 0; i < drhd->devices_cnt; i++) {
+                                if (!drhd->devices[i])
+                                        continue;
+                                drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+                        }
                 }
         }
 }
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                         found = 1;
         }
 
+        spin_unlock_irqrestore(&device_domain_lock, flags);
+
         if (found == 0) {
                 unsigned long tmp_flags;
                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                         spin_unlock_irqrestore(&iommu->lock, tmp_flags);
                 }
         }
-
-        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
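
[Annotation: the two hunks above are the lockdep fix. `device_domain_lock` is now dropped before `domain->iommu_lock` and `iommu->lock` are acquired, instead of being held across them. The AB-BA shape being avoided, sketched as a comment with the two locks from these hunks; illustrative, not kernel code:]

```c
/*   CPU 0                            CPU 1
 *   lock(device_domain_lock);        lock(iommu_lock);
 *   lock(iommu_lock);    <-waits->   lock(device_domain_lock); <-waits->
 *
 * Releasing device_domain_lock first means no path can hold both
 * locks in the reverse order. */
```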
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
                 vm_domain_exit(dmar_domain);
                 return -ENOMEM;
         }
+        domain_update_iommu_cap(dmar_domain);
         domain->priv = dmar_domain;
 
         return 0;
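
[Annotation: calling `domain_update_iommu_cap()` at domain init is what propagates the recomputed superpage value onto newly created VM domains. Its body is not part of this diff; the shape below is a hedged reconstruction, shown only to connect it to `domain_update_iommu_superpage()` above:]

```c
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	/* Refresh the capabilities derived from the active IOMMUs,
	 * including the superpage common denominator computed above. */
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}
```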
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
         struct dmar_domain *dmar_domain = domain->priv;
         size_t size = PAGE_SIZE << gfp_order;
+        int order;
 
-        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
-                            (iova + size - 1) >> VTD_PAGE_SHIFT);
+        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+                                    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
         if (dmar_domain->max_addr == iova + size)
                 dmar_domain->max_addr = iova;
 
-        return gfp_order;
+        return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                 dmar_map_gfx = 0;
-        }
+        } else if (dmar_map_gfx) {
+                /* we have to ensure the gfx device is idle before we flush */
+                printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+                intel_iommu_strict = 1;
+        }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
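
[Annotation: the Ironlake hunk trades throughput for safety. With `intel_iommu_strict` set, each unmap invalidates the IOTLB synchronously rather than queueing the flush for later, matching the in-diff comment that the gfx device must be idle before the flush. Conceptually, as a comment sketch rather than anything taken from the patch:]

```c
/* batched (default): queue IOTLB invalidations, flush in bulk later -
 *                    faster, but the flush lands at an unpredictable
 *                    time, possibly while the GPU is mid-access;
 * strict:            invalidate on every unmap, before returning -
 *                    slower, but the flush happens at a point the
 *                    driver controls. */
```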