Diffstat (limited to 'drivers/pci/intel-iommu.c')
 -rw-r--r--  drivers/pci/intel-iommu.c | 107
 1 file changed, 105 insertions(+), 2 deletions(-)
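The patch below gives virtual-machine (VM) domains their own lifecycle, separate from the device domains used for native DMA: iommu_alloc_vm_domain() hands out an id from the private vm_domid counter (never programmed into a context entry), vm_domain_init() sets up the iova domain, address widths and top-level page table, vm_domain_exit() tears all of that down again, and free_dmar_iommu() now dispatches on DOMAIN_FLAG_VIRTUAL_MACHINE. As a rough sketch of how a caller inside intel-iommu.c might chain the new helpers; the wrapper name, the 48-bit guest width and the error handling are illustrative assumptions, not part of this patch:

/* Hypothetical caller sketch; only the vm_* helpers come from this patch. */
static struct dmar_domain *example_create_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = iommu_alloc_vm_domain();	/* flags = DOMAIN_FLAG_VIRTUAL_MACHINE */
	if (!domain)
		return NULL;

	/* 48 bits of guest address width is an assumed example value */
	if (vm_domain_init(domain, 48)) {
		/* init can only fail before the top pgd exists, so release
		 * the reserved iovas and the domain memory directly */
		put_iova_domain(&domain->iovad);
		free_domain_mem(domain);
		return NULL;
	}

	return domain;
}

Once the last IOMMU reference to such a domain is dropped, free_dmar_iommu() calls vm_domain_exit() rather than domain_exit(), as the first two hunks show.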
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f0a21995b135..171f6c61fa1d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1216,6 +1216,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
@@ -1229,8 +1230,12 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 		clear_bit(i, iommu->domain_ids);
 
 		spin_lock_irqsave(&domain->iommu_lock, flags);
-		if (--domain->iommu_count == 0)
-			domain_exit(domain);
+		if (--domain->iommu_count == 0) {
+			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+				vm_domain_exit(domain);
+			else
+				domain_exit(domain);
+		}
 		spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
 		i = find_next_bit(iommu->domain_ids,
@@ -2792,6 +2797,104 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 	spin_unlock_irqrestore(&device_domain_lock, flags1);
 }
 
+/* domain id for virtual machine, it won't be set in context */
+static unsigned long vm_domid;
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+	struct dmar_domain *domain;
+
+	domain = alloc_domain_mem();
+	if (!domain)
+		return NULL;
+
+	domain->id = vm_domid++;
+	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+	return domain;
+}
+
+static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+{
+	int adjust_width;
+
+	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+	spin_lock_init(&domain->mapping_lock);
+	spin_lock_init(&domain->iommu_lock);
+
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	domain->agaw = width_to_agaw(adjust_width);
+
+	INIT_LIST_HEAD(&domain->devices);
+
+	domain->iommu_count = 0;
+	domain->iommu_coherency = 0;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	if (!domain->pgd)
+		return -ENOMEM;
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+	unsigned long flags;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	unsigned long i;
+	unsigned long ndomains;
+
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+		iommu = drhd->iommu;
+
+		ndomains = cap_ndoms(iommu->cap);
+		i = find_first_bit(iommu->domain_ids, ndomains);
+		for (; i < ndomains; ) {
+			if (iommu->domains[i] == domain) {
+				spin_lock_irqsave(&iommu->lock, flags);
+				clear_bit(i, iommu->domain_ids);
+				iommu->domains[i] = NULL;
+				spin_unlock_irqrestore(&iommu->lock, flags);
+				break;
+			}
+			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
+		}
+	}
+}
+
+static void vm_domain_exit(struct dmar_domain *domain)
+{
+	u64 end;
+
+	/* Domain 0 is reserved, so dont process it */
+	if (!domain)
+		return;
+
+	vm_domain_remove_all_dev_info(domain);
+	/* destroy iovas */
+	put_iova_domain(&domain->iovad);
+	end = DOMAIN_MAX_ADDR(domain->gaw);
+	end = end & (~VTD_PAGE_MASK);
+
+	/* clear ptes */
+	dma_pte_clear_range(domain, 0, end);
+
+	/* free page tables */
+	dma_pte_free_pagetable(domain, 0, end);
+
+	iommu_free_vm_domain(domain);
+	free_domain_mem(domain);
+}
+
 void intel_iommu_domain_exit(struct dmar_domain *domain)
 {
 	u64 end;