 drivers/pci/intel-iommu.c | 61 +++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f1380269cabd..772fb22e1be0 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -230,6 +230,7 @@ struct dmar_domain {
 	int		iommu_coherency;/* indicate coherency of iommu access */
 	int		iommu_count;	/* reference count of iommu */
 	spinlock_t	iommu_lock;	/* protect iommu set in domain */
+	u64		max_addr;	/* maximum mapped address */
 };
 
 /* PCI domain-device relationship */
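The new max_addr field is the domain's high-water mark: the highest guest address that has ever been mapped, kept at VT-d page granularity. A minimal user-space sketch of the rounding arithmetic that maintains it, assuming the usual 4 KiB VTD_PAGE_SHIFT and mask/align definitions from intel-iommu.h (assumptions, since neither appears in this patch):

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror intel-iommu.h: 4 KiB VT-d pages. */
#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1ULL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK	(~(VTD_PAGE_SIZE - 1))
#define VTD_PAGE_ALIGN(a)	(((a) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

int main(void)
{
	uint64_t iova = 0x12345678, size = 0x2000;
	/* same arithmetic the patch uses to grow domain->max_addr */
	uint64_t max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	printf("max_addr = 0x%llx\n", (unsigned long long)max_addr); /* 0x12347000 */
	return 0;
}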
@@ -2849,6 +2850,22 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 /* domain id for virtual machine, it won't be set in context */
 static unsigned long vm_domid;
 
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+	int i;
+	int min_agaw = domain->agaw;
+
+	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	for (; i < g_num_of_iommus; ) {
+		if (min_agaw > g_iommus[i]->agaw)
+			min_agaw = g_iommus[i]->agaw;
+
+		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+	}
+
+	return min_agaw;
+}
+
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
 	struct dmar_domain *domain;
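vm_domain_min_agaw() walks the domain's iommu_bmp and returns the smallest adjusted guest address width (agaw) among the IOMMUs the domain is attached to: a mapping must fit within the page-table depth of every unit, so the narrowest one governs. A toy user-space model of the same walk, with a plain u64 standing in for the kernel bitmap and its find_first_bit()/find_next_bit() iteration:

#include <stdint.h>
#include <stdio.h>

struct toy_iommu { int agaw; };

/* Models vm_domain_min_agaw(): scan only the bits set in the bitmap,
 * tracking the minimum agaw seen (the domain's own agaw is the start). */
static int toy_min_agaw(uint64_t iommu_bmp, const struct toy_iommu *iommus,
			int num, int domain_agaw)
{
	int min_agaw = domain_agaw;
	int i;

	for (i = 0; i < num; i++) {
		if (!(iommu_bmp & ((uint64_t)1 << i)))
			continue;	/* unit not attached to this domain */
		if (min_agaw > iommus[i].agaw)
			min_agaw = iommus[i].agaw;
	}
	return min_agaw;
}

int main(void)
{
	struct toy_iommu iommus[] = { {2}, {3}, {1} };
	/* units 0 and 2 attached (bitmap 0x5): the 1-level unit wins */
	printf("min agaw = %d\n", toy_min_agaw(0x5, iommus, 3, 4));
	return 0;
}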
@@ -2883,6 +2900,7 @@ static int vm_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
@@ -2974,6 +2992,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_free_domain);
 int intel_iommu_attach_device(struct dmar_domain *domain,
 			      struct pci_dev *pdev)
 {
+	struct intel_iommu *iommu;
+	int addr_width;
+	u64 end;
 	int ret;
 
 	/* normally pdev is not mapped */
@@ -2989,6 +3010,21 @@ int intel_iommu_attach_device(struct dmar_domain *domain,
 		}
 	}
 
+	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+	if (!iommu)
+		return -ENODEV;
+
+	/* check if this iommu agaw is sufficient for max mapped address */
+	addr_width = agaw_to_width(iommu->agaw);
+	end = DOMAIN_MAX_ADDR(addr_width);
+	end = end & VTD_PAGE_MASK;
+	if (end < domain->max_addr) {
+		printk(KERN_ERR "%s: iommu agaw (%d) is not "
+		       "sufficient for the mapped address (%llx)\n",
+		       __func__, iommu->agaw, domain->max_addr);
+		return -EFAULT;
+	}
+
 	ret = domain_context_mapping(domain, pdev);
 	if (ret)
 		return ret;
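At attach time the patch derives the highest address the candidate IOMMU can translate from its agaw, and refuses the device if the domain already maps beyond that reach. A worked example of the computation, assuming agaw_to_width(agaw) = 30 + 9 * agaw and DOMAIN_MAX_ADDR(gaw) = (1ULL << gaw) - 1, which match my reading of the helpers elsewhere in intel-iommu.c but are not shown in this hunk:

#include <stdint.h>
#include <stdio.h>

#define LEVEL_STRIDE		9
#define DOMAIN_MAX_ADDR(gaw)	((((uint64_t)1) << (gaw)) - 1)

static int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

int main(void)
{
	uint64_t page_mask = ~(((uint64_t)1 << 12) - 1);
	uint64_t max_addr = 0x8000000000ULL;	/* domain maps up to 512 GiB */
	int agaw;

	for (agaw = 1; agaw <= 2; agaw++) {
		uint64_t end = DOMAIN_MAX_ADDR(agaw_to_width(agaw)) & page_mask;
		printf("agaw %d: %d-bit reach, end 0x%llx -> %s\n",
		       agaw, agaw_to_width(agaw), (unsigned long long)end,
		       end < max_addr ? "reject attach" : "attach ok");
	}
	return 0;
}

With these assumptions, a 3-level (agaw 1) unit reaches only 39 bits and is rejected for this domain, while a 4-level (agaw 2) unit reaches 48 bits and passes.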
@@ -3008,7 +3044,29 @@ EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
 int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
 			    u64 hpa, size_t size, int prot)
 {
+	u64 max_addr;
+	int addr_width;
 	int ret;
+
+	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+	if (domain->max_addr < max_addr) {
+		int min_agaw;
+		u64 end;
+
+		/* check if minimum agaw is sufficient for mapped address */
+		min_agaw = vm_domain_min_agaw(domain);
+		addr_width = agaw_to_width(min_agaw);
+		end = DOMAIN_MAX_ADDR(addr_width);
+		end = end & VTD_PAGE_MASK;
+		if (end < max_addr) {
+			printk(KERN_ERR "%s: iommu agaw (%d) is not "
+			       "sufficient for the mapped address (%llx)\n",
+			       __func__, min_agaw, max_addr);
+			return -EFAULT;
+		}
+		domain->max_addr = max_addr;
+	}
+
 	ret = domain_page_mapping(domain, iova, hpa, size, prot);
 	return ret;
 }
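The map path runs the check only when the new mapping would raise the high-water mark: compute the page-aligned top of the new range, verify the narrowest attached IOMMU can still address it, and only then commit the larger max_addr. A compact user-space model of that grow-or-reject decision (the helper name map_check and the precomputed minimum width are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SIZE	4096ULL
#define VTD_PAGE_MASK	(~(VTD_PAGE_SIZE - 1))
#define VTD_PAGE_ALIGN(a)	(((a) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

/* Grow the high-water mark only if the narrowest attached iommu
 * (min_width bits, assumed precomputed from min agaw) covers the new top. */
static int map_check(uint64_t *dom_max_addr, uint64_t iova, uint64_t size,
		     int min_width)
{
	uint64_t max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	uint64_t end = ((((uint64_t)1) << min_width) - 1) & VTD_PAGE_MASK;

	if (*dom_max_addr >= max_addr)
		return 0;		/* no growth needed, nothing to check */
	if (end < max_addr)
		return -1;		/* would exceed page-table reach */
	*dom_max_addr = max_addr;	/* commit the new high-water mark */
	return 0;
}

int main(void)
{
	uint64_t dom_max = 0;
	/* with 39-bit reach, a 4 KiB map at 512 GiB must fail */
	printf("%d\n", map_check(&dom_max, 1ULL << 39, 4096, 39));	/* -1 */
	printf("%d\n", map_check(&dom_max, 0x1000, 4096, 39));		/*  0 */
	return 0;
}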
@@ -3023,6 +3081,9 @@ void intel_iommu_unmap_address(struct dmar_domain *domain,
 	base = iova & VTD_PAGE_MASK;
 	size = VTD_PAGE_ALIGN(size);
 	dma_pte_clear_range(domain, base, base + size);
+
+	if (domain->max_addr == base + size)
+		domain->max_addr = base;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
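The unmap side shrinks max_addr only when the cleared range ends exactly at the current high-water mark; punching a hole in the middle leaves it unchanged. That is conservative by design: a stale-high max_addr can only make the agaw checks above stricter, never let an unreachable address through. Modeled in isolation (unmap_shrink is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

static void unmap_shrink(uint64_t *dom_max_addr, uint64_t base, uint64_t size)
{
	if (*dom_max_addr == base + size)
		*dom_max_addr = base;	/* the top of the mapping was removed */
	/* interior holes leave max_addr conservatively unchanged */
}

int main(void)
{
	uint64_t max_addr = 0x4000;
	unmap_shrink(&max_addr, 0x1000, 0x1000);	/* hole: stays 0x4000 */
	unmap_shrink(&max_addr, 0x3000, 0x1000);	/* top:  drops to 0x3000 */
	printf("0x%llx\n", (unsigned long long)max_addr);
	return 0;
}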