summaryrefslogtreecommitdiffstats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
author: Joerg Roedel <jroedel@suse.de> 2019-07-22 10:21:05 -0400
committer: Joerg Roedel <jroedel@suse.de> 2019-07-22 10:21:17 -0400
commit301e7ee1dec513e5aca12d01c819a1f762918d0a (patch)
tree156468a6ddc629d398391b4c19736d466d09b54b /drivers/iommu/intel-iommu.c
parent5f9e832c137075045d15cd6899ab0505cfb2ca4b (diff)
Revert "iommu/vt-d: Consolidate domain_init() to avoid duplication"
This reverts commit 123b2ffc376e1b3e9e015c75175b61e88a8b8518. This commit reportedly caused boot failures on some systems and needs to be reverted for now. Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r-- drivers/iommu/intel-iommu.c | 123
1 file changed, 87 insertions(+), 36 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..441781d12553 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1833,6 +1833,63 @@ static inline int guestwidth_to_adjustwidth(int gaw)
1833 return agaw; 1833 return agaw;
1834} 1834}
1835 1835
1836static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1837 int guest_width)
1838{
1839 int adjust_width, agaw;
1840 unsigned long sagaw;
1841 int err;
1842
1843 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1844
1845 err = init_iova_flush_queue(&domain->iovad,
1846 iommu_flush_iova, iova_entry_free);
1847 if (err)
1848 return err;
1849
1850 domain_reserve_special_ranges(domain);
1851
1852 /* calculate AGAW */
1853 if (guest_width > cap_mgaw(iommu->cap))
1854 guest_width = cap_mgaw(iommu->cap);
1855 domain->gaw = guest_width;
1856 adjust_width = guestwidth_to_adjustwidth(guest_width);
1857 agaw = width_to_agaw(adjust_width);
1858 sagaw = cap_sagaw(iommu->cap);
1859 if (!test_bit(agaw, &sagaw)) {
1860 /* hardware doesn't support it, choose a bigger one */
1861 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1862 agaw = find_next_bit(&sagaw, 5, agaw);
1863 if (agaw >= 5)
1864 return -ENODEV;
1865 }
1866 domain->agaw = agaw;
1867
1868 if (ecap_coherent(iommu->ecap))
1869 domain->iommu_coherency = 1;
1870 else
1871 domain->iommu_coherency = 0;
1872
1873 if (ecap_sc_support(iommu->ecap))
1874 domain->iommu_snooping = 1;
1875 else
1876 domain->iommu_snooping = 0;
1877
1878 if (intel_iommu_superpage)
1879 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1880 else
1881 domain->iommu_superpage = 0;
1882
1883 domain->nid = iommu->node;
1884
1885 /* always allocate the top pgd */
1886 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1887 if (!domain->pgd)
1888 return -ENOMEM;
1889 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1890 return 0;
1891}
1892
1836static void domain_exit(struct dmar_domain *domain) 1893static void domain_exit(struct dmar_domain *domain)
1837{ 1894{
1838 struct page *freelist; 1895 struct page *freelist;
@@ -2513,31 +2570,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2513 return 0; 2570 return 0;
2514} 2571}
2515 2572
2516static int domain_init(struct dmar_domain *domain, int guest_width)
2517{
2518 int adjust_width;
2519
2520 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
2521 domain_reserve_special_ranges(domain);
2522
2523 /* calculate AGAW */
2524 domain->gaw = guest_width;
2525 adjust_width = guestwidth_to_adjustwidth(guest_width);
2526 domain->agaw = width_to_agaw(adjust_width);
2527
2528 domain->iommu_coherency = 0;
2529 domain->iommu_snooping = 0;
2530 domain->iommu_superpage = 0;
2531 domain->max_addr = 0;
2532
2533 /* always allocate the top pgd */
2534 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
2535 if (!domain->pgd)
2536 return -ENOMEM;
2537 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2538 return 0;
2539}
2540
2541static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) 2573static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2542{ 2574{
2543 struct device_domain_info *info; 2575 struct device_domain_info *info;
@@ -2575,19 +2607,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2575 domain = alloc_domain(0); 2607 domain = alloc_domain(0);
2576 if (!domain) 2608 if (!domain)
2577 return NULL; 2609 return NULL;
2578 2610 if (domain_init(domain, iommu, gaw)) {
2579 if (domain_init(domain, gaw)) {
2580 domain_exit(domain); 2611 domain_exit(domain);
2581 return NULL; 2612 return NULL;
2582 } 2613 }
2583 2614
2584 if (init_iova_flush_queue(&domain->iovad,
2585 iommu_flush_iova,
2586 iova_entry_free)) {
2587 pr_warn("iova flush queue initialization failed\n");
2588 intel_iommu_strict = 1;
2589 }
2590
2591out: 2615out:
2592 return domain; 2616 return domain;
2593} 2617}
@@ -2692,6 +2716,8 @@ static int domain_prepare_identity_map(struct device *dev,
2692 return iommu_domain_identity_map(domain, start, end); 2716 return iommu_domain_identity_map(domain, start, end);
2693} 2717}
2694 2718
2719static int md_domain_init(struct dmar_domain *domain, int guest_width);
2720
2695static int __init si_domain_init(int hw) 2721static int __init si_domain_init(int hw)
2696{ 2722{
2697 struct dmar_rmrr_unit *rmrr; 2723 struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2728,7 @@ static int __init si_domain_init(int hw)
2702 if (!si_domain) 2728 if (!si_domain)
2703 return -EFAULT; 2729 return -EFAULT;
2704 2730
2705 if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 2731 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2706 domain_exit(si_domain); 2732 domain_exit(si_domain);
2707 return -EFAULT; 2733 return -EFAULT;
2708 } 2734 }
@@ -4829,6 +4855,31 @@ static void dmar_remove_one_dev_info(struct device *dev)
4829 spin_unlock_irqrestore(&device_domain_lock, flags); 4855 spin_unlock_irqrestore(&device_domain_lock, flags);
4830} 4856}
4831 4857
4858static int md_domain_init(struct dmar_domain *domain, int guest_width)
4859{
4860 int adjust_width;
4861
4862 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4863 domain_reserve_special_ranges(domain);
4864
4865 /* calculate AGAW */
4866 domain->gaw = guest_width;
4867 adjust_width = guestwidth_to_adjustwidth(guest_width);
4868 domain->agaw = width_to_agaw(adjust_width);
4869
4870 domain->iommu_coherency = 0;
4871 domain->iommu_snooping = 0;
4872 domain->iommu_superpage = 0;
4873 domain->max_addr = 0;
4874
4875 /* always allocate the top pgd */
4876 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4877 if (!domain->pgd)
4878 return -ENOMEM;
4879 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4880 return 0;
4881}
4882
4832static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) 4883static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4833{ 4884{
4834 struct dmar_domain *dmar_domain; 4885 struct dmar_domain *dmar_domain;
@@ -4843,7 +4894,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4843 pr_err("Can't allocate dmar_domain\n"); 4894 pr_err("Can't allocate dmar_domain\n");
4844 return NULL; 4895 return NULL;
4845 } 4896 }
4846 if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 4897 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4847 pr_err("Domain initialization failed\n"); 4898 pr_err("Domain initialization failed\n");
4848 domain_exit(dmar_domain); 4899 domain_exit(dmar_domain);
4849 return NULL; 4900 return NULL;