summaryrefslogtreecommitdiffstats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
author Lu Baolu <baolu.lu@linux.intel.com> 2019-06-11 20:28:51 -0400
committer Joerg Roedel <jroedel@suse.de> 2019-06-12 04:36:59 -0400
commit 123b2ffc376e1b3e9e015c75175b61e88a8b8518 (patch)
tree 53cc956263b3952a477dc210fe2e17e3573c7bce /drivers/iommu/intel-iommu.c
parent 16c9e29e12e8edbdf437001f46cf3f557aca80e8 (diff)
iommu/vt-d: Consolidate domain_init() to avoid duplication
The domain_init() and md_domain_init() functions do almost the same job. Consolidate them to avoid duplication.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r-- drivers/iommu/intel-iommu.c | 123
 1 file changed, 36 insertions(+), 87 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8c6ed39dec01..466129e7e50c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1841,63 +1841,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
1841 return agaw; 1841 return agaw;
1842} 1842}
1843 1843
1844static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1845 int guest_width)
1846{
1847 int adjust_width, agaw;
1848 unsigned long sagaw;
1849 int err;
1850
1851 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1852
1853 err = init_iova_flush_queue(&domain->iovad,
1854 iommu_flush_iova, iova_entry_free);
1855 if (err)
1856 return err;
1857
1858 domain_reserve_special_ranges(domain);
1859
1860 /* calculate AGAW */
1861 if (guest_width > cap_mgaw(iommu->cap))
1862 guest_width = cap_mgaw(iommu->cap);
1863 domain->gaw = guest_width;
1864 adjust_width = guestwidth_to_adjustwidth(guest_width);
1865 agaw = width_to_agaw(adjust_width);
1866 sagaw = cap_sagaw(iommu->cap);
1867 if (!test_bit(agaw, &sagaw)) {
1868 /* hardware doesn't support it, choose a bigger one */
1869 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1870 agaw = find_next_bit(&sagaw, 5, agaw);
1871 if (agaw >= 5)
1872 return -ENODEV;
1873 }
1874 domain->agaw = agaw;
1875
1876 if (ecap_coherent(iommu->ecap))
1877 domain->iommu_coherency = 1;
1878 else
1879 domain->iommu_coherency = 0;
1880
1881 if (ecap_sc_support(iommu->ecap))
1882 domain->iommu_snooping = 1;
1883 else
1884 domain->iommu_snooping = 0;
1885
1886 if (intel_iommu_superpage)
1887 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1888 else
1889 domain->iommu_superpage = 0;
1890
1891 domain->nid = iommu->node;
1892
1893 /* always allocate the top pgd */
1894 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1895 if (!domain->pgd)
1896 return -ENOMEM;
1897 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1898 return 0;
1899}
1900
1901static void domain_exit(struct dmar_domain *domain) 1844static void domain_exit(struct dmar_domain *domain)
1902{ 1845{
1903 struct page *freelist; 1846 struct page *freelist;
@@ -2578,6 +2521,31 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2578 return 0; 2521 return 0;
2579} 2522}
2580 2523
2524static int domain_init(struct dmar_domain *domain, int guest_width)
2525{
2526 int adjust_width;
2527
2528 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
2529 domain_reserve_special_ranges(domain);
2530
2531 /* calculate AGAW */
2532 domain->gaw = guest_width;
2533 adjust_width = guestwidth_to_adjustwidth(guest_width);
2534 domain->agaw = width_to_agaw(adjust_width);
2535
2536 domain->iommu_coherency = 0;
2537 domain->iommu_snooping = 0;
2538 domain->iommu_superpage = 0;
2539 domain->max_addr = 0;
2540
2541 /* always allocate the top pgd */
2542 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
2543 if (!domain->pgd)
2544 return -ENOMEM;
2545 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2546 return 0;
2547}
2548
2581static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) 2549static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2582{ 2550{
2583 struct device_domain_info *info; 2551 struct device_domain_info *info;
@@ -2615,11 +2583,19 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2615 domain = alloc_domain(0); 2583 domain = alloc_domain(0);
2616 if (!domain) 2584 if (!domain)
2617 return NULL; 2585 return NULL;
2618 if (domain_init(domain, iommu, gaw)) { 2586
2587 if (domain_init(domain, gaw)) {
2619 domain_exit(domain); 2588 domain_exit(domain);
2620 return NULL; 2589 return NULL;
2621 } 2590 }
2622 2591
2592 if (init_iova_flush_queue(&domain->iovad,
2593 iommu_flush_iova,
2594 iova_entry_free)) {
2595 pr_warn("iova flush queue initialization failed\n");
2596 intel_iommu_strict = 1;
2597 }
2598
2623out: 2599out:
2624 return domain; 2600 return domain;
2625} 2601}
@@ -2724,8 +2700,6 @@ static int domain_prepare_identity_map(struct device *dev,
2724 return iommu_domain_identity_map(domain, start, end); 2700 return iommu_domain_identity_map(domain, start, end);
2725} 2701}
2726 2702
2727static int md_domain_init(struct dmar_domain *domain, int guest_width);
2728
2729static int __init si_domain_init(int hw) 2703static int __init si_domain_init(int hw)
2730{ 2704{
2731 struct dmar_rmrr_unit *rmrr; 2705 struct dmar_rmrr_unit *rmrr;
@@ -2736,7 +2710,7 @@ static int __init si_domain_init(int hw)
2736 if (!si_domain) 2710 if (!si_domain)
2737 return -EFAULT; 2711 return -EFAULT;
2738 2712
2739 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 2713 if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2740 domain_exit(si_domain); 2714 domain_exit(si_domain);
2741 return -EFAULT; 2715 return -EFAULT;
2742 } 2716 }
@@ -4865,31 +4839,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
4865 spin_unlock_irqrestore(&device_domain_lock, flags); 4839 spin_unlock_irqrestore(&device_domain_lock, flags);
4866} 4840}
4867 4841
4868static int md_domain_init(struct dmar_domain *domain, int guest_width)
4869{
4870 int adjust_width;
4871
4872 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4873 domain_reserve_special_ranges(domain);
4874
4875 /* calculate AGAW */
4876 domain->gaw = guest_width;
4877 adjust_width = guestwidth_to_adjustwidth(guest_width);
4878 domain->agaw = width_to_agaw(adjust_width);
4879
4880 domain->iommu_coherency = 0;
4881 domain->iommu_snooping = 0;
4882 domain->iommu_superpage = 0;
4883 domain->max_addr = 0;
4884
4885 /* always allocate the top pgd */
4886 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4887 if (!domain->pgd)
4888 return -ENOMEM;
4889 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4890 return 0;
4891}
4892
4893static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) 4842static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4894{ 4843{
4895 struct dmar_domain *dmar_domain; 4844 struct dmar_domain *dmar_domain;
@@ -4904,7 +4853,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4904 pr_err("Can't allocate dmar_domain\n"); 4853 pr_err("Can't allocate dmar_domain\n");
4905 return NULL; 4854 return NULL;
4906 } 4855 }
4907 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 4856 if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4908 pr_err("Domain initialization failed\n"); 4857 pr_err("Domain initialization failed\n");
4909 domain_exit(dmar_domain); 4858 domain_exit(dmar_domain);
4910 return NULL; 4859 return NULL;