Diffstat (limited to 'drivers/iommu/intel-iommu.c')
 drivers/iommu/intel-iommu.c | 189 ++++++++++++++++++++++++++-----------------
 1 file changed, 96 insertions(+), 93 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..bdaed2da8a55 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
-				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
@@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw)
 	return agaw;
 }
 
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+		       int guest_width)
+{
+	int adjust_width, agaw;
+	unsigned long sagaw;
+	int err;
+
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+
+	err = init_iova_flush_queue(&domain->iovad,
+				    iommu_flush_iova, iova_entry_free);
+	if (err)
+		return err;
+
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	if (guest_width > cap_mgaw(iommu->cap))
+		guest_width = cap_mgaw(iommu->cap);
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	agaw = width_to_agaw(adjust_width);
+	sagaw = cap_sagaw(iommu->cap);
+	if (!test_bit(agaw, &sagaw)) {
+		/* hardware doesn't support it, choose a bigger one */
+		pr_debug("Hardware doesn't support agaw %d\n", agaw);
+		agaw = find_next_bit(&sagaw, 5, agaw);
+		if (agaw >= 5)
+			return -ENODEV;
+	}
+	domain->agaw = agaw;
+
+	if (ecap_coherent(iommu->ecap))
+		domain->iommu_coherency = 1;
+	else
+		domain->iommu_coherency = 0;
+
+	if (ecap_sc_support(iommu->ecap))
+		domain->iommu_snooping = 1;
+	else
+		domain->iommu_snooping = 0;
+
+	if (intel_iommu_superpage)
+		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+	else
+		domain->iommu_superpage = 0;
+
+	domain->nid = iommu->node;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	if (!domain->pgd)
+		return -ENOMEM;
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
 static void domain_exit(struct dmar_domain *domain)
 {
-	struct page *freelist;
 
 	/* Remove associated devices and clear attached or cached domains */
 	domain_remove_dev_info(domain);
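The consolidated domain_init() above derives everything from the IOMMU's capability registers: the requested guest width is clamped to cap_mgaw(), converted to an adjusted guest address width (AGAW), and then checked against the SAGAW capability bitmap, falling back to the next wider supported width before giving up with -ENODEV. A minimal userspace sketch of that fallback, assuming the usual one-bit-per-supported-AGAW encoding (next_supported_agaw() is an illustrative stand-in for find_next_bit(), not driver code):

    #include <stdio.h>

    /* Return the first set bit in mask at or above from; returns 5 ("no
     * bit found") just as find_next_bit(&sagaw, 5, agaw) does. */
    static int next_supported_agaw(unsigned long mask, int from)
    {
        for (int agaw = from; agaw < 5; agaw++)
            if (mask & (1UL << agaw))
                return agaw;
        return 5;
    }

    int main(void)
    {
        unsigned long sagaw = 1UL << 2; /* hardware supports only agaw 2 */
        int agaw = 1;                   /* width_to_agaw() asked for 1 */

        if (!(sagaw & (1UL << agaw)))   /* the !test_bit() case above */
            agaw = next_supported_agaw(sagaw, agaw);

        if (agaw >= 5)
            printf("no supported AGAW, fail with -ENODEV\n");
        else
            printf("falling back to agaw %d\n", agaw);
        return 0;
    }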
@@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain)
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 
-	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+	if (domain->pgd) {
+		struct page *freelist;
 
-	dma_free_pagelist(freelist);
+		freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+		dma_free_pagelist(freelist);
+	}
 
 	free_domain_mem(domain);
 }
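The new domain->pgd check matters because domain_init() can now fail in init_iova_flush_queue() before the top-level page table is allocated; the error path in find_or_alloc_domain() below still calls domain_exit(), which would otherwise pass a NULL pgd to domain_unmap().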
@@ -2048,26 +2105,9 @@ out_unlock:
 	return ret;
 }
 
-struct domain_context_mapping_data {
-	struct dmar_domain *domain;
-	struct intel_iommu *iommu;
-	struct pasid_table *table;
-};
-
-static int domain_context_mapping_cb(struct pci_dev *pdev,
-				     u16 alias, void *opaque)
-{
-	struct domain_context_mapping_data *data = opaque;
-
-	return domain_context_mapping_one(data->domain, data->iommu,
-					  data->table, PCI_BUS_NUM(alias),
-					  alias & 0xff);
-}
-
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
-	struct domain_context_mapping_data data;
 	struct pasid_table *table;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 		return -ENODEV;
 
 	table = intel_pasid_get_table(dev);
-
-	if (!dev_is_pci(dev))
-		return domain_context_mapping_one(domain, iommu, table,
-						  bus, devfn);
-
-	data.domain = domain;
-	data.iommu = iommu;
-	data.table = table;
-
-	return pci_for_each_dma_alias(to_pci_dev(dev),
-				      &domain_context_mapping_cb, &data);
+	return domain_context_mapping_one(domain, iommu, table, bus, devfn);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
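domain_context_mapping() no longer walks the device's PCI DMA aliases: the context entry is programmed only for the device's own (bus, devfn), which makes the domain_context_mapping_data/domain_context_mapping_cb plumbing around pci_for_each_dma_alias() unnecessary. Teardown is made symmetric near the end of the patch, where domain_context_clear() is removed in favor of a single domain_context_clear_one() call.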
@@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
 	return 0;
 }
 
-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
-	int adjust_width;
-
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	domain_reserve_special_ranges(domain);
-
-	/* calculate AGAW */
-	domain->gaw = guest_width;
-	adjust_width = guestwidth_to_adjustwidth(guest_width);
-	domain->agaw = width_to_agaw(adjust_width);
-
-	domain->iommu_coherency = 0;
-	domain->iommu_snooping = 0;
-	domain->iommu_superpage = 0;
-	domain->max_addr = 0;
-
-	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-	if (!domain->pgd)
-		return -ENOMEM;
-	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
-	return 0;
-}
-
 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 {
 	struct device_domain_info *info;
@@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	domain = alloc_domain(0);
 	if (!domain)
 		return NULL;
-
-	if (domain_init(domain, gaw)) {
+	if (domain_init(domain, iommu, gaw)) {
 		domain_exit(domain);
 		return NULL;
 	}
 
-	if (init_iova_flush_queue(&domain->iovad,
-				  iommu_flush_iova,
-				  iova_entry_free)) {
-		pr_warn("iova flush queue initialization failed\n");
-		intel_iommu_strict = 1;
-	}
-
 out:
 	return domain;
 }
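Moving the flush-queue setup into domain_init() also changes the failure policy: the deleted code merely warned and forced intel_iommu_strict when allocation failed, while domain_init() now propagates the error and the domain allocation fails outright. As a backstop, the intel_unmap() hunk below checks has_iova_flush_queue() so that a domain which never got a flush queue is handled synchronously.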
@@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev,
 	return iommu_domain_identity_map(domain, start, end);
 }
 
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
 static int __init si_domain_init(int hw)
 {
 	struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw)
 	if (!si_domain)
 		return -EFAULT;
 
-	if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		domain_exit(si_domain);
 		return -EFAULT;
 	}
@@ -3564,7 +3563,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-	if (intel_iommu_strict || (pdev && pdev->untrusted)) {
+	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
+	    !has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      nrpages, !freelist, 0);
 		/* free iova */
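Queueing an IOVA into an iova_domain whose flush queue was never initialized would use the never-allocated queue, so "no flush queue" is now treated like strict mode: flush the IOTLB immediately and free the IOVA on the spot rather than deferring both.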
@@ -4758,28 +4758,6 @@ out_free_dmar:
 	return ret;
 }
 
-static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
-{
-	struct intel_iommu *iommu = opaque;
-
-	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
-	return 0;
-}
-
-/*
- * NB - intel-iommu lacks any sort of reference counting for the users of
- * dependent devices. If multiple endpoints have intersecting dependent
- * devices, unbinding the driver from any one of them will possibly leave
- * the others unable to operate.
- */
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
-{
-	if (!iommu || !dev || !dev_is_pci(dev))
-		return;
-
-	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
-}
-
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
@@ -4800,7 +4778,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 			PASID_RID2PASID);
 
 	iommu_disable_dev_iotlb(info);
-	domain_context_clear(iommu, info->dev);
+	domain_context_clear_one(iommu, info->bus, info->devfn);
 	intel_pasid_free_table(info->dev);
 }
 
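With context entries installed only for the device's own request ID (see domain_context_mapping() earlier), __dmar_remove_one_dev_info() can clear exactly the entry it owns via domain_context_clear_one(iommu, info->bus, info->devfn); the alias-walking domain_context_clear() and its caveat about unrefcounted dependent devices go away with it.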
@@ -4829,6 +4807,31 @@ static void dmar_remove_one_dev_info(struct device *dev)
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
+{
+	int adjust_width;
+
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	domain->agaw = width_to_agaw(adjust_width);
+
+	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
+	domain->iommu_superpage = 0;
+	domain->max_addr = 0;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	if (!domain->pgd)
+		return -ENOMEM;
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
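md_domain_init() reinstates the old fixed-width initializer verbatim (no capability probing, no flush queue, zeroed feature flags) for the two callers that have no specific struct intel_iommu at hand at allocation time: si_domain_init() and intel_iommu_domain_alloc().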
@@ -4843,7 +4846,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 		pr_err("Can't allocate dmar_domain\n");
 		return NULL;
 	}
-	if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		pr_err("Domain initialization failed\n");
 		domain_exit(dmar_domain);
 		return NULL;