-rw-r--r--  drivers/iommu/intel-iommu.c          228
-rw-r--r--  drivers/iommu/intel_irq_remapping.c   55
2 files changed, 145 insertions, 138 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9128c84a51e7..f38ec7ab7673 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1841,54 +1841,56 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	return 0;
 }
 
+struct domain_context_mapping_data {
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	int translation;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+				     u16 alias, void *opaque)
+{
+	struct domain_context_mapping_data *data = opaque;
+
+	return domain_context_mapping_one(data->domain, data->iommu,
+					  PCI_BUS_NUM(alias), alias & 0xff,
+					  data->translation);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
 		       int translation)
 {
-	int ret;
-	struct pci_dev *pdev, *tmp, *parent;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
+	struct domain_context_mapping_data data;
 
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ret = domain_context_mapping_one(domain, iommu, bus, devfn,
-					 translation);
-	if (ret || !dev_is_pci(dev))
-		return ret;
-
-	/* dependent device mapping */
-	pdev = to_pci_dev(dev);
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	if (!tmp)
-		return 0;
-	/* Secondary interface's bus number and devfn 0 */
-	parent = pdev->bus->self;
-	while (parent != tmp) {
-		ret = domain_context_mapping_one(domain, iommu,
-						 parent->bus->number,
-						 parent->devfn, translation);
-		if (ret)
-			return ret;
-		parent = parent->bus->self;
-	}
-	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-		return domain_context_mapping_one(domain, iommu,
-						  tmp->subordinate->number, 0,
-						  translation);
-	else /* this is a legacy PCI bridge */
-		return domain_context_mapping_one(domain, iommu,
-						  tmp->bus->number,
-						  tmp->devfn,
-						  translation);
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, bus, devfn,
+						  translation);
+
+	data.domain = domain;
+	data.iommu = iommu;
+	data.translation = translation;
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
+}
+
+static int domain_context_mapped_cb(struct pci_dev *pdev,
+				    u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int domain_context_mapped(struct device *dev)
 {
-	int ret;
-	struct pci_dev *pdev, *tmp, *parent;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
 
@@ -1896,30 +1898,11 @@ static int domain_context_mapped(struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	ret = device_context_mapped(iommu, bus, devfn);
-	if (!ret || !dev_is_pci(dev))
-		return ret;
+	if (!dev_is_pci(dev))
+		return device_context_mapped(iommu, bus, devfn);
 
-	/* dependent device mapping */
-	pdev = to_pci_dev(dev);
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	if (!tmp)
-		return ret;
-	/* Secondary interface's bus number and devfn 0 */
-	parent = pdev->bus->self;
-	while (parent != tmp) {
-		ret = device_context_mapped(iommu, parent->bus->number,
-					    parent->devfn);
-		if (!ret)
-			return ret;
-		parent = parent->bus->self;
-	}
-	if (pci_is_pcie(tmp))
-		return device_context_mapped(iommu, tmp->subordinate->number,
-					     0);
-	else
-		return device_context_mapped(iommu, tmp->bus->number,
-					     tmp->devfn);
+	return !pci_for_each_dma_alias(to_pci_dev(dev),
+				       domain_context_mapped_cb, iommu);
 }
 
 /* Returns a number of VTD pages, but aligned to MM page size */
@@ -2208,79 +2191,86 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
 	return domain;
 }
 
+static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	*(u16 *)opaque = alias;
+	return 0;
+}
+
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
-	struct dmar_domain *domain, *free = NULL;
-	struct intel_iommu *iommu = NULL;
+	struct dmar_domain *domain, *tmp;
+	struct intel_iommu *iommu;
 	struct device_domain_info *info;
-	struct pci_dev *dev_tmp = NULL;
+	u16 dma_alias;
 	unsigned long flags;
-	u8 bus, devfn, bridge_bus, bridge_devfn;
+	u8 bus, devfn;
 
 	domain = find_domain(dev);
 	if (domain)
 		return domain;
 
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return NULL;
+
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
-		u16 segment;
 
-		segment = pci_domain_nr(pdev->bus);
-		dev_tmp = pci_find_upstream_pcie_bridge(pdev);
-		if (dev_tmp) {
-			if (pci_is_pcie(dev_tmp)) {
-				bridge_bus = dev_tmp->subordinate->number;
-				bridge_devfn = 0;
-			} else {
-				bridge_bus = dev_tmp->bus->number;
-				bridge_devfn = dev_tmp->devfn;
-			}
-			spin_lock_irqsave(&device_domain_lock, flags);
-			info = dmar_search_domain_by_dev_info(segment,
-							      bridge_bus,
-							      bridge_devfn);
-			if (info) {
-				iommu = info->iommu;
-				domain = info->domain;
-			}
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			/* pcie-pci bridge already has a domain, uses it */
-			if (info)
-				goto found_domain;
+		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+
+		spin_lock_irqsave(&device_domain_lock, flags);
+		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
+						      PCI_BUS_NUM(dma_alias),
+						      dma_alias & 0xff);
+		if (info) {
+			iommu = info->iommu;
+			domain = info->domain;
 		}
-	}
+		spin_unlock_irqrestore(&device_domain_lock, flags);
 
-	iommu = device_to_iommu(dev, &bus, &devfn);
-	if (!iommu)
-		goto error;
+		/* DMA alias already has a domain, uses it */
+		if (info)
+			goto found_domain;
+	}
 
 	/* Allocate and initialize new domain for the device */
 	domain = alloc_domain(false);
 	if (!domain)
-		goto error;
+		return NULL;
+
 	if (iommu_attach_domain(domain, iommu)) {
 		free_domain_mem(domain);
-		domain = NULL;
-		goto error;
+		return NULL;
 	}
-	free = domain;
-	if (domain_init(domain, gaw))
-		goto error;
 
-	/* register pcie-to-pci device */
-	if (dev_tmp) {
-		domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
-					      NULL, domain);
+	if (domain_init(domain, gaw)) {
+		domain_exit(domain);
+		return NULL;
+	}
+
+	/* register PCI DMA alias device */
+	if (dev_is_pci(dev)) {
+		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					   dma_alias & 0xff, NULL, domain);
+
+		if (!tmp || tmp != domain) {
+			domain_exit(domain);
+			domain = tmp;
+		}
+
 		if (!domain)
-			goto error;
+			return NULL;
 	}
 
 found_domain:
-	domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
-error:
-	if (free != domain)
-		domain_exit(free);
+	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+
+	if (!tmp || tmp != domain) {
+		domain_exit(domain);
+		domain = tmp;
+	}
 
 	return domain;
 }
@@ -4043,33 +4033,27 @@ out_free_dmar:
 	return ret;
 }
 
+static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 					   struct device *dev)
 {
-	struct pci_dev *tmp, *parent, *pdev;
-
 	if (!iommu || !dev || !dev_is_pci(dev))
 		return;
 
-	pdev = to_pci_dev(dev);
-
-	/* dependent device detach */
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	/* Secondary interface's bus number and devfn 0 */
-	if (tmp) {
-		parent = pdev->bus->self;
-		while (parent != tmp) {
-			iommu_detach_dev(iommu, parent->bus->number,
-					 parent->devfn);
-			parent = parent->bus->self;
-		}
-		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-			iommu_detach_dev(iommu,
-					 tmp->subordinate->number, 0);
-		else /* this is a legacy PCI bridge */
-			iommu_detach_dev(iommu, tmp->bus->number,
-					 tmp->devfn);
-	}
+	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
 }
 
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
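
[Editor's note] Every hunk in this file makes the same conversion: the open-coded walk up to pci_find_upstream_pcie_bridge() is replaced by a callback passed to pci_for_each_dma_alias(). As a reference for the callback contract these helpers rely on, here is a minimal sketch (illustrative only, not part of the patch): the iterator invokes the function for the device's own requester ID and for each DMA alias, including the (subordinate bus, devfn 0) alias produced by a PCIe-to-PCI bridge, and a non-zero return value stops the walk and is propagated to the caller.

/* Illustrative only -- not from this patch.  A pci_for_each_dma_alias()
 * callback that just logs each alias; PCI_BUS_NUM(), PCI_SLOT() and
 * PCI_FUNC() unpack the 16-bit requester ID passed in 'alias'.
 */
static int print_dma_alias_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
		 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
		 PCI_FUNC(alias & 0xff));
	return 0;	/* zero means continue the walk */
}

/* usage: pci_for_each_dma_alias(pdev, print_dma_alias_cb, NULL); */
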
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 9b174893f0f5..757e0b0d19ff 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -369,29 +369,52 @@ static int set_hpet_sid(struct irte *irte, u8 id)
 	return 0;
 }
 
+struct set_msi_sid_data {
+	struct pci_dev *pdev;
+	u16 alias;
+};
+
+static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct set_msi_sid_data *data = opaque;
+
+	data->pdev = pdev;
+	data->alias = alias;
+
+	return 0;
+}
+
 static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 {
-	struct pci_dev *bridge;
+	struct set_msi_sid_data data;
 
 	if (!irte || !dev)
 		return -1;
 
-	/* PCIe device or Root Complex integrated PCI device */
-	if (pci_is_pcie(dev) || !dev->bus->parent) {
-		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-			     (dev->bus->number << 8) | dev->devfn);
-		return 0;
-	}
+	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
 
-	bridge = pci_find_upstream_pcie_bridge(dev);
-	if (bridge) {
-		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
-			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
-				(bridge->bus->number << 8) | dev->bus->number);
-		else /* this is a legacy PCI bridge */
-			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-				(bridge->bus->number << 8) | bridge->devfn);
-	}
+	/*
+	 * DMA alias provides us with a PCI device and alias.  The only case
+	 * where the it will return an alias on a different bus than the
+	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
+	 * the subordinate bus.  In this case we can only verify the bus.
+	 *
+	 * If the alias device is on a different bus than our source device
+	 * then we have a topology based alias, use it.
+	 *
+	 * Otherwise, the alias is for a device DMA quirk and we cannot
+	 * assume that MSI uses the same requester ID.  Therefore use the
+	 * original device.
+	 */
+	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
+		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+			     PCI_DEVID(PCI_BUS_NUM(data.alias),
+				       dev->bus->number));
+	else if (data.pdev->bus->number != dev->bus->number)
+		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
+	else
+		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+			     PCI_DEVID(dev->bus->number, dev->devfn));
 
 	return 0;
 }
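
[Editor's note] The three branches above only differ in which 16-bit source-id is programmed via set_irte_sid() for the IOMMU to check against the MSI write's requester ID. A minimal sketch of the packing helpers involved, with made-up example values (not from the patch):

/* Illustrative only: PCI_DEVID() packs bus/devfn into the 16-bit SID
 * programmed by set_irte_sid(), and PCI_BUS_NUM() recovers the bus number.
 */
u16 sid   = PCI_DEVID(0x02, PCI_DEVFN(0x1f, 3));	/* 0x02fb */
u8  bus   = PCI_BUS_NUM(sid);				/* 0x02 */
u8  devfn = sid & 0xff;					/* 0xfb */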