author	Joerg Roedel <jroedel@suse.de>	2014-07-23 10:04:37 -0400
committer	Joerg Roedel <jroedel@suse.de>	2014-07-23 10:04:37 -0400
commit	cbb24a25a871cbdac4e58b68b541aadd91b249be
tree	0b8207913c437d91c938a64a48c1a1751a31a37b /drivers/iommu/intel-iommu.c
parent	aa4d066a2a8041b7e73cee68ce5499aca29f265e
parent	e09f8ea560490e941139d23b4c278d3e6e2c871a
Merge branch 'core' into x86/vt-d

Conflicts:
	drivers/iommu/intel-iommu.c
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--	drivers/iommu/intel-iommu.c | 385
1 file changed, 186 insertions(+), 199 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c03d4b44bb92..3664d0d00338 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -45,7 +45,6 @@
 #include <asm/iommu.h>
 
 #include "irq_remapping.h"
-#include "pci.h"
 
 #define ROOT_SIZE		VTD_PAGE_SIZE
 #define CONTEXT_SIZE		VTD_PAGE_SIZE
@@ -451,7 +450,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-static struct iommu_ops intel_iommu_ops;
+static const struct iommu_ops intel_iommu_ops;
 
 static int __init intel_iommu_setup(char *str)
 {
@@ -1840,54 +1839,56 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	return 0;
 }
 
+struct domain_context_mapping_data {
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	int translation;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+				     u16 alias, void *opaque)
+{
+	struct domain_context_mapping_data *data = opaque;
+
+	return domain_context_mapping_one(data->domain, data->iommu,
+					  PCI_BUS_NUM(alias), alias & 0xff,
+					  data->translation);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
 			int translation)
 {
-	int ret;
-	struct pci_dev *pdev, *tmp, *parent;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
+	struct domain_context_mapping_data data;
 
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	ret = domain_context_mapping_one(domain, iommu, bus, devfn,
-					 translation);
-	if (ret || !dev_is_pci(dev))
-		return ret;
-
-	/* dependent device mapping */
-	pdev = to_pci_dev(dev);
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	if (!tmp)
-		return 0;
-	/* Secondary interface's bus number and devfn 0 */
-	parent = pdev->bus->self;
-	while (parent != tmp) {
-		ret = domain_context_mapping_one(domain, iommu,
-						 parent->bus->number,
-						 parent->devfn, translation);
-		if (ret)
-			return ret;
-		parent = parent->bus->self;
-	}
-	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-		return domain_context_mapping_one(domain, iommu,
-					tmp->subordinate->number, 0,
-					translation);
-	else /* this is a legacy PCI bridge */
-		return domain_context_mapping_one(domain, iommu,
-						  tmp->bus->number,
-						  tmp->devfn,
-						  translation);
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, bus, devfn,
+						  translation);
+
+	data.domain = domain;
+	data.iommu = iommu;
+	data.translation = translation;
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
+}
+
+static int domain_context_mapped_cb(struct pci_dev *pdev,
+				    u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int domain_context_mapped(struct device *dev)
 {
-	int ret;
-	struct pci_dev *pdev, *tmp, *parent;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
 
@@ -1895,30 +1896,11 @@ static int domain_context_mapped(struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	ret = device_context_mapped(iommu, bus, devfn);
-	if (!ret || !dev_is_pci(dev))
-		return ret;
+	if (!dev_is_pci(dev))
+		return device_context_mapped(iommu, bus, devfn);
 
-	/* dependent device mapping */
-	pdev = to_pci_dev(dev);
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	if (!tmp)
-		return ret;
-	/* Secondary interface's bus number and devfn 0 */
-	parent = pdev->bus->self;
-	while (parent != tmp) {
-		ret = device_context_mapped(iommu, parent->bus->number,
-					    parent->devfn);
-		if (!ret)
-			return ret;
-		parent = parent->bus->self;
-	}
-	if (pci_is_pcie(tmp))
-		return device_context_mapped(iommu, tmp->subordinate->number,
-					     0);
-	else
-		return device_context_mapped(iommu, tmp->bus->number,
-					     tmp->devfn);
+	return !pci_for_each_dma_alias(to_pci_dev(dev),
+				       domain_context_mapped_cb, iommu);
 }
 
 /* Returns a number of VTD pages, but aligned to MM page size */
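/*
 * Both conversions above funnel per-device work through
 * pci_for_each_dma_alias(), which the PCI core walks from the device
 * itself up toward the root bus, invoking the callback once for every
 * bus/devfn pair the device's DMA may be seen as (the device, quirked
 * aliases, and bridge aliases).  A non-zero return from the callback
 * stops the walk and is propagated.  Below is a minimal sketch of that
 * callback contract in isolation; count_alias() and count_dma_aliases()
 * are hypothetical helpers invented for illustration, not part of this
 * patch.
 */
#include <linux/pci.h>

static int count_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	int *count = opaque;

	/* alias packs the bus number in bits 15:8 and devfn in bits 7:0 */
	pr_debug("alias %02x:%02x.%d\n", PCI_BUS_NUM(alias),
		 PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
	(*count)++;
	return 0;	/* keep walking */
}

static int count_dma_aliases(struct pci_dev *pdev)
{
	int count = 0;

	pci_for_each_dma_alias(pdev, count_alias, &count);
	return count;
}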
@@ -2205,80 +2187,86 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
 	return domain;
 }
 
+static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	*(u16 *)opaque = alias;
+	return 0;
+}
+
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
-	struct dmar_domain *domain, *free = NULL;
-	struct intel_iommu *iommu = NULL;
+	struct dmar_domain *domain, *tmp;
+	struct intel_iommu *iommu;
 	struct device_domain_info *info;
-	struct pci_dev *dev_tmp = NULL;
+	u16 dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
-	u8 bridge_bus = 0, bridge_devfn = 0;
 
 	domain = find_domain(dev);
 	if (domain)
 		return domain;
 
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return NULL;
+
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
-		u16 segment;
 
-		segment = pci_domain_nr(pdev->bus);
-		dev_tmp = pci_find_upstream_pcie_bridge(pdev);
-		if (dev_tmp) {
-			if (pci_is_pcie(dev_tmp)) {
-				bridge_bus = dev_tmp->subordinate->number;
-				bridge_devfn = 0;
-			} else {
-				bridge_bus = dev_tmp->bus->number;
-				bridge_devfn = dev_tmp->devfn;
-			}
-			spin_lock_irqsave(&device_domain_lock, flags);
-			info = dmar_search_domain_by_dev_info(segment,
-							      bridge_bus,
-							      bridge_devfn);
-			if (info) {
-				iommu = info->iommu;
-				domain = info->domain;
-			}
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			/* pcie-pci bridge already has a domain, uses it */
-			if (info)
-				goto found_domain;
+		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+
+		spin_lock_irqsave(&device_domain_lock, flags);
+		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
+						      PCI_BUS_NUM(dma_alias),
+						      dma_alias & 0xff);
+		if (info) {
+			iommu = info->iommu;
+			domain = info->domain;
 		}
-	}
-
-	iommu = device_to_iommu(dev, &bus, &devfn);
-	if (!iommu)
-		goto error;
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+
+		/* DMA alias already has a domain, uses it */
+		if (info)
+			goto found_domain;
+	}
 
 	/* Allocate and initialize new domain for the device */
 	domain = alloc_domain(false);
 	if (!domain)
-		goto error;
+		return NULL;
+
 	if (iommu_attach_domain(domain, iommu)) {
 		free_domain_mem(domain);
-		domain = NULL;
-		goto error;
+		return NULL;
 	}
-	free = domain;
-	if (domain_init(domain, gaw))
-		goto error;
 
-	/* register pcie-to-pci device */
-	if (dev_tmp) {
-		domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
-					      NULL, domain);
+	if (domain_init(domain, gaw)) {
+		domain_exit(domain);
+		return NULL;
+	}
+
+	/* register PCI DMA alias device */
+	if (dev_is_pci(dev)) {
+		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					   dma_alias & 0xff, NULL, domain);
+
+		if (!tmp || tmp != domain) {
+			domain_exit(domain);
+			domain = tmp;
+		}
+
 		if (!domain)
-			goto error;
+			return NULL;
 	}
 
 found_domain:
-	domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
-error:
-	if (free != domain)
-		domain_exit(free);
+	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+
+	if (!tmp || tmp != domain) {
+		domain_exit(domain);
+		domain = tmp;
+	}
 
 	return domain;
 }
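/*
 * get_last_alias() above leans on the walk order of
 * pci_for_each_dma_alias(): the device itself is visited first and
 * upstream bridge aliases last, so repeatedly overwriting the u16
 * out-parameter leaves the topmost alias behind, the same bus/devfn the
 * old pci_find_upstream_pcie_bridge() logic computed by hand.  A minimal
 * sketch of that idiom; last_dma_alias() is a hypothetical wrapper, not
 * part of this patch.
 */
static u16 last_dma_alias(struct pci_dev *pdev)
{
	/* start from the device's own ID in case the walk adds nothing */
	u16 dma_alias = PCI_DEVID(pdev->bus->number, pdev->devfn);

	pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
	return dma_alias;
}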
@@ -3953,6 +3941,63 @@ static struct notifier_block intel_iommu_memory_nb = {
 	.priority = 0
 };
 
+
+static ssize_t intel_iommu_show_version(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	u32 ver = readl(iommu->reg + DMAR_VER_REG);
+	return sprintf(buf, "%d:%d\n",
+		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
+}
+static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
+
+static ssize_t intel_iommu_show_address(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%llx\n", iommu->reg_phys);
+}
+static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
+
+static ssize_t intel_iommu_show_cap(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%llx\n", iommu->cap);
+}
+static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
+
+static ssize_t intel_iommu_show_ecap(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct intel_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%llx\n", iommu->ecap);
+}
+static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
+
+static struct attribute *intel_iommu_attrs[] = {
+	&dev_attr_version.attr,
+	&dev_attr_address.attr,
+	&dev_attr_cap.attr,
+	&dev_attr_ecap.attr,
+	NULL,
+};
+
+static struct attribute_group intel_iommu_group = {
+	.name = "intel-iommu",
+	.attrs = intel_iommu_attrs,
+};
+
+const struct attribute_group *intel_iommu_groups[] = {
+	&intel_iommu_group,
+	NULL,
+};
+
 int __init intel_iommu_init(void)
 {
 	int ret = -ENODEV;
@@ -4024,6 +4069,11 @@ int __init intel_iommu_init(void)
 
 	init_iommu_pm_ops();
 
+	for_each_active_iommu(iommu, drhd)
+		iommu->iommu_dev = iommu_device_create(NULL, iommu,
+						       intel_iommu_groups,
+						       iommu->name);
+
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	bus_register_notifier(&pci_bus_type, &device_nb);
 	if (si_domain && !hw_pass_through)
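/*
 * With iommu_device_create() wiring in the "intel-iommu" attribute group,
 * each DRHD unit is registered with the iommu device class; DMAR units
 * are named dmar%d, so the first unit's registers should become readable
 * at /sys/class/iommu/dmar0/intel-iommu/{version,address,cap,ecap}.  A
 * userspace sketch, assuming that dmar0 path exists on the running
 * system:
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");

	if (!f)
		return 1;	/* no DMAR unit, or kernel lacks this change */
	if (fgets(buf, sizeof(buf), f))
		printf("dmar0 capability register: %s", buf);
	fclose(f);
	return 0;
}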
@@ -4042,33 +4092,27 @@ out_free_dmar:
 	return ret;
 }
 
+static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 					   struct device *dev)
 {
-	struct pci_dev *tmp, *parent, *pdev;
-
 	if (!iommu || !dev || !dev_is_pci(dev))
 		return;
 
-	pdev = to_pci_dev(dev);
-
-	/* dependent device detach */
-	tmp = pci_find_upstream_pcie_bridge(pdev);
-	/* Secondary interface's bus number and devfn 0 */
-	if (tmp) {
-		parent = pdev->bus->self;
-		while (parent != tmp) {
-			iommu_detach_dev(iommu, parent->bus->number,
-					 parent->devfn);
-			parent = parent->bus->self;
-		}
-		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-			iommu_detach_dev(iommu,
-				 tmp->subordinate->number, 0);
-		else /* this is a legacy PCI bridge */
-			iommu_detach_dev(iommu, tmp->bus->number,
-					 tmp->devfn);
-	}
+	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
 }
 
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
@@ -4370,99 +4414,42 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }
 
-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
 static int intel_iommu_add_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_dev *bridge, *dma_pdev = NULL;
+	struct intel_iommu *iommu;
 	struct iommu_group *group;
-	int ret;
 	u8 bus, devfn;
 
-	if (!device_to_iommu(dev, &bus, &devfn))
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
 		return -ENODEV;
 
-	bridge = pci_find_upstream_pcie_bridge(pdev);
-	if (bridge) {
-		if (pci_is_pcie(bridge))
-			dma_pdev = pci_get_domain_bus_and_slot(
-						pci_domain_nr(pdev->bus),
-						bridge->subordinate->number, 0);
-		if (!dma_pdev)
-			dma_pdev = pci_dev_get(bridge);
-	} else
-		dma_pdev = pci_dev_get(pdev);
-
-	/* Account for quirked devices */
-	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
-
-	/*
-	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as lowest numbered
-	 * function that also does not suport the required ACS flags.
-	 */
-	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
-		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
-
-		for (i = 0; i < 8; i++) {
-			struct pci_dev *tmp;
-
-			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
-			if (!tmp)
-				continue;
-
-			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
-				swap_pci_ref(&dma_pdev, tmp);
-				break;
-			}
-			pci_dev_put(tmp);
-		}
-	}
-
-	/*
-	 * Devices on the root bus go through the iommu.  If that's not us,
-	 * find the next upstream device and test ACS up to the root bus.
-	 * Finding the next device may require skipping virtual buses.
-	 */
-	while (!pci_is_root_bus(dma_pdev->bus)) {
-		struct pci_bus *bus = dma_pdev->bus;
-
-		while (!bus->self) {
-			if (!pci_is_root_bus(bus))
-				bus = bus->parent;
-			else
-				goto root_bus;
-		}
-
-		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
-			break;
-
-		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
-	}
-
-root_bus:
-	group = iommu_group_get(&dma_pdev->dev);
-	pci_dev_put(dma_pdev);
-	if (!group) {
-		group = iommu_group_alloc();
-		if (IS_ERR(group))
-			return PTR_ERR(group);
-	}
-
-	ret = iommu_group_add_device(group, dev);
+	iommu_device_link(iommu->iommu_dev, dev);
+
+	group = iommu_group_get_for_dev(dev);
+
+	if (IS_ERR(group))
+		return PTR_ERR(group);
 
 	iommu_group_put(group);
-	return ret;
+	return 0;
 }
 
 static void intel_iommu_remove_device(struct device *dev)
 {
+	struct intel_iommu *iommu;
+	u8 bus, devfn;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return;
+
 	iommu_group_remove_device(dev);
+
+	iommu_device_unlink(iommu->iommu_dev, dev);
 }
 
-static struct iommu_ops intel_iommu_ops = {
+static const struct iommu_ops intel_iommu_ops = {
 	.domain_init	= intel_iommu_domain_init,
 	.domain_destroy	= intel_iommu_domain_destroy,
 	.attach_dev	= intel_iommu_attach_device,