Diffstat (limited to 'drivers/iommu/amd_iommu.c'):

 drivers/iommu/amd_iommu.c | 117 ++++++++++++++++++++++++++++++---------------
 1 file changed, 68 insertions(+), 49 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ecb0109a5360..505a9adac2d5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -88,6 +88,27 @@ int amd_iommu_max_glx_val = -1;
 static struct dma_map_ops amd_iommu_dma_ops;
 
 /*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+        struct list_head list;            /* For domain->dev_list */
+        struct list_head dev_data_list;   /* For global dev_data_list */
+        struct list_head alias_list;      /* Link alias-groups together */
+        struct iommu_dev_data *alias_data;/* The alias dev_data */
+        struct protection_domain *domain; /* Domain the device is bound to */
+        u16 devid;                        /* PCI Device ID */
+        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
+        bool passthrough;                 /* Default for device is pt_domain */
+        struct {
+                bool enabled;
+                int qdep;
+        } ats;                            /* ATS state */
+        bool pri_tlp;                     /* PASID TLB required for
+                                             PPR completions */
+        u32 errata;                       /* Bitmap for errata to apply */
+};
+
+/*
  * general struct to manage commands send to an IOMMU
  */
 struct iommu_cmd {
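
Note: alias_list above is an intrusive list head (<linux/list.h>); each iommu_dev_data can be spliced into a ring anchored at its alias-group root. As a hedged illustration only (walk_alias_group() is a hypothetical helper, not part of this diff), a group walk would look like:

    /* Sketch: visit every dev_data linked into the root's alias ring. */
    static void walk_alias_group(struct iommu_dev_data *head)
    {
            struct iommu_dev_data *entry;

            list_for_each_entry(entry, &head->alias_list, alias_list)
                    pr_debug("aliased devid %#06x\n", entry->devid);
    }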
@@ -114,8 +135,9 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
 	if (!dev_data)
 		return NULL;
 
+	INIT_LIST_HEAD(&dev_data->alias_list);
+
 	dev_data->devid = devid;
-	atomic_set(&dev_data->bind, 0);
 
 	spin_lock_irqsave(&dev_data_list_lock, flags);
 	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
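
Note: INIT_LIST_HEAD() makes the node point at itself, so a fresh dev_data is an empty, self-consistent ring; list walks over it simply visit nothing until iommu_init_device() links it to an alias root. A two-line sketch of the invariant:

    INIT_LIST_HEAD(&dev_data->alias_list);        /* next == prev == self */
    WARN_ON(!list_empty(&dev_data->alias_list));  /* holds until list_add() */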
@@ -260,17 +282,13 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
-static int init_iommu_group(struct device *dev)
+static void init_iommu_group(struct device *dev)
 {
 	struct iommu_group *group;
 
 	group = iommu_group_get_for_dev(dev);
-
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-	return 0;
+	if (!IS_ERR(group))
+		iommu_group_put(group);
 }
 
 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
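
Note: iommu_group_get_for_dev() finds or allocates the device's group, adds the device to it, and returns the group with a reference held. The function now only needs that side effect, so the reference is dropped on the spot and errors are deliberately ignored; no caller consumes the return value anymore. The pattern in isolation:

    /* Sketch: force group creation without keeping a reference. */
    struct iommu_group *group = iommu_group_get_for_dev(dev);

    if (!IS_ERR(group))
            iommu_group_put(group); /* the device stays in the group */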
@@ -340,7 +358,6 @@ static int iommu_init_device(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
 	u16 alias;
-	int ret;
 
 	if (dev->archdata.iommu)
 		return 0;
@@ -362,12 +379,9 @@ static int iommu_init_device(struct device *dev)
 			return -ENOTSUPP;
 		}
 		dev_data->alias_data = alias_data;
-	}
 
-	ret = init_iommu_group(dev);
-	if (ret) {
-		free_dev_data(dev_data);
-		return ret;
+		/* Add device to the alias_list */
+		list_add(&dev_data->alias_list, &alias_data->alias_list);
 	}
 
 	if (pci_iommuv2_capable(pdev)) {
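
Note: list_add() splices the new device in right behind its IVRS-defined root, so devices initialized later sit earlier in the ring. Assuming (hypothetically) devices A and then B are initialized against root R:

    /*
     * R.alias_list <-> B.alias_list <-> A.alias_list <-> (back to R)
     */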
@@ -455,6 +469,15 @@ int __init amd_iommu_init_devices(void)
 			goto out_free;
 	}
 
+	/*
+	 * Initialize IOMMU groups only after iommu_init_device() has
+	 * had a chance to populate any IVRS defined aliases.
+	 */
+	for_each_pci_dev(pdev) {
+		if (check_device(&pdev->dev))
+			init_iommu_group(&pdev->dev);
+	}
+
 	return 0;
 
 out_free:
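
Note: the group pass must run strictly after pass one, because iommu_group_get_for_dev() derives group membership from alias data that iommu_init_device() has only just recorded. Schematically (error handling omitted):

    for_each_pci_dev(pdev)                      /* pass 1 */
            iommu_init_device(&pdev->dev);      /* record IVRS aliases */

    for_each_pci_dev(pdev)                      /* pass 2 */
            init_iommu_group(&pdev->dev);       /* sees complete alias data */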
@@ -1368,6 +1391,9 @@ static int iommu_map_page(struct protection_domain *dom,
 	count     = PAGE_SIZE_PTE_COUNT(page_size);
 	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
 
+	if (!pte)
+		return -ENOMEM;
+
 	for (i = 0; i < count; ++i)
 		if (IOMMU_PTE_PRESENT(pte[i]))
 			return -EBUSY;
@@ -2122,35 +2148,29 @@ static void do_detach(struct iommu_dev_data *dev_data)
 static int __attach_device(struct iommu_dev_data *dev_data,
 			   struct protection_domain *domain)
 {
+	struct iommu_dev_data *head, *entry;
 	int ret;
 
 	/* lock domain */
 	spin_lock(&domain->lock);
 
-	if (dev_data->alias_data != NULL) {
-		struct iommu_dev_data *alias_data = dev_data->alias_data;
+	head = dev_data;
 
-		/* Some sanity checks */
-		ret = -EBUSY;
-		if (alias_data->domain != NULL &&
-		    alias_data->domain != domain)
-			goto out_unlock;
+	if (head->alias_data != NULL)
+		head = head->alias_data;
 
-		if (dev_data->domain != NULL &&
-		    dev_data->domain != domain)
-			goto out_unlock;
+	/* Now we have the root of the alias group, if any */
 
-		/* Do real assignment */
-		if (alias_data->domain == NULL)
-			do_attach(alias_data, domain);
-
-		atomic_inc(&alias_data->bind);
-	}
+	ret = -EBUSY;
+	if (head->domain != NULL)
+		goto out_unlock;
 
-	if (dev_data->domain == NULL)
-		do_attach(dev_data, domain);
+	/* Attach alias group root */
+	do_attach(head, domain);
 
-	atomic_inc(&dev_data->bind);
+	/* Attach other devices in the alias group */
+	list_for_each_entry(entry, &head->alias_list, alias_list)
+		do_attach(entry, domain);
 
 	ret = 0;
 
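
Note: this rewrite trades the per-device ->bind refcount for whole-group semantics: resolve the alias-group root, check the domain once on the root (the group attaches and detaches as a unit under domain->lock), then attach root and members. The root hop, shown as a standalone sketch (get_alias_root() is hypothetical, not part of this diff):

    static struct iommu_dev_data *get_alias_root(struct iommu_dev_data *d)
    {
            return d->alias_data ? d->alias_data : d;
    }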
@@ -2298,6 +2318,7 @@ static int attach_device(struct device *dev,
  */
 static void __detach_device(struct iommu_dev_data *dev_data)
 {
+	struct iommu_dev_data *head, *entry;
 	struct protection_domain *domain;
 	unsigned long flags;
 
@@ -2307,15 +2328,14 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (dev_data->alias_data != NULL) {
-		struct iommu_dev_data *alias_data = dev_data->alias_data;
+	head = dev_data;
+	if (head->alias_data != NULL)
+		head = head->alias_data;
 
-		if (atomic_dec_and_test(&alias_data->bind))
-			do_detach(alias_data);
-	}
+	list_for_each_entry(entry, &head->alias_list, alias_list)
+		do_detach(entry);
 
-	if (atomic_dec_and_test(&dev_data->bind))
-		do_detach(dev_data);
+	do_detach(head);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
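
Note: detach mirrors attach: members of the alias group go first, the root last, under the same domain->lock. Since a group now binds and unbinds atomically, nothing counts references anymore, which is also why cleanup_domain() below loses its atomic_set(&entry->bind, 0):

    /* Attach: do_attach(head), then each entry on head->alias_list. */
    /* Detach: each entry on head->alias_list, then do_detach(head). */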
@@ -2415,6 +2435,7 @@ static int device_change_notifier(struct notifier_block *nb,
 	case BUS_NOTIFY_ADD_DEVICE:
 
 		iommu_init_device(dev);
+		init_iommu_group(dev);
 
 		/*
 		 * dev_data is still NULL and
@@ -3158,7 +3179,6 @@ static void cleanup_domain(struct protection_domain *domain)
 		entry = list_first_entry(&domain->dev_list,
 					 struct iommu_dev_data, list);
 		__detach_device(entry);
-		atomic_set(&entry->bind, 0);
 	}
 
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -3384,20 +3404,20 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	return paddr;
 }
 
-static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
-				    unsigned long cap)
+static bool amd_iommu_capable(enum iommu_cap cap)
 {
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
-		return 1;
+		return true;
 	case IOMMU_CAP_INTR_REMAP:
-		return irq_remapping_enabled;
+		return (irq_remapping_enabled == 1);
 	}
 
-	return 0;
+	return false;
 }
 
 static const struct iommu_ops amd_iommu_ops = {
+	.capable = amd_iommu_capable,
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
 	.attach_dev = amd_iommu_attach_device,
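
Note: amd_iommu_capable() takes no domain: capability queries move from per-domain to per-bus, and the explicit (irq_remapping_enabled == 1) normalizes an int flag to the new bool return type. Assuming the matching 3.18-era core helper iommu_capable(bus, cap), a caller-side sketch:

    #include <linux/iommu.h>
    #include <linux/pci.h>

    static bool intr_remap_supported(void)
    {
            /* Replaces iommu_domain_has_cap(domain, IOMMU_CAP_INTR_REMAP). */
            return iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP);
    }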
@@ -3405,7 +3425,6 @@ static const struct iommu_ops amd_iommu_ops = {
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
-	.domain_has_cap = amd_iommu_domain_has_cap,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
 
@@ -4235,7 +4254,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 	return 0;
 }
 
-static int setup_hpet_msi(unsigned int irq, unsigned int id)
+static int alloc_hpet_msi(unsigned int irq, unsigned int id)
 {
 	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
@@ -4274,6 +4293,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
 	.compose_msi_msg = compose_msi_msg,
 	.msi_alloc_irq = msi_alloc_irq,
 	.msi_setup_irq = msi_setup_irq,
-	.setup_hpet_msi = setup_hpet_msi,
+	.alloc_hpet_msi = alloc_hpet_msi,
 };
 #endif