author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 20:07:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 20:07:04 -0400
commit     e0fb1b36398487475e0d2c50264e4ec1eaed3e11
tree       4541c8d00a265d2db5b7f2b2c33eb8ccb5819a37  /drivers/iommu/amd_iommu.c
parent     f4c80d5a16eb4b08a0d9ade154af1ebdc63f5752
parent     6c0b43df74f900e7f31a49d1844f166df0f8afc6
Merge tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "The updates include:

   - rate limiting for the VT-d fault handler

   - remove statistics code from the AMD IOMMU driver.  It is unused and
     should be replaced by something more generic if needed

   - per-domain pagesize-bitmaps in IOMMU core code to support systems
     with different types of IOMMUs

   - support for ACPI devices in the AMD IOMMU driver

   - 4GB mode support for Mediatek IOMMU driver

   - ARM-SMMU updates from Will Deacon:
       - support for 64k pages with SMMUv1 implementations (e.g MMU-401)
       - remove open-coded 64-bit MMIO accessors
       - initial support for 16-bit VMIDs, as supported by some ThunderX
         SMMU implementations
       - a couple of errata workarounds for silicon in the field

   - various fixes here and there"

* tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (44 commits)
  iommu/arm-smmu: Use per-domain page sizes.
  iommu/amd: Remove statistics code
  iommu/dma: Finish optimising higher-order allocations
  iommu: Allow selecting page sizes per domain
  iommu: of: enforce const-ness of struct iommu_ops
  iommu: remove unused priv field from struct iommu_ops
  iommu/dma: Implement scatterlist segment merging
  iommu/arm-smmu: Clear cache lock bit of ACR
  iommu/arm-smmu: Support SMMUv1 64KB supplement
  iommu/arm-smmu: Decouple context format from kernel config
  iommu/arm-smmu: Tidy up 64-bit/atomic I/O accesses
  io-64-nonatomic: Add relaxed accessor variants
  iommu/arm-smmu: Work around MMU-500 prefetch errata
  iommu/arm-smmu: Convert ThunderX workaround to new method
  iommu/arm-smmu: Differentiate specific implementations
  iommu/arm-smmu: Workaround for ThunderX erratum #27704
  iommu/arm-smmu: Add support for 16 bit VMID
  iommu/amd: Move get_device_id() and friends to beginning of file
  iommu/amd: Don't use IS_ERR_VALUE to check integer values
  iommu/amd: Signedness bug in acpihid_device_group()
  ...
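For reference, a minimal user-space sketch of the HID/UID matching rule that the ACPI-device support in this pull introduces (see the new match_hid_uid() helper in the diff below): a device matches a map entry when the HIDs are equal and, if both the device and the entry carry a UID, the UIDs are equal as well; an entry with an empty UID matches any UID. The struct hid_map_entry type, the example HID string and the main() harness here are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's struct acpihid_map_entry. */
struct hid_map_entry {
	const char *hid;
	const char *uid;
	int devid;
};

/* Returns 0 on match, non-zero otherwise (strcmp-style, as in the patch). */
static int match_hid_uid(const char *hid, const char *uid,
			 const struct hid_map_entry *entry)
{
	if (!hid || !*hid)
		return -1;			/* device exposes no ACPI HID */

	if (!uid || !*uid)
		return strcmp(hid, entry->hid);	/* device has no UID: HID decides */

	if (!*entry->uid)
		return strcmp(hid, entry->hid);	/* entry accepts any UID for this HID */

	return strcmp(hid, entry->hid) || strcmp(uid, entry->uid);
}

int main(void)
{
	/* Example values only; "EXHID001" is not a real ACPI HID. */
	const struct hid_map_entry e = { .hid = "EXHID001", .uid = "0", .devid = 0x42 };

	printf("%d\n", match_hid_uid("EXHID001", "0", &e));	/* 0: HID and UID match   */
	printf("%d\n", match_hid_uid("EXHID001", "1", &e));	/* 1: UID differs         */
	printf("%d\n", match_hid_uid("EXHID001", NULL, &e));	/* 0: no UID, HID decides */
	return 0;
}

The kernel version reads the HID and UID from ACPI_COMPANION(dev) and returns -ENODEV when the device has no HID; that error then propagates as a negative devid out of get_device_id(), which the rest of the patch checks with "if (devid < 0)".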
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--  drivers/iommu/amd_iommu.c  267
1 file changed, 149 insertions(+), 118 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3839fd2865a6..634f636393d5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -19,6 +19,8 @@
 
 #include <linux/ratelimit.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
 #include <linux/pci-ats.h>
 #include <linux/bitmap.h>
 #include <linux/slab.h>
@@ -72,6 +74,7 @@ static DEFINE_SPINLOCK(dev_data_list_lock);
 
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
+LIST_HEAD(acpihid_map);
 
 /*
  * Domain for untranslated devices - only allocated
@@ -162,18 +165,65 @@ struct dma_ops_domain {
  *
  ****************************************************************************/
 
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+static inline int match_hid_uid(struct device *dev,
+				struct acpihid_map_entry *entry)
 {
-	return container_of(dom, struct protection_domain, domain);
+	const char *hid, *uid;
+
+	hid = acpi_device_hid(ACPI_COMPANION(dev));
+	uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+	if (!hid || !(*hid))
+		return -ENODEV;
+
+	if (!uid || !(*uid))
+		return strcmp(hid, entry->hid);
+
+	if (!(*entry->uid))
+		return strcmp(hid, entry->hid);
+
+	return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
 }
 
-static inline u16 get_device_id(struct device *dev)
+static inline u16 get_pci_device_id(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 
 	return PCI_DEVID(pdev->bus->number, pdev->devfn);
 }
 
+static inline int get_acpihid_device_id(struct device *dev,
+					struct acpihid_map_entry **entry)
+{
+	struct acpihid_map_entry *p;
+
+	list_for_each_entry(p, &acpihid_map, list) {
+		if (!match_hid_uid(dev, p)) {
+			if (entry)
+				*entry = p;
+			return p->devid;
+		}
+	}
+	return -EINVAL;
+}
+
+static inline int get_device_id(struct device *dev)
+{
+	int devid;
+
+	if (dev_is_pci(dev))
+		devid = get_pci_device_id(dev);
+	else
+		devid = get_acpihid_device_id(dev, NULL);
+
+	return devid;
+}
+
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct protection_domain, domain);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -222,6 +272,7 @@ static u16 get_alias(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	u16 devid, ivrs_alias, pci_alias;
 
+	/* The callers make sure that get_device_id() does not fail here */
 	devid = get_device_id(dev);
 	ivrs_alias = amd_iommu_alias_table[devid];
 	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
@@ -289,6 +340,29 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
 	return dev->archdata.iommu;
 }
 
+/*
+* Find or create an IOMMU group for a acpihid device.
+*/
+static struct iommu_group *acpihid_device_group(struct device *dev)
+{
+	struct acpihid_map_entry *p, *entry = NULL;
+	int devid;
+
+	devid = get_acpihid_device_id(dev, &entry);
+	if (devid < 0)
+		return ERR_PTR(devid);
+
+	list_for_each_entry(p, &acpihid_map, list) {
+		if ((devid == p->devid) && p->group)
+			entry->group = p->group;
+	}
+
+	if (!entry->group)
+		entry->group = generic_device_group(dev);
+
+	return entry->group;
+}
+
 static bool pci_iommuv2_capable(struct pci_dev *pdev)
 {
 	static const int caps[] = {
@@ -340,9 +414,11 @@ static void init_unity_mappings_for_device(struct device *dev,
 					 struct dma_ops_domain *dma_dom)
 {
 	struct unity_map_entry *e;
-	u16 devid;
+	int devid;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	list_for_each_entry(e, &amd_iommu_unity_map, list) {
 		if (!(devid >= e->devid_start && devid <= e->devid_end))
@@ -357,16 +433,14 @@ static void init_unity_mappings_for_device(struct device *dev,
  */
 static bool check_device(struct device *dev)
 {
-	u16 devid;
+	int devid;
 
 	if (!dev || !dev->dma_mask)
 		return false;
 
-	/* No PCI device */
-	if (!dev_is_pci(dev))
-		return false;
-
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return false;
 
 	/* Out of our scope? */
 	if (devid > amd_iommu_last_bdf)
@@ -401,22 +475,26 @@ out:
 
 static int iommu_init_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
+	int devid;
 
 	if (dev->archdata.iommu)
 		return 0;
 
-	dev_data = find_dev_data(get_device_id(dev));
+	devid = get_device_id(dev);
+	if (devid < 0)
+		return devid;
+
+	dev_data = find_dev_data(devid);
 	if (!dev_data)
 		return -ENOMEM;
 
 	dev_data->alias = get_alias(dev);
 
-	if (pci_iommuv2_capable(pdev)) {
+	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
 		struct amd_iommu *iommu;
 
 		iommu = amd_iommu_rlookup_table[dev_data->devid];
 		dev_data->iommu_v2 = iommu->is_iommu_v2;
 	}
 
@@ -430,9 +508,13 @@ static int iommu_init_device(struct device *dev)
 
 static void iommu_ignore_device(struct device *dev)
 {
-	u16 devid, alias;
+	u16 alias;
+	int devid;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
+
 	alias = get_alias(dev);
 
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
@@ -444,8 +526,14 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-	struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
+	int devid;
+	struct iommu_dev_data *dev_data;
 
+	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
+
+	dev_data = search_dev_data(devid);
 	if (!dev_data)
 		return;
 
@@ -466,70 +554,6 @@ static void iommu_uninit_device(struct device *dev)
  */
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-DECLARE_STATS_COUNTER(complete_ppr);
-DECLARE_STATS_COUNTER(invalidate_iotlb);
-DECLARE_STATS_COUNTER(invalidate_iotlb_all);
-DECLARE_STATS_COUNTER(pri_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
-	if (stats_dir == NULL)
-		return;
-
-	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
-				       &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
-	stats_dir = debugfs_create_dir("amd-iommu", NULL);
-	if (stats_dir == NULL)
-		return;
-
-	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-					 &amd_iommu_unmap_flush);
-
-	amd_iommu_stats_add(&compl_wait);
-	amd_iommu_stats_add(&cnt_map_single);
-	amd_iommu_stats_add(&cnt_unmap_single);
-	amd_iommu_stats_add(&cnt_map_sg);
-	amd_iommu_stats_add(&cnt_unmap_sg);
-	amd_iommu_stats_add(&cnt_alloc_coherent);
-	amd_iommu_stats_add(&cnt_free_coherent);
-	amd_iommu_stats_add(&cross_page);
-	amd_iommu_stats_add(&domain_flush_single);
-	amd_iommu_stats_add(&domain_flush_all);
-	amd_iommu_stats_add(&alloced_io_mem);
-	amd_iommu_stats_add(&total_map_requests);
-	amd_iommu_stats_add(&complete_ppr);
-	amd_iommu_stats_add(&invalidate_iotlb);
-	amd_iommu_stats_add(&invalidate_iotlb_all);
-	amd_iommu_stats_add(&pri_requests);
-}
-
-#endif
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -652,8 +676,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
 
-	INC_STATS_COUNTER(pri_requests);
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -2283,13 +2305,17 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
 static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *pdev;
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;
 
 	dev_data = get_dev_data(dev);
 
+	if (!dev_is_pci(dev))
+		goto skip_ats_check;
+
+	pdev = to_pci_dev(dev);
 	if (domain->flags & PD_IOMMUV2_MASK) {
 		if (!dev_data->passthrough)
 			return -EINVAL;
@@ -2308,6 +2334,7 @@ static int attach_device(struct device *dev,
 		dev_data->ats.qdep = pci_ats_queue_depth(pdev);
 	}
 
+skip_ats_check:
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev_data, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -2364,6 +2391,9 @@ static void detach_device(struct device *dev)
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
+	if (!dev_is_pci(dev))
+		return;
+
 	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
 		pdev_iommuv2_disable(to_pci_dev(dev));
 	else if (dev_data->ats.enabled)
@@ -2377,13 +2407,15 @@ static int amd_iommu_add_device(struct device *dev)
 	struct iommu_dev_data *dev_data;
 	struct iommu_domain *domain;
 	struct amd_iommu *iommu;
-	u16 devid;
-	int ret;
+	int ret, devid;
 
 	if (!check_device(dev) || get_dev_data(dev))
 		return 0;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return devid;
+
 	iommu = amd_iommu_rlookup_table[devid];
 
 	ret = iommu_init_device(dev);
@@ -2421,18 +2453,29 @@ out:
 static void amd_iommu_remove_device(struct device *dev)
 {
 	struct amd_iommu *iommu;
-	u16 devid;
+	int devid;
 
 	if (!check_device(dev))
 		return;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
+
 	iommu = amd_iommu_rlookup_table[devid];
 
 	iommu_uninit_device(dev);
 	iommu_completion_wait(iommu);
 }
 
+static struct iommu_group *amd_iommu_device_group(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return pci_device_group(dev);
+
+	return acpihid_device_group(dev);
+}
+
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -2597,11 +2640,6 @@ static dma_addr_t __map_single(struct device *dev,
 	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
-	INC_STATS_COUNTER(total_map_requests);
-
-	if (pages > 1)
-		INC_STATS_COUNTER(cross_page);
-
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
@@ -2622,8 +2660,6 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	ADD_STATS_COUNTER(alloced_io_mem, size);
-
 	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
 		domain_flush_complete(&dma_dom->domain);
@@ -2671,8 +2707,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	SUB_STATS_COUNTER(alloced_io_mem, size);
-
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 }
 
@@ -2688,8 +2722,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	struct protection_domain *domain;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_single);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
@@ -2710,8 +2742,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 {
 	struct protection_domain *domain;
 
-	INC_STATS_COUNTER(cnt_unmap_single);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2734,8 +2764,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	int mapped_elems = 0;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return 0;
@@ -2781,8 +2809,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int i;
 
-	INC_STATS_COUNTER(cnt_unmap_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2805,8 +2831,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_alloc_coherent);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
 		page = alloc_pages(flag, get_order(size));
@@ -2860,8 +2884,6 @@ static void free_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_free_coherent);
-
 	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
@@ -2926,7 +2948,17 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 
 int __init amd_iommu_init_api(void)
 {
-	return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+	int err = 0;
+
+	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+	if (err)
+		return err;
+#ifdef CONFIG_ARM_AMBA
+	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
+	if (err)
+		return err;
+#endif
+	return 0;
 }
 
 int __init amd_iommu_init_dma_ops(void)
@@ -2943,8 +2975,6 @@ int __init amd_iommu_init_dma_ops(void)
 	if (!swiotlb)
 		dma_ops = &nommu_dma_ops;
 
-	amd_iommu_stats_init();
-
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
@@ -3098,12 +3128,14 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 {
 	struct iommu_dev_data *dev_data = dev->archdata.iommu;
 	struct amd_iommu *iommu;
-	u16 devid;
+	int devid;
 
 	if (!check_device(dev))
 		return;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	if (dev_data->domain != NULL)
 		detach_device(dev);
@@ -3221,9 +3253,11 @@ static void amd_iommu_get_dm_regions(struct device *dev,
 				     struct list_head *head)
 {
 	struct unity_map_entry *entry;
-	u16 devid;
+	int devid;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
 		struct iommu_dm_region *region;
@@ -3270,7 +3304,7 @@ static const struct iommu_ops amd_iommu_ops = {
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
-	.device_group = pci_device_group,
+	.device_group = amd_iommu_device_group,
 	.get_dm_regions = amd_iommu_get_dm_regions,
 	.put_dm_regions = amd_iommu_put_dm_regions,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
@@ -3431,8 +3465,6 @@ out:
 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 				  u64 address)
 {
-	INC_STATS_COUNTER(invalidate_iotlb);
-
 	return __flush_pasid(domain, pasid, address, false);
 }
 
@@ -3453,8 +3485,6 @@ EXPORT_SYMBOL(amd_iommu_flush_page);
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 {
-	INC_STATS_COUNTER(invalidate_iotlb_all);
-
 	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 			     true);
 }
@@ -3574,8 +3604,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
 
-	INC_STATS_COUNTER(complete_ppr);
-
 	dev_data = get_dev_data(&pdev->dev);
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
@@ -3925,6 +3953,9 @@ static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
 	case X86_IRQ_ALLOC_TYPE_MSI:
 	case X86_IRQ_ALLOC_TYPE_MSIX:
 		devid = get_device_id(&info->msi_dev->dev);
+		if (devid < 0)
+			return NULL;
+
 		iommu = amd_iommu_rlookup_table[devid];
 		if (iommu)
 			return iommu->msi_domain;