author     Linus Torvalds <torvalds@linux-foundation.org>    2013-11-15 00:02:18 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-11-15 00:02:18 -0500
commit     91838e2dab460ba589fb90db0fe1f504f5c04f12
tree       53974ed7a32ddd63d75e2da63f00d8308eb8d08a /drivers/iommu
parent     f080480488028bcc25357f85e8ae54ccc3bb7173
parent     bb51eeee5a947f61eeefaa55221c26460542654d
Merge tag 'iommu-updates-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "This time the updates contain:

   - Tracepoints for certain IOMMU-API functions to make their use
     easier to debug

   - A tracepoint for IOMMU page faults to make it easier to get them
     in user space

   - Updates and fixes for the new ARM SMMU driver after the first
     hardware showed up

   - Various other fixes and cleanups in other IOMMU drivers"

* tag 'iommu-updates-v3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (26 commits)
  iommu/shmobile: Enable the driver on all ARM platforms
  iommu/tegra-smmu: Staticize tegra_smmu_pm_ops
  iommu/tegra-gart: Staticize tegra_gart_pm_ops
  iommu/vt-d: Use list_for_each_entry_safe() for dmar_domain->devices traversal
  iommu/vt-d: Use for_each_drhd_unit() instead of list_for_each_entry()
  iommu/vt-d: Fixed interaction of VFIO_IOMMU_MAP_DMA with IOMMU address limits
  iommu/arm-smmu: Clear global and context bank fault status registers
  iommu/arm-smmu: Print context fault information
  iommu/arm-smmu: Check for num_context_irqs > 0 to avoid divide by zero exception
  iommu/arm-smmu: Refine check for proper size of mapped region
  iommu/arm-smmu: Switch to subsys_initcall for driver registration
  iommu/arm-smmu: use relaxed accessors where possible
  iommu/arm-smmu: replace devm_request_and_ioremap by devm_ioremap_resource
  iommu: Remove stack trace from broken irq remapping warning
  iommu: Change iommu driver to call io_page_fault trace event
  iommu: Add iommu_error class event to iommu trace
  iommu/tegra: gart: cleanup devm_* functions usage
  iommu/tegra: Print phys_addr_t using %pa
  iommu: No need to pass '0x' when '%pa' is used
  iommu: Change iommu driver to call unmap trace event
  ...
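For reference, the sketch below (an illustration, not code from this merge) shows how an IOMMU driver's fault path feeds the new tracing support: report_iommu_fault() now also fires the io_page_fault trace event, so faults reported this way become visible to user space through the iommu trace events. The example_iommu structure and the fault-address register offset are hypothetical; report_iommu_fault(), iommu_set_fault_handler() and IOMMU_FAULT_READ are the existing IOMMU-API symbols it relies on.

/*
 * Illustrative sketch only, not part of this merge: a fictional IOMMU
 * driver's fault IRQ handler.  The struct example_iommu state and the
 * 0x20 register offset are made up for illustration.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>

struct example_iommu {			/* hypothetical driver state */
	struct iommu_domain *domain;
	struct device *dev;
	void __iomem *regs;
};

static irqreturn_t example_iommu_fault_handler(int irq, void *cookie)
{
	struct example_iommu *ex = cookie;
	unsigned long iova = readl(ex->regs + 0x20);	/* hypothetical fault address register */

	/*
	 * Hand the fault to the IOMMU core.  A handler installed with
	 * iommu_set_fault_handler() runs first; with this series the
	 * io_page_fault tracepoint also records the device and faulting IOVA.
	 */
	if (!report_iommu_fault(ex->domain, ex->dev, iova, IOMMU_FAULT_READ))
		return IRQ_HANDLED;

	return IRQ_NONE;
}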
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                |  2
-rw-r--r--  drivers/iommu/Makefile               |  1
-rw-r--r--  drivers/iommu/arm-smmu.c             | 69
-rw-r--r--  drivers/iommu/dmar.c                 |  2
-rw-r--r--  drivers/iommu/intel-iommu.c          | 12
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  | 13
-rw-r--r--  drivers/iommu/iommu-traces.c         | 27
-rw-r--r--  drivers/iommu/iommu.c                | 21
-rw-r--r--  drivers/iommu/tegra-gart.c           | 27
-rw-r--r--  drivers/iommu/tegra-smmu.c           |  4
10 files changed, 106 insertions(+), 72 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c880ebaf1553..9fd51e51e78b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB
 config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
-	depends on (ARM && ARCH_SHMOBILE)
+	depends on ARM || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f474cf11..5d58bf16e9e3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2349d6272aef..1abfb5684ab7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
 	} else {
+		dev_err_ratelimited(smmu->dev,
+		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+		    iova, fsynr, root_cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #ifdef __BIG_ENDIAN
 	reg |= SCTLR_E;
 #endif
-	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -1562,9 +1565,13 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+	void __iomem *cb_base;
 	int i = 0;
-	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	u32 reg;
+
+	/* Clear Global FSR */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1572,33 +1579,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
-	/* Make sure all context banks are disabled */
-	for (i = 0; i < smmu->num_context_banks; ++i)
-		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+	/* Make sure all context banks are disabled and clear CB_FSR */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
 	/* Enable fault reporting */
-	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
 
 	/* Disable TLB broadcasting. */
-	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (sCR0_VMIDPNE | sCR0_PTM);
 
 	/* Enable client access, but bypass when no mapping is found */
-	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
 
 	/* Disable forced broadcasting */
-	scr0 &= ~sCR0_FB;
+	reg &= ~sCR0_FB;
 
 	/* Don't upgrade barriers */
-	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1703,13 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
 
-	/* Check that we ioremapped enough */
+	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= (smmu->pagesize << 1);
-	if (smmu->size < size)
-		dev_warn(smmu->dev,
-			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
-			 size, smmu->size);
+	if (smmu->size != size)
+		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+			"from mapped region size (0x%lx)!\n", size, smmu->size);
 
 	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
 				     ID1_NUMS2CB_MASK;
@@ -1784,15 +1795,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	smmu->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing base address/size\n");
-		return -ENODEV;
-	}
-
+	smmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(smmu->base))
+		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
-	smmu->base = devm_request_and_ioremap(dev, res);
-	if (!smmu->base)
-		return -EADDRNOTAVAIL;
 
 	if (of_property_read_u32(dev->of_node, "#global-interrupts",
 				 &smmu->num_global_irqs)) {
@@ -1807,12 +1813,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		smmu->num_context_irqs++;
 	}
 
-	if (num_irqs < smmu->num_global_irqs) {
-		dev_warn(dev, "found %d interrupts but expected at least %d\n",
-			 num_irqs, smmu->num_global_irqs);
-		smmu->num_global_irqs = num_irqs;
+	if (!smmu->num_context_irqs) {
+		dev_err(dev, "found %d interrupts but expected at least %d\n",
+			num_irqs, smmu->num_global_irqs + 1);
+		return -ENODEV;
 	}
-	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
 
 	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
 				  GFP_KERNEL);
@@ -1936,7 +1941,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }
 
@@ -1984,7 +1989,7 @@ static void __exit arm_smmu_exit(void)
 	return platform_driver_unregister(&arm_smmu_driver);
 }
 
-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 900946950230..8b452c9676d9 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
 	dev = pci_physfn(dev);
 
-	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+	for_each_drhd_unit(dmaru) {
 		drhd = container_of(dmaru->hdr,
 				    struct acpi_dmar_hardware_unit,
 				    header);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 15e9b57e9cf0..43b9bfea48fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 	int offset;
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+		/* Address beyond IOMMU's addressing capabilities. */
+		return NULL;
+
 	parent = domain->pgd;
 
 	while (level > 0) {
@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
 				       struct pci_dev *pdev)
 {
-	struct device_domain_info *info;
+	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	int found = 0;
-	struct list_head *entry, *tmp;
 
 	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
 				pdev->devfn);
@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_safe(entry, tmp, &domain->devices) {
-		info = list_entry(entry, struct device_domain_info, link);
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
 		if (info->segment == pci_domain_nr(pdev->bus) &&
 		    info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index ab86902fd9ff..bab10b1002fb 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
 	if (disable_irq_remap)
 		return 0;
 	if (irq_remap_broken) {
-		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+		printk(KERN_WARNING
 			   "This system BIOS has enabled interrupt remapping\n"
 			   "on a chipset that contains an erratum making that\n"
 			   "feature unstable. To maintain system stability\n"
 			   "interrupt remapping is being disabled. Please\n"
 			   "contact your BIOS vendor for an update\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		disable_irq_remap = 1;
 		return 0;
 	}
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 000000000000..bf3b317ff0c1
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca734f8f..e5555fcfe703 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@ rename:
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+	trace_add_device_to_group(group->id, dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
 	sysfs_remove_link(group->devices_kobj, device->name);
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 
+	trace_remove_device_from_group(group->id, dev);
+
 	kfree(device->name);
 	kfree(device);
 	dev->iommu_group = NULL;
@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
+	int ret;
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
 
-	return domain->ops->attach_dev(domain, dev);
+	ret = domain->ops->attach_dev(domain, dev);
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 		return;
 
 	domain->ops->detach_dev(domain, dev);
+	trace_detach_device_from_domain(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
 		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
+	else
+		trace_map(iova, paddr, size);
 
 	return ret;
 }
@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		unmapped += unmapped_page;
 	}
 
+	trace_unmap(iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 108c0e9c24d9..dba1a9fd5070 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	pfn = __phys_to_pfn(pa);
 	if (!pfn_valid(pfn)) {
-		dev_err(gart->dev, "Invalid page: %08x\n", pa);
+		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
 		spin_unlock_irqrestore(&gart->pte_lock, flags);
 		return -EINVAL;
 	}
@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 
 	pa = (pte & GART_PAGE_MASK);
 	if (!pfn_valid(__phys_to_pfn(pa))) {
-		dev_err(gart->dev, "No entry for %08llx:%08x\n",
-			(unsigned long long)iova, pa);
+		dev_err(gart->dev, "No entry for %08llx:%pa\n",
+			(unsigned long long)iova, &pa);
 		gart_dump_table(gart);
 		return -EINVAL;
 	}
@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	struct gart_device *gart;
 	struct resource *res, *res_remap;
 	void __iomem *gart_regs;
-	int err;
 	struct device *dev = &pdev->dev;
 
 	if (gart_handle)
@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
 	if (!gart_regs) {
 		dev_err(dev, "failed to remap GART registers\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	gart->dev = &pdev->dev;
@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
 	if (!gart->savedata) {
 		dev_err(dev, "failed to allocate context save area\n");
-		err = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	platform_set_drvdata(pdev, gart);
@@ -401,32 +398,20 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart_handle = gart;
 	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return 0;
-
-fail:
-	if (gart_regs)
-		devm_iounmap(dev, gart_regs);
-	if (gart && gart->savedata)
-		vfree(gart->savedata);
-	devm_kfree(dev, gart);
-	return err;
 }
 
 static int tegra_gart_remove(struct platform_device *pdev)
 {
 	struct gart_device *gart = platform_get_drvdata(pdev);
-	struct device *dev = gart->dev;
 
 	writel(0, gart->regs + GART_CONFIG);
 	if (gart->savedata)
 		vfree(gart->savedata);
-	if (gart->regs)
-		devm_iounmap(dev, gart->regs);
-	devm_kfree(dev, gart);
 	gart_handle = NULL;
 	return 0;
 }
 
-const struct dev_pm_ops tegra_gart_pm_ops = {
+static const struct dev_pm_ops tegra_gart_pm_ops = {
 	.suspend = tegra_gart_suspend,
 	.resume = tegra_gart_resume,
 };
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e0665603afd9..605b5b46a903 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	unsigned long pfn = __phys_to_pfn(pa);
 	unsigned long flags;
 
-	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
+	dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
 
 	if (!pfn_valid(pfn))
 		return -ENOMEM;
@@ -1254,7 +1254,7 @@ static int tegra_smmu_remove(struct platform_device *pdev)
 	return 0;
 }
 
-const struct dev_pm_ops tegra_smmu_pm_ops = {
+static const struct dev_pm_ops tegra_smmu_pm_ops = {
 	.suspend = tegra_smmu_suspend,
 	.resume = tegra_smmu_resume,
 };