path: root/drivers/iommu
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-23 17:15:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-23 17:15:07 -0400
commit		0b0c9d3a5872e8a02a071c6f0775ee6bf00a1206 (patch)
tree		12f6cc1eedffd62e4dfa3b0d41b2c643462426db /drivers/iommu
parent		fc2bb8d1cde1296d210a0f1ff9ee979a447d0a34 (diff)
parent		7de473066f1512e52ea806e3c9698e5ea325b26c (diff)
Merge tag 'iommu-updates-v3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "The IOMMU updates for this round are not very large patch-wise. But
  they contain two new IOMMU drivers for the ARM Tegra 2 and 3
  platforms. Besides that there are also a few patches for the AMD
  IOMMU which prepare the driver for adding intr-remapping support and
  a couple of fixes."

* tag 'iommu-updates-v3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix section mismatch
  iommu/amd: Move interrupt setup code into seperate function
  iommu/amd: Make sure IOMMU interrupts are re-enabled on resume
  iommu/amd: Fix section warning for prealloc_protection_domains
  iommu/amd: Don't initialize IOMMUv2 resources when not required
  iommu/amd: Update git-tree in MAINTAINERS
  iommu/tegra-gart: fix spin_unlock in map failure path
  iommu/amd: Fix double free of mem-region in error-path
  iommu/amd: Split amd_iommu_init function
  ARM: IOMMU: Tegra30: Add iommu_ops for SMMU driver
  ARM: IOMMU: Tegra20: Add iommu_ops for GART driver
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/Kconfig		|   20
-rw-r--r--	drivers/iommu/Makefile		|    2
-rw-r--r--	drivers/iommu/amd_iommu_init.c	|  187
-rw-r--r--	drivers/iommu/amd_iommu_v2.c	|   14
-rw-r--r--	drivers/iommu/tegra-gart.c	|  451
-rw-r--r--	drivers/iommu/tegra-smmu.c	| 1034
6 files changed, 1643 insertions(+), 65 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6bea6962f8ee..3bd9fff5c589 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,4 +142,24 @@ config OMAP_IOMMU_DEBUG
 
 	  Say N unless you know you need this.
 
+config TEGRA_IOMMU_GART
+	bool "Tegra GART IOMMU Support"
+	depends on ARCH_TEGRA_2x_SOC
+	select IOMMU_API
+	help
+	  Enables support for remapping discontiguous physical memory
+	  shared with the operating system into contiguous I/O virtual
+	  space through the GART (Graphics Address Relocation Table)
+	  hardware included on Tegra SoCs.
+
+config TEGRA_IOMMU_SMMU
+	bool "Tegra SMMU IOMMU Support"
+	depends on ARCH_TEGRA_3x_SOC
+	select IOMMU_API
+	help
+	  Enables support for remapping discontiguous physical memory
+	  shared with the operating system into contiguous I/O virtual
+	  space through the SMMU (System Memory Management Unit)
+	  hardware included on Tegra SoCs.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 0e36b4934aff..7ad7a3bc1242 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
+obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index a35e98ad9725..c56790375e0f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -196,6 +196,8 @@ static u32 rlookup_table_size; /* size if the rlookup table */
  */
 extern void iommu_flush_all_caches(struct amd_iommu *iommu);
 
+static int amd_iommu_enable_interrupts(void);
+
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -358,8 +360,6 @@ static void iommu_disable(struct amd_iommu *iommu)
  */
 static u8 * __init iommu_map_mmio_space(u64 address)
 {
-	u8 *ret;
-
 	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
 		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
 			address);
@@ -367,13 +367,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 		return NULL;
 	}
 
-	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
-	if (ret != NULL)
-		return ret;
-
-	release_mem_region(address, MMIO_REGION_LENGTH);
-
-	return NULL;
+	return ioremap_nocache(address, MMIO_REGION_LENGTH);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1131,8 +1125,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 {
 	int r;
 
-	if (pci_enable_msi(iommu->dev))
-		return 1;
+	r = pci_enable_msi(iommu->dev);
+	if (r)
+		return r;
 
 	r = request_threaded_irq(iommu->dev->irq,
 				 amd_iommu_int_handler,
@@ -1142,27 +1137,36 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 
 	if (r) {
 		pci_disable_msi(iommu->dev);
-		return 1;
+		return r;
 	}
 
 	iommu->int_enabled = true;
-	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-
-	if (iommu->ppr_log != NULL)
-		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
 
 	return 0;
 }
 
 static int iommu_init_msi(struct amd_iommu *iommu)
 {
+	int ret;
+
 	if (iommu->int_enabled)
-		return 0;
+		goto enable_faults;
 
 	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
-		return iommu_setup_msi(iommu);
+		ret = iommu_setup_msi(iommu);
+	else
+		ret = -ENODEV;
 
-	return 1;
+	if (ret)
+		return ret;
+
+enable_faults:
+	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+	if (iommu->ppr_log != NULL)
+		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
+	return 0;
 }
 
 /****************************************************************************
@@ -1381,7 +1385,6 @@ static void enable_iommus(void)
 		iommu_enable_ppr_log(iommu);
 		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
-		iommu_init_msi(iommu);
 		iommu_enable(iommu);
 		iommu_flush_all_caches(iommu);
 	}
@@ -1409,6 +1412,8 @@ static void amd_iommu_resume(void)
 
 	/* re-load the hardware */
 	enable_iommus();
+
+	amd_iommu_enable_interrupts();
 }
 
 static int amd_iommu_suspend(void)
@@ -1424,10 +1429,40 @@ static struct syscore_ops amd_iommu_syscore_ops = {
 	.resume = amd_iommu_resume,
 };
 
+static void __init free_on_init_error(void)
+{
+	amd_iommu_uninit_devices();
+
+	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+		   get_order(MAX_DOMAIN_ID/8));
+
+	free_pages((unsigned long)amd_iommu_rlookup_table,
+		   get_order(rlookup_table_size));
+
+	free_pages((unsigned long)amd_iommu_alias_table,
+		   get_order(alias_table_size));
+
+	free_pages((unsigned long)amd_iommu_dev_table,
+		   get_order(dev_table_size));
+
+	free_iommu_all();
+
+	free_unity_maps();
+
+#ifdef CONFIG_GART_IOMMU
+	/*
+	 * We failed to initialize the AMD IOMMU - try fallback to GART
+	 * if possible.
+	 */
+	gart_iommu_init();
+
+#endif
+}
+
 /*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
+ * This is the hardware init function for AMD IOMMU in the system.
+ * This function is called either from amd_iommu_init or from the interrupt
+ * remapping setup code.
  *
  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
  * three times:
@@ -1446,16 +1481,21 @@ static struct syscore_ops amd_iommu_syscore_ops = {
  * remapping requirements parsed out of the ACPI table in
  * this last pass.
  *
- * After that the hardware is initialized and ready to go. In the last
- * step we do some Linux specific things like registering the driver in
- * the dma_ops interface and initializing the suspend/resume support
- * functions. Finally it prints some information about AMD IOMMUs and
- * the driver state and enables the hardware.
+ * After everything is set up the IOMMUs are enabled and the necessary
+ * hotplug and suspend notifiers are registered.
  */
-static int __init amd_iommu_init(void)
+int __init amd_iommu_init_hardware(void)
 {
 	int i, ret = 0;
 
+	if (!amd_iommu_detected)
+		return -ENODEV;
+
+	if (amd_iommu_dev_table != NULL) {
+		/* Hardware already initialized */
+		return 0;
+	}
+
 	/*
 	 * First parse ACPI tables to find the largest Bus/Dev/Func
 	 * we need to handle. Upon this information the shared data
@@ -1472,9 +1512,8 @@ static int __init amd_iommu_init(void)
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
 
-	ret = -ENOMEM;
-
 	/* Device table - directly used by all IOMMUs */
+	ret = -ENOMEM;
 	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 				      get_order(dev_table_size));
 	if (amd_iommu_dev_table == NULL)
@@ -1546,20 +1585,65 @@ static int __init amd_iommu_init(void)
 
 	enable_iommus();
 
+	amd_iommu_init_notifier();
+
+	register_syscore_ops(&amd_iommu_syscore_ops);
+
+out:
+	return ret;
+
+free:
+	free_on_init_error();
+
+	return ret;
+}
+
+static int amd_iommu_enable_interrupts(void)
+{
+	struct amd_iommu *iommu;
+	int ret = 0;
+
+	for_each_iommu(iommu) {
+		ret = iommu_init_msi(iommu);
+		if (ret)
+			goto out;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * The function calls amd_iommu_init_hardware() to setup and enable the
+ * IOMMU hardware if this has not happened yet. After that the driver
+ * registers for the DMA-API and for the IOMMU-API as necessary.
+ */
+static int __init amd_iommu_init(void)
+{
+	int ret = 0;
+
+	ret = amd_iommu_init_hardware();
+	if (ret)
+		goto out;
+
+	ret = amd_iommu_enable_interrupts();
+	if (ret)
+		goto free;
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
 		ret = amd_iommu_init_dma_ops();
 
 	if (ret)
-		goto free_disable;
+		goto free;
 
 	amd_iommu_init_api();
 
-	amd_iommu_init_notifier();
-
-	register_syscore_ops(&amd_iommu_syscore_ops);
-
 	if (iommu_pass_through)
 		goto out;
 
@@ -1569,39 +1653,14 @@ static int __init amd_iommu_init(void)
 	printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
 	x86_platform.iommu_shutdown = disable_iommus;
+
 out:
 	return ret;
 
-free_disable:
-	disable_iommus();
-
 free:
-	amd_iommu_uninit_devices();
-
-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID/8));
-
-	free_pages((unsigned long)amd_iommu_rlookup_table,
-		   get_order(rlookup_table_size));
-
-	free_pages((unsigned long)amd_iommu_alias_table,
-		   get_order(alias_table_size));
-
-	free_pages((unsigned long)amd_iommu_dev_table,
-		   get_order(dev_table_size));
-
-	free_iommu_all();
-
-	free_unity_maps();
-
-#ifdef CONFIG_GART_IOMMU
-	/*
-	 * We failed to initialize the AMD IOMMU - try fallback to GART
-	 * if possible.
-	 */
-	gart_iommu_init();
+	disable_iommus();
 
-#endif
+	free_on_init_error();
 
 	goto out;
 }
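The net effect of this file's hunks is that initialization is split into three separable stages, so the interrupt-remapping code can bring up the hardware early and resume can re-arm only the interrupt stage. A condensed restatement of the resulting flow, assembled from the hunks above for readability (not code added by the patch):

	/* Stage 1: amd_iommu_init_hardware() -- parse IVRS, allocate the
	 * device/alias/rlookup tables, enable_iommus(); idempotent, so it
	 * may also be called from the interrupt-remapping setup code.
	 *
	 * Stage 2: amd_iommu_enable_interrupts() -- per-IOMMU MSI and
	 * fault-interrupt setup; kept out of enable_iommus() so that
	 * amd_iommu_resume() can call it again after re-loading the
	 * hardware state.
	 *
	 * Stage 3: amd_iommu_init_passthrough() or amd_iommu_init_dma_ops()
	 * -- DMA-API registration, done only on the normal boot path.
	 *
	 * Any failure after stage 1 funnels into disable_iommus() +
	 * free_on_init_error(), which also attempts the GART fallback. */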
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 8add9f125d3e..036fe9bf157e 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -921,7 +921,16 @@ static int __init amd_iommu_v2_init(void)
 	size_t state_table_size;
 	int ret;
 
-	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>");
+	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+
+	if (!amd_iommu_v2_supported()) {
+		pr_info("AMD IOMMUv2 functionality not available on this system\n");
+		/*
+		 * Load anyway to provide the symbols to other modules
+		 * which may use AMD IOMMUv2 optionally.
+		 */
+		return 0;
+	}
 
 	spin_lock_init(&state_lock);
 
@@ -961,6 +970,9 @@ static void __exit amd_iommu_v2_exit(void)
 	size_t state_table_size;
 	int i;
 
+	if (!amd_iommu_v2_supported())
+		return;
+
 	profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
 	amd_iommu_unregister_ppr_notifier(&ppr_nb);
 
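With these checks the v2 module now loads (and its exported symbols stay available) even on systems without IOMMUv2-capable hardware, and callers are expected to probe for support themselves. A minimal sketch of a hypothetical consumer; amd_iommu_v2_supported() and amd_iommu_init_device() are real exports of this driver, while the device and PASID count are placeholder values:

	#include <linux/pci.h>
	#include <linux/amd-iommu.h>

	/* Hypothetical consumer: only set up per-device PASID state when
	 * the platform actually offers IOMMUv2 functionality. */
	static int example_enable_pasid_support(struct pci_dev *pdev)
	{
		if (!amd_iommu_v2_supported())
			return -ENODEV;

		/* 16 PASIDs is an arbitrary example value. */
		return amd_iommu_init_device(pdev, 16);
	}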
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
new file mode 100644
index 000000000000..779306ee7b16
--- /dev/null
+++ b/drivers/iommu/tegra-gart.c
@@ -0,0 +1,451 @@
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_CONFIG		0x24
#define GART_ENTRY_ADDR		0x28
#define GART_ENTRY_DATA		0x2c
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart;
	struct gart_client *client, *c;
	int err = 0;

	gart = dev_get_drvdata(dev->parent);
	if (!gart)
		return -EINVAL;
	domain->priv = gart;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = domain->priv;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&gart->client_lock);
}

static int gart_iommu_domain_init(struct iommu_domain *domain)
{
	return 0;
}

static void gart_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct gart_device *gart = domain->priv;

	if (!gart)
		return;

	spin_lock(&gart->client_lock);
	if (!list_empty(&gart->client)) {
		struct gart_client *c;

		list_for_each_entry(c, &gart->client, list)
			gart_iommu_detach_dev(domain, c->dev);
	}
	spin_unlock(&gart->client_lock);
	domain->priv = NULL;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %08x\n", pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct gart_device *gart = domain->priv;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops gart_iommu_ops = {
	.domain_init	= gart_iommu_domain_init,
	.domain_destroy	= gart_iommu_domain_destroy,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.domain_has_cap	= gart_iommu_domain_has_cap,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	int err;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		err = -ENXIO;
		goto fail;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		err = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;
	return 0;

fail:
	if (gart_regs)
		devm_iounmap(dev, gart_regs);
	if (gart && gart->savedata)
		vfree(gart->savedata);
	devm_kfree(dev, gart);
	return err;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);
	struct device *dev = gart->dev;

	writel(0, gart->regs + GART_CONFIG);
	if (gart->savedata)
		vfree(gart->savedata);
	if (gart->regs)
		devm_iounmap(dev, gart->regs);
	devm_kfree(dev, gart);
	gart_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
	},
};

static int __devinit tegra_gart_init(void)
{
	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_LICENSE("GPL v2");
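Note the programming model the driver above restates: the GART has no memory-mapped page table, so each PTE is written through the GART_ENTRY_ADDR/GART_ENTRY_DATA indirect register pair, and a read-back (FLUSH_GART_REGS) forces the posted writes to complete, per the PPSB/APB rule in the driver's comment. A worked sketch reusing the driver's own helpers; the addresses are illustrative, and this is commentary, not part of the patch:

	static void example_gart_map_one(struct gart_device *gart)
	{
		unsigned long iova = 0x58000000;		 /* page-aligned IOVA  */
		unsigned long pfn  = 0x2f800000UL >> PAGE_SHIFT; /* PA 0x2f800000      */

		/* GART_PTE() places the valid bit (bit 31) above the physical
		 * address: 0x80000000 | 0x2f800000 == 0xaf800000 here.  That
		 * is why readers mask with GART_PAGE_MASK to recover it. */
		gart_set_pte(gart, iova, GART_PTE(pfn));

		/* Read-back flush, required before any PPSB-side block may
		 * rely on the new translation. */
		FLUSH_GART_REGS(gart);
	}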
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
new file mode 100644
index 000000000000..eb93c821f592
--- /dev/null
+++ b/drivers/iommu/tegra-smmu.c
@@ -0,0 +1,1034 @@
/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

#define SMMU_TLB_CONFIG				0x14
#define SMMU_TLB_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_TLB_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG				0x18
#define SMMU_PTC_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_PTC_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_TLB_HIT_COUNT		0x1f0
#define SMMU_STATS_TLB_MISS_COUNT		0x1f4
#define SMMU_STATS_PTC_HIT_COUNT		0x1f8
#define SMMU_STATS_PTC_MISS_COUNT		0x1fc

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

/* AHB Arbiter Registers */
#define AHB_XBAR_CTRL				0xe0
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE	1
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT	17

#define SMMU_NUM_ASIDS				4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable		\
		(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
		(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)

#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
152
153#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
154#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
155#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
156#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
157#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
158
159#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
160
161#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
162#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
163#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
164
165#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
166#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
167
168#define SMMU_MK_PDIR(page, attr) \
169 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
170#define SMMU_MK_PDE(page, attr) \
171 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
172#define SMMU_EX_PTBL_PAGE(pde) \
173 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
174#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
175
176#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
177#define SMMU_ASID_DISABLE 0
178#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
179
180#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
181#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
182#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
183#define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0)
184
185#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID
186
187static const u32 smmu_hwgrp_asid_reg[] = {
188 HWGRP_INIT(AFI),
189 HWGRP_INIT(AVPC),
190 HWGRP_INIT(DC),
191 HWGRP_INIT(DCB),
192 HWGRP_INIT(EPP),
193 HWGRP_INIT(G2),
194 HWGRP_INIT(HC),
195 HWGRP_INIT(HDA),
196 HWGRP_INIT(ISP),
197 HWGRP_INIT(MPE),
198 HWGRP_INIT(NV),
199 HWGRP_INIT(NV2),
200 HWGRP_INIT(PPCS),
201 HWGRP_INIT(SATA),
202 HWGRP_INIT(VDE),
203 HWGRP_INIT(VI),
204};
205#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])
206
207/*
208 * Per client for address space
209 */
210struct smmu_client {
211 struct device *dev;
212 struct list_head list;
213 struct smmu_as *as;
214 u32 hwgrp;
215};
216
217/*
218 * Per address space
219 */
220struct smmu_as {
221 struct smmu_device *smmu; /* back pointer to container */
222 unsigned int asid;
223 spinlock_t lock; /* for pagetable */
224 struct page *pdir_page;
225 unsigned long pdir_attr;
226 unsigned long pde_attr;
227 unsigned long pte_attr;
228 unsigned int *pte_count;
229
230 struct list_head client;
231 spinlock_t client_lock; /* for client list */
232};
233
234/*
235 * Per SMMU device - IOMMU device
236 */
237struct smmu_device {
238 void __iomem *regs, *regs_ahbarb;
239 unsigned long iovmm_base; /* remappable base address */
240 unsigned long page_count; /* total remappable size */
241 spinlock_t lock;
242 char *name;
243 struct device *dev;
244 int num_as;
245 struct smmu_as *as; /* Run-time allocated array */
246 struct page *avp_vector_page; /* dummy page shared by all AS's */
247
248 /*
249 * Register image savers for suspend/resume
250 */
251 unsigned long translation_enable_0;
252 unsigned long translation_enable_1;
253 unsigned long translation_enable_2;
254 unsigned long asid_security;
255};
256
257static struct smmu_device *smmu_handle; /* unique for a system */
258
259/*
260 * SMMU/AHB register accessors
261 */
262static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
263{
264 return readl(smmu->regs + offs);
265}
266static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
267{
268 writel(val, smmu->regs + offs);
269}
270
271static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
272{
273 return readl(smmu->regs_ahbarb + offs);
274}
275static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
276{
277 writel(val, smmu->regs_ahbarb + offs);
278}
279
280#define VA_PAGE_TO_PA(va, page) \
281 (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
282
283#define FLUSH_CPU_DCACHE(va, page, size) \
284 do { \
285 unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \
286 __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
287 outer_flush_range(_pa_, _pa_+(size_t)(size)); \
288 } while (0)
289
290/*
291 * Any interaction between any block on PPSB and a block on APB or AHB
292 * must have these read-back barriers to ensure the APB/AHB bus
293 * transaction is complete before initiating activity on the PPSB
294 * block.
295 */
296#define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG)
297
298#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data)
299
300static int __smmu_client_set_hwgrp(struct smmu_client *c,
301 unsigned long map, int on)
302{
303 int i;
304 struct smmu_as *as = c->as;
305 u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
306 struct smmu_device *smmu = as->smmu;
307
308 WARN_ON(!on && map);
309 if (on && !map)
310 return -EINVAL;
311 if (!on)
312 map = smmu_client_hwgrp(c);
313
314 for_each_set_bit(i, &map, HWGRP_COUNT) {
315 offs = HWGRP_ASID_REG(i);
316 val = smmu_read(smmu, offs);
317 if (on) {
318 if (WARN_ON(val & mask))
319 goto err_hw_busy;
320 val |= mask;
321 } else {
322 WARN_ON((val & mask) == mask);
323 val &= ~mask;
324 }
325 smmu_write(smmu, val, offs);
326 }
327 FLUSH_SMMU_REGS(smmu);
328 c->hwgrp = map;
329 return 0;
330
331err_hw_busy:
332 for_each_set_bit(i, &map, HWGRP_COUNT) {
333 offs = HWGRP_ASID_REG(i);
334 val = smmu_read(smmu, offs);
335 val &= ~mask;
336 smmu_write(smmu, val, offs);
337 }
338 return -EBUSY;
339}
340
341static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
342{
343 u32 val;
344 unsigned long flags;
345 struct smmu_as *as = c->as;
346 struct smmu_device *smmu = as->smmu;
347
348 spin_lock_irqsave(&smmu->lock, flags);
349 val = __smmu_client_set_hwgrp(c, map, on);
350 spin_unlock_irqrestore(&smmu->lock, flags);
351 return val;
352}
353
354/*
355 * Flush all TLB entries and all PTC entries
356 * Caller must lock smmu
357 */
358static void smmu_flush_regs(struct smmu_device *smmu, int enable)
359{
360 u32 val;
361
362 smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
363 FLUSH_SMMU_REGS(smmu);
364 val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
365 SMMU_TLB_FLUSH_ASID_MATCH_disable;
366 smmu_write(smmu, val, SMMU_TLB_FLUSH);
367
368 if (enable)
369 smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
370 FLUSH_SMMU_REGS(smmu);
371}
372
373static void smmu_setup_regs(struct smmu_device *smmu)
374{
375 int i;
376 u32 val;
377
378 for (i = 0; i < smmu->num_as; i++) {
379 struct smmu_as *as = &smmu->as[i];
380 struct smmu_client *c;
381
382 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
383 val = as->pdir_page ?
384 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
385 SMMU_PTB_DATA_RESET_VAL;
386 smmu_write(smmu, val, SMMU_PTB_DATA);
387
388 list_for_each_entry(c, &as->client, list)
389 __smmu_client_set_hwgrp(c, c->hwgrp, 1);
390 }
391
392 smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
393 smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
394 smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
395 smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
396 smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
397 smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);
398
399 smmu_flush_regs(smmu, 1);
400
401 val = ahb_read(smmu, AHB_XBAR_CTRL);
402 val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
403 AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
404 ahb_write(smmu, val, AHB_XBAR_CTRL);
405}
406
407static void flush_ptc_and_tlb(struct smmu_device *smmu,
408 struct smmu_as *as, dma_addr_t iova,
409 unsigned long *pte, struct page *page, int is_pde)
410{
411 u32 val;
412 unsigned long tlb_flush_va = is_pde
413 ? SMMU_TLB_FLUSH_VA(iova, SECTION)
414 : SMMU_TLB_FLUSH_VA(iova, GROUP);
415
416 val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
417 smmu_write(smmu, val, SMMU_PTC_FLUSH);
418 FLUSH_SMMU_REGS(smmu);
419 val = tlb_flush_va |
420 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
421 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
422 smmu_write(smmu, val, SMMU_TLB_FLUSH);
423 FLUSH_SMMU_REGS(smmu);
424}
425
426static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
427{
428 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
429 unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);
430
431 if (pdir[pdn] != _PDE_VACANT(pdn)) {
432 dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);
433
434 ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
435 __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
436 pdir[pdn] = _PDE_VACANT(pdn);
437 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
438 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
439 as->pdir_page, 1);
440 }
441}
442
443static void free_pdir(struct smmu_as *as)
444{
445 unsigned addr;
446 int count;
447 struct device *dev = as->smmu->dev;
448
449 if (!as->pdir_page)
450 return;
451
452 addr = as->smmu->iovmm_base;
453 count = as->smmu->page_count;
454 while (count-- > 0) {
455 free_ptbl(as, addr);
456 addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
457 }
458 ClearPageReserved(as->pdir_page);
459 __free_page(as->pdir_page);
460 as->pdir_page = NULL;
461 devm_kfree(dev, as->pte_count);
462 as->pte_count = NULL;
463}
464
465/*
466 * Maps PTBL for given iova and returns the PTE address
467 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
468 */
469static unsigned long *locate_pte(struct smmu_as *as,
470 dma_addr_t iova, bool allocate,
471 struct page **ptbl_page_p,
472 unsigned int **count)
473{
474 unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
475 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
476 unsigned long *pdir = page_address(as->pdir_page);
477 unsigned long *ptbl;
478
479 if (pdir[pdn] != _PDE_VACANT(pdn)) {
480 /* Mapped entry table already exists */
481 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
482 ptbl = page_address(*ptbl_page_p);
483 } else if (!allocate) {
484 return NULL;
485 } else {
486 int pn;
487 unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
488
489 /* Vacant - allocate a new page table */
490 dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);
491
492 *ptbl_page_p = alloc_page(GFP_ATOMIC);
493 if (!*ptbl_page_p) {
494 dev_err(as->smmu->dev,
495 "failed to allocate smmu_device page table\n");
496 return NULL;
497 }
498 SetPageReserved(*ptbl_page_p);
499 ptbl = (unsigned long *)page_address(*ptbl_page_p);
500 for (pn = 0; pn < SMMU_PTBL_COUNT;
501 pn++, addr += SMMU_PAGE_SIZE) {
502 ptbl[pn] = _PTE_VACANT(addr);
503 }
504 FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
505 pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
506 as->pde_attr | _PDE_NEXT);
507 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
508 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
509 as->pdir_page, 1);
510 }
511 *count = &as->pte_count[pdn];
512
513 return &ptbl[ptn % SMMU_PTBL_COUNT];
514}
515
516#ifdef CONFIG_SMMU_SIG_DEBUG
517static void put_signature(struct smmu_as *as,
518 dma_addr_t iova, unsigned long pfn)
519{
520 struct page *page;
521 unsigned long *vaddr;
522
523 page = pfn_to_page(pfn);
524 vaddr = page_address(page);
525 if (!vaddr)
526 return;
527
528 vaddr[0] = iova;
529 vaddr[1] = pfn << PAGE_SHIFT;
530 FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
531}
532#else
533static inline void put_signature(struct smmu_as *as,
534 unsigned long addr, unsigned long pfn)
535{
536}
537#endif
538
539/*
540 * Caller must lock/unlock as
541 */
542static int alloc_pdir(struct smmu_as *as)
543{
544 unsigned long *pdir;
545 int pdn;
546 u32 val;
547 struct smmu_device *smmu = as->smmu;
548
549 if (as->pdir_page)
550 return 0;
551
552 as->pte_count = devm_kzalloc(smmu->dev,
553 sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
554 if (!as->pte_count) {
555 dev_err(smmu->dev,
556 "failed to allocate smmu_device PTE cunters\n");
		return -ENOMEM;
	}
	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pdir_page) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device page directory\n");
		devm_kfree(smmu->dev, as->pte_count);
		as->pte_count = NULL;
		return -ENOMEM;
	}
	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	return 0;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			c->as = NULL;
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(dev));
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *tmp = &smmu->as[i];

		spin_lock_irqsave(&tmp->lock, flags);
		if (!tmp->pdir_page) {
			as = tmp;
			goto found;
		}
		spin_unlock_irqrestore(&tmp->lock, flags);
	}
	dev_err(smmu->dev, "no free AS\n");
	return -ENODEV;

found:
	if (alloc_pdir(as) < 0)
		goto err_alloc_pdir;

	spin_lock(&smmu->lock);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock(&smmu->lock);

	spin_unlock_irqrestore(&as->lock, flags);
	domain->priv = as;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
	return 0;

err_alloc_pdir:
	spin_unlock_irqrestore(&as->lock, flags);
	return -ENODEV;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	if (!list_empty(&as->client)) {
		struct smmu_client *c;

		list_for_each_entry(c, &as->client, list)
			smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&smmu->lock, flags);
	smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return 0;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct resource *regs, *regs2, *window;
	struct device *dev = &pdev->dev;
	int i, err = 0;

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!regs || !regs2 || !window) {
		dev_err(dev, "No SMMU resources\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	smmu->dev = dev;
	smmu->num_as = SMMU_NUM_ASIDS;
	smmu->iovmm_base = (unsigned long)window->start;
	smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
	smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
	smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
					 resource_size(regs2));
	if (!smmu->regs || !smmu->regs_ahbarb) {
		dev_err(dev, "failed to remap SMMU registers\n");
		err = -ENXIO;
		goto fail;
	}

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	smmu->as = devm_kzalloc(dev,
			sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
	if (!smmu->as) {
		dev_err(dev, "failed to allocate smmu_as\n");
		err = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	smmu_setup_regs(smmu);
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page) {
		err = -ENOMEM;
		goto fail;
	}

	smmu_handle = smmu;
	return 0;

fail:
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	if (smmu && smmu->as) {
		for (i = 0; i < smmu->num_as; i++) {
			if (smmu->as[i].pdir_page) {
				ClearPageReserved(smmu->as[i].pdir_page);
				__free_page(smmu->as[i].pdir_page);
			}
		}
		devm_kfree(dev, smmu->as);
	}
	devm_kfree(dev, smmu);
	return err;
}

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	struct device *dev = smmu->dev;

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	platform_set_drvdata(pdev, NULL);
	if (smmu->as) {
		int i;

		for (i = 0; i < smmu->num_as; i++)
			free_pdir(&smmu->as[i]);
		devm_kfree(dev, smmu->as);
	}
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	devm_kfree(dev, smmu);
	smmu_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
	},
};

static int __devinit tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_LICENSE("GPL v2");
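For orientation, the address arithmetic above implements a classic two-level table: bits 31:22 of the IOVA pick one of 1024 page-directory entries, bits 21:12 pick one of 1024 PTEs in that table, and each PTE maps a 4 KiB page, so one ASID spans 1024 x 1024 x 4 KiB = 4 GiB. A worked decode under the driver's macros; the IOVA is an arbitrary example, and this is commentary, not part of the patch:

	static void example_smmu_decode(void)
	{
		dma_addr_t iova = 0x40123000;	/* arbitrary example IOVA */

		unsigned long pdn  = SMMU_ADDR_TO_PDN(iova); /* iova >> 22 == 0x100   */
		unsigned long ptn  = SMMU_ADDR_TO_PFN(iova); /* iova >> 12 == 0x40123 */
		unsigned long slot = ptn % SMMU_PTBL_COUNT;  /* low 10 bits == 0x123  */

		/* locate_pte() follows pdir[pdn] to the page table covering
		 * the 4 MiB region at 0x40000000 (allocating it if vacant)
		 * and returns &ptbl[slot], i.e. the PTE for 0x40123000. */
		pr_debug("pdn=%lx ptn=%lx slot=%lx\n", pdn, ptn, slot);
	}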