author    Yong Wu <yong.wu@mediatek.com>    2016-02-22 12:20:50 -0500
committer Joerg Roedel <jroedel@suse.de>    2016-02-25 10:49:08 -0500
commit    0df4fabe208d9576f2671d31e77cf46d20fdcd01 (patch)
tree      3acb1b77bfac4d81a71825f168b156343991b522
parent    cc8bbe1a83128ad06457e4dc69907c4f9a6fc1a7 (diff)

iommu/mediatek: Add mt8173 IOMMU driver

This patch adds support for the MediaTek M4U (MultiMedia Memory
Management Unit) found on the mt8173 SoC.

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--  drivers/iommu/Kconfig     |  16
-rw-r--r--  drivers/iommu/Makefile    |   1
-rw-r--r--  drivers/iommu/mtk_iommu.c | 737
3 files changed, 754 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dc1aaa5d53e8..6df982f89f0c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -337,4 +337,20 @@ config S390_IOMMU
 	help
 	  Support for the IOMMU API for s390 PCI devices.
 
+config MTK_IOMMU
+	bool "MTK IOMMU Support"
+	depends on ARM || ARM64
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select IOMMU_API
+	select IOMMU_DMA
+	select IOMMU_IO_PGTABLE_ARMV7S
+	select MEMORY
+	select MTK_SMI
+	help
+	  Support for the M4U on certain Mediatek SOCs. M4U is MultiMedia
+	  Memory Management Unit. This option enables remapping of DMA memory
+	  accesses for the multimedia subsystem.
+
+	  If unsure, say N here.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2f9bfbc8cfd1..c6edb31bf8c6 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
 obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
+obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
new file mode 100644
index 000000000000..721ffdb296d6
--- /dev/null
+++ b/drivers/iommu/mtk_iommu.c
@@ -0,0 +1,737 @@
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>

#include "io-pgtable.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)

#define REG_MMU_IVRP_PADDR			0x114
#define F_MMU_IVRP_PA_SET(pa)			((pa) >> 1)

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TLB_MISS_FAULT			BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134

#define REG_MMU_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_MSK			0xfffff000
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU_INVLD_PA			0x140
#define REG_MMU_INT_ID				0x150
#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)
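
/*
 * (Illustration, not part of the original patch.) A quick worked example
 * of the REG_MMU_INT_ID decode above, for a hypothetical raw register
 * value:
 *
 *	u32 regval = 0x1a4;		// binary 1 1010 0100
 *	F_MMU0_INT_ID_LARB_ID(regval);	// (0x1a4 >> 7) & 0x7  == 3
 *	F_MMU0_INT_ID_PORT_ID(regval);	// (0x1a4 >> 2) & 0x1f == 9
 *
 * i.e. larb 3, port 9 raised the fault.
 */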

#define MTK_PROTECT_PA_ALIGN			128

struct mtk_iommu_suspend_reg {
	u32				standard_axi_mode;
	u32				dcm_dis;
	u32				ctrl_reg;
	u32				int_control0;
	u32				int_main_control;
};

struct mtk_iommu_client_priv {
	struct list_head		client;
	unsigned int			mtk_m4u_id;
	struct device			*m4udev;
};

struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

struct mtk_iommu_data {
	void __iomem			*base;
	int				irq;
	struct device			*dev;
	struct clk			*bclk;
	phys_addr_t			protect_base; /* protect memory base */
	struct mtk_iommu_suspend_reg	reg;
	struct mtk_iommu_domain		*m4u_dom;
	struct iommu_group		*m4u_group;
	struct mtk_smi_iommu		smi_imu; /* SMI larb iommu info */
};

static struct iommu_ops mtk_iommu_ops;

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);

	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
}

static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
					tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(cookie);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};
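
/*
 * (Note, not part of the original patch.) The io-pgtable core drives the
 * three hooks above: tlb_add_flush() posts a range invalidation and
 * tlb_sync() waits for it to complete, falling back to a full flush on
 * timeout. A minimal sketch of flushing a single 4K page at a
 * hypothetical IOVA, calling the functions above directly:
 *
 *	mtk_iommu_tlb_add_flush_nosync(0x40000000, SZ_4K, SZ_4K, true, data);
 *	mtk_iommu_tlb_sync(data);
 */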

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
	fault_port = F_MMU0_INT_ID_PORT_ID(regval);

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_iommu_client_priv *head, *cur, *next;
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;

	head = dev->archdata.iommu;
	list_for_each_entry_safe(cur, next, &head->client, client) {
		larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
		portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}
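
/*
 * (Note, not part of the original patch.) MTK_M4U_TO_LARB() and
 * MTK_M4U_TO_PORT() come from dt-bindings/memory/mt8173-larb-port.h,
 * which packs a larb index and a port index into each client's single
 * ID cell. Assuming that header's (larb << 5) | port encoding, a sketch:
 *
 *	unsigned int id = MTK_M4U_ID(2, 5);	// hypothetical: larb 2, port 5
 *	MTK_M4U_TO_LARB(id);			// == 2
 *	MTK_M4U_TO_PORT(id);			// == 5
 */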

static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our support page sizes bitmap */
	mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;

	writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
	       data->base + REG_MMU_PT_BASE_ADDR);
	return 0;
}
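
/*
 * (Note, not part of the original patch.) ARM_V7S selects the ARM
 * short-descriptor format: 4K/64K pages plus 1M sections and 16M
 * supersections, which is why mtk_iommu_ops.pgsize_bitmap is refreshed
 * from dom->cfg.pgsize_bitmap once the table is allocated. With
 * .ias = .oas = 32, both the IOVA space and the physical output space
 * are 32-bit. IO_PGTABLE_QUIRK_TLBI_ON_MAP asks the core to flush the
 * TLB on map as well as unmap, presumably because the M4U can cache
 * not-present entries.
 */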

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;
	int ret;

	if (!priv)
		return -ENODEV;

	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	} else if (data->m4u_dom != dom) {
		/* All the client devices should be in the same m4u domain */
		dev_err(dev, "cannot attach to a different m4u iommu domain\n");
		return -EPERM;
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;

	if (!priv)
		return;

	data = dev_get_drvdata(priv->m4udev);
	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}
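
/*
 * (Sketch, not part of the original patch.) These ops are not called
 * directly; they back the generic IOMMU API. Given a domain that is
 * already attached, a caller would exercise them roughly as:
 *
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	iommu_iova_to_phys(domain, iova);	// returns paddr
 *	iommu_unmap(domain, iova, SZ_4K);	// returns SZ_4K on success
 *
 * (With IO_PGTABLE_QUIRK_NO_PERMS set above, the prot flags are ignored.)
 */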

static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->archdata.iommu) /* Not an iommu client device */
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_client_priv *head, *cur, *next;

	head = dev->archdata.iommu;
	if (!head)
		return;

	list_for_each_entry_safe(cur, next, &head->client, client) {
		list_del(&cur->client);
		kfree(cur);
	}
	kfree(head);
	dev->archdata.iommu = NULL;

	iommu_group_remove_device(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct mtk_iommu_client_priv *priv;

	priv = dev->archdata.iommu;
	if (!priv)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct mtk_iommu_client_priv *head, *priv, *next;
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->archdata.iommu) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		of_node_put(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOMEM;

		dev->archdata.iommu = head;
		INIT_LIST_HEAD(&head->client);
		head->m4udev = &m4updev->dev;
	} else {
		head = dev->archdata.iommu;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto err_free_mem;

	priv->mtk_m4u_id = args->args[0];
	list_add_tail(&priv->client, &head->client);

	return 0;

err_free_mem:
	list_for_each_entry_safe(priv, next, &head->client, client)
		kfree(priv);
	kfree(head);
	dev->archdata.iommu = NULL;
	return -ENOMEM;
}
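
/*
 * (Illustration, not part of the original patch.) of_xlate() consumes
 * one-cell "iommus" specifiers from client nodes. A hypothetical DT
 * snippet, with the port macro taken from mt8173-larb-port.h:
 *
 *	display@1400c000 {
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 *
 * Each ID found this way is appended to the per-device client list that
 * mtk_iommu_config() walks when enabling or disabling larb ports.
 */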

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
		F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
		       data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
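
/*
 * (Summary, not part of the original patch.) Ordering in hw_init matters:
 * the bus clock must be running before any register access; protect_base
 * (the 128-byte-aligned buffer allocated in probe) is programmed so that
 * faulting transactions are steered at harmless memory instead of random
 * physical addresses; and the IRQ is only requested once the
 * interrupt-control registers are in a known state.
 */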

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int mtk_iommu_bind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->smi_imu);
}

static void mtk_iommu_unbind(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->smi_imu);
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	void *protect;
	unsigned int i, larb_nr;
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;

	/* Protect memory. HW will access it when a translation fault occurs. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode))
			continue;

		plarbdev = of_find_device_by_node(larbnode);
		of_node_put(larbnode);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larbnode, NULL,
						platform_bus_type.dev_root);
			if (IS_ERR(plarbdev))
				return -EPROBE_DEFER;
		}
		data->smi_imu.larb_imu[i].dev = &plarbdev->dev;

		component_match_add(dev, &match, compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	free_io_pgtable_ops(data->m4u_dom->iop);
	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	return 0;
}

static int mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
		       base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
		       base + REG_MMU_IVRP_PADDR);
	return 0;
}

const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt8173-m4u", },
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

static int mtk_iommu_init_fn(struct device_node *np)
{
	int ret;
	struct platform_device *pdev;

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		return ret;
	}

	of_iommu_set_ops(np, &mtk_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);
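
/*
 * (Note, not part of the original patch.) IOMMU_OF_DECLARE() hooks
 * mtk_iommu_init_fn() into the early OF IOMMU machinery, so the M4U is
 * set up before its client devices probe. Because the driver selects
 * IOMMU_DMA and allocates IOMMU_DOMAIN_DMA domains, clients in principle
 * need no IOMMU-specific code; a hedged sketch of a multimedia driver
 * simply using the DMA API, with the buffer transparently mapped through
 * the M4U:
 *
 *	dma_addr_t iova;	// will be an M4U IOVA, not a raw PA
 *	void *va = dma_alloc_coherent(dev, SZ_1M, &iova, GFP_KERNEL);
 */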