Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig            |   19
-rw-r--r--  drivers/iommu/Makefile           |    3
-rw-r--r--  drivers/iommu/iommu.c            |   13
-rw-r--r--  drivers/iommu/omap-iommu-debug.c |  418
-rw-r--r--  drivers/iommu/omap-iommu.c       | 1270
-rw-r--r--  drivers/iommu/omap-iovmm.c       |  742
6 files changed, 2457 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b57b3fa492f..d901930a8f8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -107,4 +107,23 @@ config INTR_REMAP
           To use x2apic mode in the CPU's which support x2APIC enhancements or
           to support platforms with CPU's having > 8 bit APIC ID, say Y.
 
+# OMAP IOMMU support
+config OMAP_IOMMU
+        bool "OMAP IOMMU Support"
+        depends on ARCH_OMAP
+        select IOMMU_API
+
+config OMAP_IOVMM
+        tristate
+        select OMAP_IOMMU
+
+config OMAP_IOMMU_DEBUG
+        tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
+        depends on OMAP_IOVMM && DEBUG_FS
+        help
+          Select this to see extensive information about
+          the internal state of OMAP IOMMU/IOVMM in debugfs.
+
+          Say N unless you know you need this.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4d4d77df7ca..f798cdd3699 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
 obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3c..30b06449748 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/module.h>
@@ -97,13 +98,11 @@ EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
               phys_addr_t paddr, int gfp_order, int prot)
 {
-        unsigned long invalid_mask;
         size_t size;
 
-        size = 0x1000UL << gfp_order;
-        invalid_mask = size - 1;
+        size = PAGE_SIZE << gfp_order;
 
-        BUG_ON((iova | paddr) & invalid_mask);
+        BUG_ON(!IS_ALIGNED(iova | paddr, size));
 
         return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
 }
@@ -111,13 +110,11 @@ EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 {
-        unsigned long invalid_mask;
         size_t size;
 
-        size = 0x1000UL << gfp_order;
-        invalid_mask = size - 1;
+        size = PAGE_SIZE << gfp_order;
 
-        BUG_ON(iova & invalid_mask);
+        BUG_ON(!IS_ALIGNED(iova, size));
 
         return iommu_ops->unmap(domain, iova, gfp_order);
 }
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
new file mode 100644
index 00000000000..9c192e79f80
--- /dev/null
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -0,0 +1,418 @@
1/*
2 * omap iommu: debugfs interface
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/uaccess.h>
18#include <linux/platform_device.h>
19#include <linux/debugfs.h>
20
21#include <plat/iommu.h>
22#include <plat/iovmm.h>
23
24#include <plat/iopgtable.h>
25
26#define MAXCOLUMN 100 /* for short messages */
27
28static DEFINE_MUTEX(iommu_debug_lock);
29
30static struct dentry *iommu_debug_root;
31
32static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
33 size_t count, loff_t *ppos)
34{
35 u32 ver = omap_iommu_arch_version();
36 char buf[MAXCOLUMN], *p = buf;
37
38 p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
39
40 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
41}
42
43static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
44 size_t count, loff_t *ppos)
45{
46 struct omap_iommu *obj = file->private_data;
47 char *p, *buf;
48 ssize_t bytes;
49
50 buf = kmalloc(count, GFP_KERNEL);
51 if (!buf)
52 return -ENOMEM;
53 p = buf;
54
55 mutex_lock(&iommu_debug_lock);
56
57 bytes = omap_iommu_dump_ctx(obj, p, count);
58 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
59
60 mutex_unlock(&iommu_debug_lock);
61 kfree(buf);
62
63 return bytes;
64}
65
66static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
67 size_t count, loff_t *ppos)
68{
69 struct omap_iommu *obj = file->private_data;
70 char *p, *buf;
71 ssize_t bytes, rest;
72
73 buf = kmalloc(count, GFP_KERNEL);
74 if (!buf)
75 return -ENOMEM;
76 p = buf;
77
78 mutex_lock(&iommu_debug_lock);
79
80 p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
81 p += sprintf(p, "-----------------------------------------\n");
82 rest = count - (p - buf);
83 p += omap_dump_tlb_entries(obj, p, rest);
84
85 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
86
87 mutex_unlock(&iommu_debug_lock);
88 kfree(buf);
89
90 return bytes;
91}
92
93static ssize_t debug_write_pagetable(struct file *file,
94 const char __user *userbuf, size_t count, loff_t *ppos)
95{
96 struct iotlb_entry e;
97 struct cr_regs cr;
98 int err;
99 struct omap_iommu *obj = file->private_data;
100 char buf[MAXCOLUMN], *p = buf;
101
102 count = min(count, sizeof(buf));
103
104 mutex_lock(&iommu_debug_lock);
105 if (copy_from_user(p, userbuf, count)) {
106 mutex_unlock(&iommu_debug_lock);
107 return -EFAULT;
108 }
109
110 sscanf(p, "%x %x", &cr.cam, &cr.ram);
111 if (!cr.cam || !cr.ram) {
112 mutex_unlock(&iommu_debug_lock);
113 return -EINVAL;
114 }
115
116 omap_iotlb_cr_to_e(&cr, &e);
117 err = omap_iopgtable_store_entry(obj, &e);
118 if (err)
119 dev_err(obj->dev, "%s: fail to store cr\n", __func__);
120
121 mutex_unlock(&iommu_debug_lock);
122 return count;
123}
124
125#define dump_ioptable_entry_one(lv, da, val) \
126 ({ \
127 int __err = 0; \
128 ssize_t bytes; \
129 const int maxcol = 22; \
130 const char *str = "%d: %08x %08x\n"; \
131 bytes = snprintf(p, maxcol, str, lv, da, val); \
132 p += bytes; \
133 len -= bytes; \
134 if (len < maxcol) \
135 __err = -ENOMEM; \
136 __err; \
137 })
138
139static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
140{
141 int i;
142 u32 *iopgd;
143 char *p = buf;
144
145 spin_lock(&obj->page_table_lock);
146
147 iopgd = iopgd_offset(obj, 0);
148 for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
149 int j, err;
150 u32 *iopte;
151 u32 da;
152
153 if (!*iopgd)
154 continue;
155
156 if (!(*iopgd & IOPGD_TABLE)) {
157 da = i << IOPGD_SHIFT;
158
159 err = dump_ioptable_entry_one(1, da, *iopgd);
160 if (err)
161 goto out;
162 continue;
163 }
164
165 iopte = iopte_offset(iopgd, 0);
166
167 for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
168 if (!*iopte)
169 continue;
170
171 da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
172 err = dump_ioptable_entry_one(2, da, *iopte);
173 if (err)
174 goto out;
175 }
176 }
177out:
178 spin_unlock(&obj->page_table_lock);
179
180 return p - buf;
181}
182
183static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
184 size_t count, loff_t *ppos)
185{
186 struct omap_iommu *obj = file->private_data;
187 char *p, *buf;
188 size_t bytes;
189
190 buf = (char *)__get_free_page(GFP_KERNEL);
191 if (!buf)
192 return -ENOMEM;
193 p = buf;
194
195 p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
196 p += sprintf(p, "-----------------------------------------\n");
197
198 mutex_lock(&iommu_debug_lock);
199
200 bytes = PAGE_SIZE - (p - buf);
201 p += dump_ioptable(obj, p, bytes);
202
203 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
204
205 mutex_unlock(&iommu_debug_lock);
206 free_page((unsigned long)buf);
207
208 return bytes;
209}
210
211static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
212 size_t count, loff_t *ppos)
213{
214 struct omap_iommu *obj = file->private_data;
215 char *p, *buf;
216 struct iovm_struct *tmp;
217 int i = 0;
218 ssize_t bytes;
219
220 buf = (char *)__get_free_page(GFP_KERNEL);
221 if (!buf)
222 return -ENOMEM;
223 p = buf;
224
225 p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
226 "No", "start", "end", "size", "flags");
227 p += sprintf(p, "-------------------------------------------------\n");
228
229 mutex_lock(&iommu_debug_lock);
230
231 list_for_each_entry(tmp, &obj->mmap, list) {
232 size_t len;
233 const char *str = "%3d %08x-%08x %6x %8x\n";
234 const int maxcol = 39;
235
236 len = tmp->da_end - tmp->da_start;
237 p += snprintf(p, maxcol, str,
238 i, tmp->da_start, tmp->da_end, len, tmp->flags);
239
240 if (PAGE_SIZE - (p - buf) < maxcol)
241 break;
242 i++;
243 }
244
245 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
246
247 mutex_unlock(&iommu_debug_lock);
248 free_page((unsigned long)buf);
249
250 return bytes;
251}
252
253static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
254 size_t count, loff_t *ppos)
255{
256 struct omap_iommu *obj = file->private_data;
257 char *p, *buf;
258 struct iovm_struct *area;
259 ssize_t bytes;
260
261 count = min_t(ssize_t, count, PAGE_SIZE);
262
263 buf = (char *)__get_free_page(GFP_KERNEL);
264 if (!buf)
265 return -ENOMEM;
266 p = buf;
267
268 mutex_lock(&iommu_debug_lock);
269
270 area = omap_find_iovm_area(obj, (u32)*ppos);
271 if (IS_ERR(area)) {
272 bytes = -EINVAL;
273 goto err_out;
274 }
275 memcpy(p, area->va, count);
276 p += count;
277
278 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
279err_out:
280 mutex_unlock(&iommu_debug_lock);
281 free_page((unsigned long)buf);
282
283 return bytes;
284}
285
286static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
287 size_t count, loff_t *ppos)
288{
289 struct omap_iommu *obj = file->private_data;
290 struct iovm_struct *area;
291 char *p, *buf;
292
293 count = min_t(size_t, count, PAGE_SIZE);
294
295 buf = (char *)__get_free_page(GFP_KERNEL);
296 if (!buf)
297 return -ENOMEM;
298 p = buf;
299
300 mutex_lock(&iommu_debug_lock);
301
302 if (copy_from_user(p, userbuf, count)) {
303 count = -EFAULT;
304 goto err_out;
305 }
306
307 area = omap_find_iovm_area(obj, (u32)*ppos);
308 if (IS_ERR(area)) {
309 count = -EINVAL;
310 goto err_out;
311 }
312 memcpy(area->va, p, count);
313err_out:
314 mutex_unlock(&iommu_debug_lock);
315 free_page((unsigned long)buf);
316
317 return count;
318}
319
320static int debug_open_generic(struct inode *inode, struct file *file)
321{
322 file->private_data = inode->i_private;
323 return 0;
324}
325
326#define DEBUG_FOPS(name) \
327 static const struct file_operations debug_##name##_fops = { \
328 .open = debug_open_generic, \
329 .read = debug_read_##name, \
330 .write = debug_write_##name, \
331 .llseek = generic_file_llseek, \
332 };
333
334#define DEBUG_FOPS_RO(name) \
335 static const struct file_operations debug_##name##_fops = { \
336 .open = debug_open_generic, \
337 .read = debug_read_##name, \
338 .llseek = generic_file_llseek, \
339 };
340
341DEBUG_FOPS_RO(ver);
342DEBUG_FOPS_RO(regs);
343DEBUG_FOPS_RO(tlb);
344DEBUG_FOPS(pagetable);
345DEBUG_FOPS_RO(mmap);
346DEBUG_FOPS(mem);
347
348#define __DEBUG_ADD_FILE(attr, mode) \
349 { \
350 struct dentry *dent; \
351 dent = debugfs_create_file(#attr, mode, parent, \
352 obj, &debug_##attr##_fops); \
353 if (!dent) \
354 return -ENOMEM; \
355 }
356
357#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
358#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
359
360static int iommu_debug_register(struct device *dev, void *data)
361{
362 struct platform_device *pdev = to_platform_device(dev);
363 struct omap_iommu *obj = platform_get_drvdata(pdev);
364 struct dentry *d, *parent;
365
366 if (!obj || !obj->dev)
367 return -EINVAL;
368
369 d = debugfs_create_dir(obj->name, iommu_debug_root);
370 if (!d)
371 return -ENOMEM;
372 parent = d;
373
374 d = debugfs_create_u8("nr_tlb_entries", 0400, parent,
375 (u8 *)&obj->nr_tlb_entries);
376 if (!d)
377 return -ENOMEM;
378
379 DEBUG_ADD_FILE_RO(ver);
380 DEBUG_ADD_FILE_RO(regs);
381 DEBUG_ADD_FILE_RO(tlb);
382 DEBUG_ADD_FILE(pagetable);
383 DEBUG_ADD_FILE_RO(mmap);
384 DEBUG_ADD_FILE(mem);
385
386 return 0;
387}
388
389static int __init iommu_debug_init(void)
390{
391 struct dentry *d;
392 int err;
393
394 d = debugfs_create_dir("iommu", NULL);
395 if (!d)
396 return -ENOMEM;
397 iommu_debug_root = d;
398
399 err = omap_foreach_iommu_device(d, iommu_debug_register);
400 if (err)
401 goto err_out;
402 return 0;
403
404err_out:
405 debugfs_remove_recursive(iommu_debug_root);
406 return err;
407}
408module_init(iommu_debug_init)
409
410static void __exit iommu_debugfs_exit(void)
411{
412 debugfs_remove_recursive(iommu_debug_root);
413}
414module_exit(iommu_debugfs_exit)
415
416MODULE_DESCRIPTION("omap iommu: debugfs interface");
417MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
418MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
new file mode 100644
index 00000000000..bd5f6064c74
--- /dev/null
+++ b/drivers/iommu/omap-iommu.c
@@ -0,0 +1,1270 @@
1/*
2 * omap iommu: tlb and pagetable primitives
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h>
21#include <linux/iommu.h>
22#include <linux/mutex.h>
23#include <linux/spinlock.h>
24
25#include <asm/cacheflush.h>
26
27#include <plat/iommu.h>
28
29#include <plat/iopgtable.h>
30
31#define for_each_iotlb_cr(obj, n, __i, cr) \
32 for (__i = 0; \
33 (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
34 __i++)
35
36/**
37 * struct omap_iommu_domain - omap iommu domain
38 * @pgtable: the page table
39 * @iommu_dev: an omap iommu device attached to this domain. only a single
40 * iommu device can be attached for now.
41 * @lock: domain lock, should be taken when attaching/detaching
42 */
43struct omap_iommu_domain {
44 u32 *pgtable;
45 struct omap_iommu *iommu_dev;
46 spinlock_t lock;
47};
48
49/* accommodate the difference between omap1 and omap2/3 */
50static const struct iommu_functions *arch_iommu;
51
52static struct platform_driver omap_iommu_driver;
53static struct kmem_cache *iopte_cachep;
54
55/**
56 * omap_install_iommu_arch - Install architecture specific iommu functions
57 * @ops: a pointer to architecture specific iommu functions
58 *
59 * There are several kinds of iommu algorithms (tlb, pagetable) among the
60 * omap series. This interface installs such an iommu algorithm.
61 **/
62int omap_install_iommu_arch(const struct iommu_functions *ops)
63{
64 if (arch_iommu)
65 return -EBUSY;
66
67 arch_iommu = ops;
68 return 0;
69}
70EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
71
72/**
73 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
74 * @ops: a pointer to architecture specific iommu functions
75 *
76 * This interface uninstalls the iommu algorithm installed previously.
77 **/
78void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
79{
80 if (arch_iommu != ops)
81 pr_err("%s: not your arch\n", __func__);
82
83 arch_iommu = NULL;
84}
85EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
86
87/**
88 * omap_iommu_save_ctx - Save registers for pm off-mode support
89 * @obj: target iommu
90 **/
91void omap_iommu_save_ctx(struct omap_iommu *obj)
92{
93 arch_iommu->save_ctx(obj);
94}
95EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
96
97/**
98 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
99 * @obj: target iommu
100 **/
101void omap_iommu_restore_ctx(struct omap_iommu *obj)
102{
103 arch_iommu->restore_ctx(obj);
104}
105EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
106
107/**
108 * omap_iommu_arch_version - Return running iommu arch version
109 **/
110u32 omap_iommu_arch_version(void)
111{
112 return arch_iommu->version;
113}
114EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
115
116static int iommu_enable(struct omap_iommu *obj)
117{
118 int err;
119
120 if (!obj)
121 return -EINVAL;
122
123 if (!arch_iommu)
124 return -ENODEV;
125
126 clk_enable(obj->clk);
127
128 err = arch_iommu->enable(obj);
129
130 clk_disable(obj->clk);
131 return err;
132}
133
134static void iommu_disable(struct omap_iommu *obj)
135{
136 if (!obj)
137 return;
138
139 clk_enable(obj->clk);
140
141 arch_iommu->disable(obj);
142
143 clk_disable(obj->clk);
144}
145
146/*
147 * TLB operations
148 */
149void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
150{
151 BUG_ON(!cr || !e);
152
153 arch_iommu->cr_to_e(cr, e);
154}
155EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
156
157static inline int iotlb_cr_valid(struct cr_regs *cr)
158{
159 if (!cr)
160 return -EINVAL;
161
162 return arch_iommu->cr_valid(cr);
163}
164
165static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
166 struct iotlb_entry *e)
167{
168 if (!e)
169 return NULL;
170
171 return arch_iommu->alloc_cr(obj, e);
172}
173
174static u32 iotlb_cr_to_virt(struct cr_regs *cr)
175{
176 return arch_iommu->cr_to_virt(cr);
177}
178
179static u32 get_iopte_attr(struct iotlb_entry *e)
180{
181 return arch_iommu->get_pte_attr(e);
182}
183
184static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
185{
186 return arch_iommu->fault_isr(obj, da);
187}
188
189static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
190{
191 u32 val;
192
193 val = iommu_read_reg(obj, MMU_LOCK);
194
195 l->base = MMU_LOCK_BASE(val);
196 l->vict = MMU_LOCK_VICT(val);
197
198}
199
200static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
201{
202 u32 val;
203
204 val = (l->base << MMU_LOCK_BASE_SHIFT);
205 val |= (l->vict << MMU_LOCK_VICT_SHIFT);
206
207 iommu_write_reg(obj, val, MMU_LOCK);
208}
209
210static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
211{
212 arch_iommu->tlb_read_cr(obj, cr);
213}
214
215static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
216{
217 arch_iommu->tlb_load_cr(obj, cr);
218
219 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
220 iommu_write_reg(obj, 1, MMU_LD_TLB);
221}
222
223/**
224 * iotlb_dump_cr - Dump an iommu tlb entry into buf
225 * @obj: target iommu
226 * @cr: contents of cam and ram register
227 * @buf: output buffer
228 **/
229static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
230 char *buf)
231{
232 BUG_ON(!cr || !buf);
233
234 return arch_iommu->dump_cr(obj, cr, buf);
235}
236
237/* only used in iotlb iteration for-loop */
238static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
239{
240 struct cr_regs cr;
241 struct iotlb_lock l;
242
243 iotlb_lock_get(obj, &l);
244 l.vict = n;
245 iotlb_lock_set(obj, &l);
246 iotlb_read_cr(obj, &cr);
247
248 return cr;
249}
250
251/**
252 * load_iotlb_entry - Set an iommu tlb entry
253 * @obj: target iommu
254 * @e: an iommu tlb entry info
255 **/
256#ifdef PREFETCH_IOTLB
257static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
258{
259 int err = 0;
260 struct iotlb_lock l;
261 struct cr_regs *cr;
262
263 if (!obj || !obj->nr_tlb_entries || !e)
264 return -EINVAL;
265
266 clk_enable(obj->clk);
267
268 iotlb_lock_get(obj, &l);
269 if (l.base == obj->nr_tlb_entries) {
270 dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
271 err = -EBUSY;
272 goto out;
273 }
274 if (!e->prsvd) {
275 int i;
276 struct cr_regs tmp;
277
278 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
279 if (!iotlb_cr_valid(&tmp))
280 break;
281
282 if (i == obj->nr_tlb_entries) {
283 dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
284 err = -EBUSY;
285 goto out;
286 }
287
288 iotlb_lock_get(obj, &l);
289 } else {
290 l.vict = l.base;
291 iotlb_lock_set(obj, &l);
292 }
293
294 cr = iotlb_alloc_cr(obj, e);
295 if (IS_ERR(cr)) {
296 clk_disable(obj->clk);
297 return PTR_ERR(cr);
298 }
299
300 iotlb_load_cr(obj, cr);
301 kfree(cr);
302
303 if (e->prsvd)
304 l.base++;
305 /* increment victim for next tlb load */
306 if (++l.vict == obj->nr_tlb_entries)
307 l.vict = l.base;
308 iotlb_lock_set(obj, &l);
309out:
310 clk_disable(obj->clk);
311 return err;
312}
313
314#else /* !PREFETCH_IOTLB */
315
316static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
317{
318 return 0;
319}
320
321#endif /* !PREFETCH_IOTLB */
322
323static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
324{
325 return load_iotlb_entry(obj, e);
326}
327
328/**
329 * flush_iotlb_page - Clear an iommu tlb entry
330 * @obj: target iommu
331 * @da: iommu device virtual address
332 *
333 * Clear an iommu tlb entry which includes 'da' address.
334 **/
335static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
336{
337 int i;
338 struct cr_regs cr;
339
340 clk_enable(obj->clk);
341
342 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
343 u32 start;
344 size_t bytes;
345
346 if (!iotlb_cr_valid(&cr))
347 continue;
348
349 start = iotlb_cr_to_virt(&cr);
350 bytes = iopgsz_to_bytes(cr.cam & 3);
351
352 if ((start <= da) && (da < start + bytes)) {
353 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
354 __func__, start, da, bytes);
355 iotlb_load_cr(obj, &cr);
356 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
357 }
358 }
359 clk_disable(obj->clk);
360
361 if (i == obj->nr_tlb_entries)
362 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
363}
364
365/**
366 * flush_iotlb_all - Clear all iommu tlb entries
367 * @obj: target iommu
368 **/
369static void flush_iotlb_all(struct omap_iommu *obj)
370{
371 struct iotlb_lock l;
372
373 clk_enable(obj->clk);
374
375 l.base = 0;
376 l.vict = 0;
377 iotlb_lock_set(obj, &l);
378
379 iommu_write_reg(obj, 1, MMU_GFLUSH);
380
381 clk_disable(obj->clk);
382}
383
384#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
385
386ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
387{
388 if (!obj || !buf)
389 return -EINVAL;
390
391 clk_enable(obj->clk);
392
393 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
394
395 clk_disable(obj->clk);
396
397 return bytes;
398}
399EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
400
401static int
402__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
403{
404 int i;
405 struct iotlb_lock saved;
406 struct cr_regs tmp;
407 struct cr_regs *p = crs;
408
409 clk_enable(obj->clk);
410 iotlb_lock_get(obj, &saved);
411
412 for_each_iotlb_cr(obj, num, i, tmp) {
413 if (!iotlb_cr_valid(&tmp))
414 continue;
415 *p++ = tmp;
416 }
417
418 iotlb_lock_set(obj, &saved);
419 clk_disable(obj->clk);
420
421 return p - crs;
422}
423
424/**
425 * omap_dump_tlb_entries - dump cr arrays to given buffer
426 * @obj: target iommu
427 * @buf: output buffer
428 **/
429size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
430{
431 int i, num;
432 struct cr_regs *cr;
433 char *p = buf;
434
435 num = bytes / sizeof(*cr);
436 num = min(obj->nr_tlb_entries, num);
437
438 cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
439 if (!cr)
440 return 0;
441
442 num = __dump_tlb_entries(obj, cr, num);
443 for (i = 0; i < num; i++)
444 p += iotlb_dump_cr(obj, cr + i, p);
445 kfree(cr);
446
447 return p - buf;
448}
449EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
450
451int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
452{
453 return driver_for_each_device(&omap_iommu_driver.driver,
454 NULL, data, fn);
455}
456EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
457
458#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
459
460/*
461 * H/W pagetable operations
462 */
463static void flush_iopgd_range(u32 *first, u32 *last)
464{
465 /* FIXME: L2 cache should be taken care of if it exists */
466 do {
467 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
468 : : "r" (first));
469 first += L1_CACHE_BYTES / sizeof(*first);
470 } while (first <= last);
471}
472
473static void flush_iopte_range(u32 *first, u32 *last)
474{
475 /* FIXME: L2 cache should be taken care of if it exists */
476 do {
477 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
478 : : "r" (first));
479 first += L1_CACHE_BYTES / sizeof(*first);
480 } while (first <= last);
481}
482
483static void iopte_free(u32 *iopte)
484{
485 /* Note: freed iopte's must be clean ready for re-use */
486 kmem_cache_free(iopte_cachep, iopte);
487}
488
489static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
490{
491 u32 *iopte;
492
493 /* a table already exists */
494 if (*iopgd)
495 goto pte_ready;
496
497 /*
498 * do the allocation outside the page table lock
499 */
500 spin_unlock(&obj->page_table_lock);
501 iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
502 spin_lock(&obj->page_table_lock);
503
504 if (!*iopgd) {
505 if (!iopte)
506 return ERR_PTR(-ENOMEM);
507
508 *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
509 flush_iopgd_range(iopgd, iopgd);
510
511 dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
512 } else {
513 /* We raced, free the redundant table */
514 iopte_free(iopte);
515 }
516
517pte_ready:
518 iopte = iopte_offset(iopgd, da);
519
520 dev_vdbg(obj->dev,
521 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
522 __func__, da, iopgd, *iopgd, iopte, *iopte);
523
524 return iopte;
525}
526
527static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
528{
529 u32 *iopgd = iopgd_offset(obj, da);
530
531 if ((da | pa) & ~IOSECTION_MASK) {
532 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
533 __func__, da, pa, IOSECTION_SIZE);
534 return -EINVAL;
535 }
536
537 *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
538 flush_iopgd_range(iopgd, iopgd);
539 return 0;
540}
541
542static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
543{
544 u32 *iopgd = iopgd_offset(obj, da);
545 int i;
546
547 if ((da | pa) & ~IOSUPER_MASK) {
548 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
549 __func__, da, pa, IOSUPER_SIZE);
550 return -EINVAL;
551 }
552
553 for (i = 0; i < 16; i++)
554 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
555 flush_iopgd_range(iopgd, iopgd + 15);
556 return 0;
557}
558
559static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
560{
561 u32 *iopgd = iopgd_offset(obj, da);
562 u32 *iopte = iopte_alloc(obj, iopgd, da);
563
564 if (IS_ERR(iopte))
565 return PTR_ERR(iopte);
566
567 *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
568 flush_iopte_range(iopte, iopte);
569
570 dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
571 __func__, da, pa, iopte, *iopte);
572
573 return 0;
574}
575
576static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
577{
578 u32 *iopgd = iopgd_offset(obj, da);
579 u32 *iopte = iopte_alloc(obj, iopgd, da);
580 int i;
581
582 if ((da | pa) & ~IOLARGE_MASK) {
583 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
584 __func__, da, pa, IOLARGE_SIZE);
585 return -EINVAL;
586 }
587
588 if (IS_ERR(iopte))
589 return PTR_ERR(iopte);
590
591 for (i = 0; i < 16; i++)
592 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
593 flush_iopte_range(iopte, iopte + 15);
594 return 0;
595}
596
597static int
598iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
599{
600 int (*fn)(struct omap_iommu *, u32, u32, u32);
601 u32 prot;
602 int err;
603
604 if (!obj || !e)
605 return -EINVAL;
606
607 switch (e->pgsz) {
608 case MMU_CAM_PGSZ_16M:
609 fn = iopgd_alloc_super;
610 break;
611 case MMU_CAM_PGSZ_1M:
612 fn = iopgd_alloc_section;
613 break;
614 case MMU_CAM_PGSZ_64K:
615 fn = iopte_alloc_large;
616 break;
617 case MMU_CAM_PGSZ_4K:
618 fn = iopte_alloc_page;
619 break;
620 default:
621 fn = NULL;
622 BUG();
623 break;
624 }
625
626 prot = get_iopte_attr(e);
627
628 spin_lock(&obj->page_table_lock);
629 err = fn(obj, e->da, e->pa, prot);
630 spin_unlock(&obj->page_table_lock);
631
632 return err;
633}
634
635/**
636 * omap_iopgtable_store_entry - Make an iommu pte entry
637 * @obj: target iommu
638 * @e: an iommu tlb entry info
639 **/
640int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
641{
642 int err;
643
644 flush_iotlb_page(obj, e->da);
645 err = iopgtable_store_entry_core(obj, e);
646 if (!err)
647 prefetch_iotlb_entry(obj, e);
648 return err;
649}
650EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
651
652/**
653 * iopgtable_lookup_entry - Lookup an iommu pte entry
654 * @obj: target iommu
655 * @da: iommu device virtual address
656 * @ppgd: iommu pgd entry pointer to be returned
657 * @ppte: iommu pte entry pointer to be returned
658 **/
659static void
660iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
661{
662 u32 *iopgd, *iopte = NULL;
663
664 iopgd = iopgd_offset(obj, da);
665 if (!*iopgd)
666 goto out;
667
668 if (iopgd_is_table(*iopgd))
669 iopte = iopte_offset(iopgd, da);
670out:
671 *ppgd = iopgd;
672 *ppte = iopte;
673}
674
675static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
676{
677 size_t bytes;
678 u32 *iopgd = iopgd_offset(obj, da);
679 int nent = 1;
680
681 if (!*iopgd)
682 return 0;
683
684 if (iopgd_is_table(*iopgd)) {
685 int i;
686 u32 *iopte = iopte_offset(iopgd, da);
687
688 bytes = IOPTE_SIZE;
689 if (*iopte & IOPTE_LARGE) {
690 nent *= 16;
691 /* rewind to the 1st entry */
692 iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
693 }
694 bytes *= nent;
695 memset(iopte, 0, nent * sizeof(*iopte));
696 flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
697
698 /*
699 * do table walk to check if this table is necessary or not
700 */
701 iopte = iopte_offset(iopgd, 0);
702 for (i = 0; i < PTRS_PER_IOPTE; i++)
703 if (iopte[i])
704 goto out;
705
706 iopte_free(iopte);
707 nent = 1; /* for the next L1 entry */
708 } else {
709 bytes = IOPGD_SIZE;
710 if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
711 nent *= 16;
712 /* rewind to the 1st entry */
713 iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
714 }
715 bytes *= nent;
716 }
717 memset(iopgd, 0, nent * sizeof(*iopgd));
718 flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
719out:
720 return bytes;
721}
722
723/**
724 * iopgtable_clear_entry - Remove an iommu pte entry
725 * @obj: target iommu
726 * @da: iommu device virtual address
727 **/
728static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
729{
730 size_t bytes;
731
732 spin_lock(&obj->page_table_lock);
733
734 bytes = iopgtable_clear_entry_core(obj, da);
735 flush_iotlb_page(obj, da);
736
737 spin_unlock(&obj->page_table_lock);
738
739 return bytes;
740}
741
742static void iopgtable_clear_entry_all(struct omap_iommu *obj)
743{
744 int i;
745
746 spin_lock(&obj->page_table_lock);
747
748 for (i = 0; i < PTRS_PER_IOPGD; i++) {
749 u32 da;
750 u32 *iopgd;
751
752 da = i << IOPGD_SHIFT;
753 iopgd = iopgd_offset(obj, da);
754
755 if (!*iopgd)
756 continue;
757
758 if (iopgd_is_table(*iopgd))
759 iopte_free(iopte_offset(iopgd, 0));
760
761 *iopgd = 0;
762 flush_iopgd_range(iopgd, iopgd);
763 }
764
765 flush_iotlb_all(obj);
766
767 spin_unlock(&obj->page_table_lock);
768}
769
770/*
771 * Device IOMMU generic operations
772 */
773static irqreturn_t iommu_fault_handler(int irq, void *data)
774{
775 u32 da, errs;
776 u32 *iopgd, *iopte;
777 struct omap_iommu *obj = data;
778
779 if (!obj->refcount)
780 return IRQ_NONE;
781
782 clk_enable(obj->clk);
783 errs = iommu_report_fault(obj, &da);
784 clk_disable(obj->clk);
785 if (errs == 0)
786 return IRQ_HANDLED;
787
788 /* Fault callback or TLB/PTE Dynamic loading */
789 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
790 return IRQ_HANDLED;
791
792 iommu_disable(obj);
793
794 iopgd = iopgd_offset(obj, da);
795
796 if (!iopgd_is_table(*iopgd)) {
797 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
798 "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
799 return IRQ_NONE;
800 }
801
802 iopte = iopte_offset(iopgd, da);
803
804 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
805 "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
806 iopte, *iopte);
807
808 return IRQ_NONE;
809}
810
811static int device_match_by_alias(struct device *dev, void *data)
812{
813 struct omap_iommu *obj = to_iommu(dev);
814 const char *name = data;
815
816 pr_debug("%s: %s %s\n", __func__, obj->name, name);
817
818 return strcmp(obj->name, name) == 0;
819}
820
821/**
822 * omap_find_iommu_device() - find an omap iommu device by name
823 * @name: name of the iommu device
824 *
825 * The generic iommu API requires the caller to provide the device
826 * he wishes to attach to a certain iommu domain.
827 *
828 * Drivers generally should not bother with this as it should just
829 * be taken care of by the DMA-API using dev_archdata.
830 *
831 * This function is provided as an interim solution until the latter
832 * materializes, and omap3isp is fully migrated to the DMA-API.
833 */
834struct device *omap_find_iommu_device(const char *name)
835{
836 return driver_find_device(&omap_iommu_driver.driver, NULL,
837 (void *)name,
838 device_match_by_alias);
839}
840EXPORT_SYMBOL_GPL(omap_find_iommu_device);
841
842/**
843 * omap_iommu_attach() - attach iommu device to an iommu domain
844 * @dev: target omap iommu device
845 * @iopgd: page table
846 **/
847static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
848{
849 int err = -ENOMEM;
850 struct omap_iommu *obj = to_iommu(dev);
851
852 spin_lock(&obj->iommu_lock);
853
854 /* an iommu device can only be attached once */
855 if (++obj->refcount > 1) {
856 dev_err(dev, "%s: already attached!\n", obj->name);
857 err = -EBUSY;
858 goto err_enable;
859 }
860
861 obj->iopgd = iopgd;
862 err = iommu_enable(obj);
863 if (err)
864 goto err_enable;
865 flush_iotlb_all(obj);
866
867 if (!try_module_get(obj->owner))
868 goto err_module;
869
870 spin_unlock(&obj->iommu_lock);
871
872 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
873 return obj;
874
875err_module:
876 if (obj->refcount == 1)
877 iommu_disable(obj);
878err_enable:
879 obj->refcount--;
880 spin_unlock(&obj->iommu_lock);
881 return ERR_PTR(err);
882}
883
884/**
885 * omap_iommu_detach - release iommu device
886 * @obj: target iommu
887 **/
888static void omap_iommu_detach(struct omap_iommu *obj)
889{
890 if (!obj || IS_ERR(obj))
891 return;
892
893 spin_lock(&obj->iommu_lock);
894
895 if (--obj->refcount == 0)
896 iommu_disable(obj);
897
898 module_put(obj->owner);
899
900 obj->iopgd = NULL;
901
902 spin_unlock(&obj->iommu_lock);
903
904 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
905}
906
907int omap_iommu_set_isr(const char *name,
908 int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
909 void *priv),
910 void *isr_priv)
911{
912 struct device *dev;
913 struct omap_iommu *obj;
914
915 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
916 device_match_by_alias);
917 if (!dev)
918 return -ENODEV;
919
920 obj = to_iommu(dev);
921 spin_lock(&obj->iommu_lock);
922 if (obj->refcount != 0) {
923 spin_unlock(&obj->iommu_lock);
924 return -EBUSY;
925 }
926 obj->isr = isr;
927 obj->isr_priv = isr_priv;
928 spin_unlock(&obj->iommu_lock);
929
930 return 0;
931}
932EXPORT_SYMBOL_GPL(omap_iommu_set_isr);
933
934/*
935 * OMAP Device MMU(IOMMU) detection
936 */
937static int __devinit omap_iommu_probe(struct platform_device *pdev)
938{
939 int err = -ENODEV;
940 int irq;
941 struct omap_iommu *obj;
942 struct resource *res;
943 struct iommu_platform_data *pdata = pdev->dev.platform_data;
944
945 if (pdev->num_resources != 2)
946 return -EINVAL;
947
948 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
949 if (!obj)
950 return -ENOMEM;
951
952 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
953 if (IS_ERR(obj->clk))
954 goto err_clk;
955
956 obj->nr_tlb_entries = pdata->nr_tlb_entries;
957 obj->name = pdata->name;
958 obj->dev = &pdev->dev;
959 obj->ctx = (void *)obj + sizeof(*obj);
960 obj->da_start = pdata->da_start;
961 obj->da_end = pdata->da_end;
962
963 spin_lock_init(&obj->iommu_lock);
964 mutex_init(&obj->mmap_lock);
965 spin_lock_init(&obj->page_table_lock);
966 INIT_LIST_HEAD(&obj->mmap);
967
968 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
969 if (!res) {
970 err = -ENODEV;
971 goto err_mem;
972 }
973
974 res = request_mem_region(res->start, resource_size(res),
975 dev_name(&pdev->dev));
976 if (!res) {
977 err = -EIO;
978 goto err_mem;
979 }
980
981 obj->regbase = ioremap(res->start, resource_size(res));
982 if (!obj->regbase) {
983 err = -ENOMEM;
984 goto err_ioremap;
985 }
986
987 irq = platform_get_irq(pdev, 0);
988 if (irq < 0) {
989 err = -ENODEV;
990 goto err_irq;
991 }
992 err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
993 dev_name(&pdev->dev), obj);
994 if (err < 0)
995 goto err_irq;
996 platform_set_drvdata(pdev, obj);
997
998 dev_info(&pdev->dev, "%s registered\n", obj->name);
999 return 0;
1000
1001err_irq:
1002 iounmap(obj->regbase);
1003err_ioremap:
1004 release_mem_region(res->start, resource_size(res));
1005err_mem:
1006 clk_put(obj->clk);
1007err_clk:
1008 kfree(obj);
1009 return err;
1010}
1011
1012static int __devexit omap_iommu_remove(struct platform_device *pdev)
1013{
1014 int irq;
1015 struct resource *res;
1016 struct omap_iommu *obj = platform_get_drvdata(pdev);
1017
1018 platform_set_drvdata(pdev, NULL);
1019
1020 iopgtable_clear_entry_all(obj);
1021
1022 irq = platform_get_irq(pdev, 0);
1023 free_irq(irq, obj);
1024 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1025 release_mem_region(res->start, resource_size(res));
1026 iounmap(obj->regbase);
1027
1028 clk_put(obj->clk);
1029 dev_info(&pdev->dev, "%s removed\n", obj->name);
1030 kfree(obj);
1031 return 0;
1032}
1033
1034static struct platform_driver omap_iommu_driver = {
1035 .probe = omap_iommu_probe,
1036 .remove = __devexit_p(omap_iommu_remove),
1037 .driver = {
1038 .name = "omap-iommu",
1039 },
1040};
1041
1042static void iopte_cachep_ctor(void *iopte)
1043{
1044 clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1045}
1046
1047static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1048 phys_addr_t pa, int order, int prot)
1049{
1050 struct omap_iommu_domain *omap_domain = domain->priv;
1051 struct omap_iommu *oiommu = omap_domain->iommu_dev;
1052 struct device *dev = oiommu->dev;
1053 size_t bytes = PAGE_SIZE << order;
1054 struct iotlb_entry e;
1055 int omap_pgsz;
1056 u32 ret, flags;
1057
1058 /* we only support mapping a single iommu page for now */
1059 omap_pgsz = bytes_to_iopgsz(bytes);
1060 if (omap_pgsz < 0) {
1061 dev_err(dev, "invalid size to map: %d\n", bytes);
1062 return -EINVAL;
1063 }
1064
1065 dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
1066
1067 flags = omap_pgsz | prot;
1068
1069 iotlb_init_entry(&e, da, pa, flags);
1070
1071 ret = omap_iopgtable_store_entry(oiommu, &e);
1072 if (ret)
1073 dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
1074
1075 return ret;
1076}
1077
1078static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1079 int order)
1080{
1081 struct omap_iommu_domain *omap_domain = domain->priv;
1082 struct omap_iommu *oiommu = omap_domain->iommu_dev;
1083 struct device *dev = oiommu->dev;
1084 size_t unmap_size;
1085
1086 dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
1087
1088 unmap_size = iopgtable_clear_entry(oiommu, da);
1089
1090 return unmap_size ? get_order(unmap_size) : -EINVAL;
1091}
1092
1093static int
1094omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1095{
1096 struct omap_iommu_domain *omap_domain = domain->priv;
1097 struct omap_iommu *oiommu;
1098 int ret = 0;
1099
1100 spin_lock(&omap_domain->lock);
1101
1102 /* only a single device is supported per domain for now */
1103 if (omap_domain->iommu_dev) {
1104 dev_err(dev, "iommu domain is already attached\n");
1105 ret = -EBUSY;
1106 goto out;
1107 }
1108
1109 /* get a handle to and enable the omap iommu */
1110 oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
1111 if (IS_ERR(oiommu)) {
1112 ret = PTR_ERR(oiommu);
1113 dev_err(dev, "can't get omap iommu: %d\n", ret);
1114 goto out;
1115 }
1116
1117 omap_domain->iommu_dev = oiommu;
1118
1119out:
1120 spin_unlock(&omap_domain->lock);
1121 return ret;
1122}
1123
1124static void omap_iommu_detach_dev(struct iommu_domain *domain,
1125 struct device *dev)
1126{
1127 struct omap_iommu_domain *omap_domain = domain->priv;
1128 struct omap_iommu *oiommu = to_iommu(dev);
1129
1130 spin_lock(&omap_domain->lock);
1131
1132 /* only a single device is supported per domain for now */
1133 if (omap_domain->iommu_dev != oiommu) {
1134 dev_err(dev, "invalid iommu device\n");
1135 goto out;
1136 }
1137
1138 iopgtable_clear_entry_all(oiommu);
1139
1140 omap_iommu_detach(oiommu);
1141
1142 omap_domain->iommu_dev = NULL;
1143
1144out:
1145 spin_unlock(&omap_domain->lock);
1146}
1147
1148static int omap_iommu_domain_init(struct iommu_domain *domain)
1149{
1150 struct omap_iommu_domain *omap_domain;
1151
1152 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1153 if (!omap_domain) {
1154 pr_err("kzalloc failed\n");
1155 goto out;
1156 }
1157
1158 omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
1159 if (!omap_domain->pgtable) {
1160 pr_err("kzalloc failed\n");
1161 goto fail_nomem;
1162 }
1163
1164 /*
1165 * should never fail, but please keep this around to ensure
1166 * we keep the hardware happy
1167 */
1168 BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
1169
1170 clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
1171 spin_lock_init(&omap_domain->lock);
1172
1173 domain->priv = omap_domain;
1174
1175 return 0;
1176
1177fail_nomem:
1178 kfree(omap_domain);
1179out:
1180 return -ENOMEM;
1181}
1182
1183/* assume device was already detached */
1184static void omap_iommu_domain_destroy(struct iommu_domain *domain)
1185{
1186 struct omap_iommu_domain *omap_domain = domain->priv;
1187
1188 domain->priv = NULL;
1189
1190 kfree(omap_domain->pgtable);
1191 kfree(omap_domain);
1192}
1193
1194static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1195 unsigned long da)
1196{
1197 struct omap_iommu_domain *omap_domain = domain->priv;
1198 struct omap_iommu *oiommu = omap_domain->iommu_dev;
1199 struct device *dev = oiommu->dev;
1200 u32 *pgd, *pte;
1201 phys_addr_t ret = 0;
1202
1203 iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1204
1205 if (pte) {
1206 if (iopte_is_small(*pte))
1207 ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1208 else if (iopte_is_large(*pte))
1209 ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1210 else
1211 dev_err(dev, "bogus pte 0x%x", *pte);
1212 } else {
1213 if (iopgd_is_section(*pgd))
1214 ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1215 else if (iopgd_is_super(*pgd))
1216 ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1217 else
1218 dev_err(dev, "bogus pgd 0x%x", *pgd);
1219 }
1220
1221 return ret;
1222}
1223
1224static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
1225 unsigned long cap)
1226{
1227 return 0;
1228}
1229
1230static struct iommu_ops omap_iommu_ops = {
1231 .domain_init = omap_iommu_domain_init,
1232 .domain_destroy = omap_iommu_domain_destroy,
1233 .attach_dev = omap_iommu_attach_dev,
1234 .detach_dev = omap_iommu_detach_dev,
1235 .map = omap_iommu_map,
1236 .unmap = omap_iommu_unmap,
1237 .iova_to_phys = omap_iommu_iova_to_phys,
1238 .domain_has_cap = omap_iommu_domain_has_cap,
1239};
1240
1241static int __init omap_iommu_init(void)
1242{
1243 struct kmem_cache *p;
1244 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1245 size_t align = 1 << 10; /* L2 pagetable alignment */
1246
1247 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1248 iopte_cachep_ctor);
1249 if (!p)
1250 return -ENOMEM;
1251 iopte_cachep = p;
1252
1253 register_iommu(&omap_iommu_ops);
1254
1255 return platform_driver_register(&omap_iommu_driver);
1256}
1257module_init(omap_iommu_init);
1258
1259static void __exit omap_iommu_exit(void)
1260{
1261 kmem_cache_destroy(iopte_cachep);
1262
1263 platform_driver_unregister(&omap_iommu_driver);
1264}
1265module_exit(omap_iommu_exit);
1266
1267MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1268MODULE_ALIAS("platform:omap-iommu");
1269MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1270MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 00000000000..e8fdb8830f6
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,742 @@
1/*
2 * omap iommu: simple virtual address space management
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/device.h>
17#include <linux/scatterlist.h>
18#include <linux/iommu.h>
19
20#include <asm/cacheflush.h>
21#include <asm/mach/map.h>
22
23#include <plat/iommu.h>
24#include <plat/iovmm.h>
25
26#include <plat/iopgtable.h>
27
28static struct kmem_cache *iovm_area_cachep;
29
30/* return the offset of the first scatterlist entry in a sg table */
31static unsigned int sgtable_offset(const struct sg_table *sgt)
32{
33 if (!sgt || !sgt->nents)
34 return 0;
35
36 return sgt->sgl->offset;
37}
38
39/* return total bytes of sg buffers */
40static size_t sgtable_len(const struct sg_table *sgt)
41{
42 unsigned int i, total = 0;
43 struct scatterlist *sg;
44
45 if (!sgt)
46 return 0;
47
48 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
49 size_t bytes;
50
51 bytes = sg->length + sg->offset;
52
53 if (!iopgsz_ok(bytes)) {
54 pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
55 __func__, i, bytes, sg->offset);
56 return 0;
57 }
58
59 if (i && sg->offset) {
60 pr_err("%s: sg[%d] offset not allowed in internal "
61 "entries\n", __func__, i);
62 return 0;
63 }
64
65 total += bytes;
66 }
67
68 return total;
69}
70#define sgtable_ok(x) (!!sgtable_len(x))
71
72static unsigned max_alignment(u32 addr)
73{
74 int i;
75 unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
76 for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
77 ;
78 return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
79}
80
81/*
82 * calculate the optimal number of sg elements from total bytes based on
83 * iommu superpages
84 */
85static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
86{
87 unsigned nr_entries = 0, ent_sz;
88
89 if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
90 pr_err("%s: wrong size %08x\n", __func__, bytes);
91 return 0;
92 }
93
94 while (bytes) {
95 ent_sz = max_alignment(da | pa);
96 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
97 nr_entries++;
98 da += ent_sz;
99 pa += ent_sz;
100 bytes -= ent_sz;
101 }
102
103 return nr_entries;
104}
105
106/* allocate and initialize sg_table header(a kind of 'superblock') */
107static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
108 u32 da, u32 pa)
109{
110 unsigned int nr_entries;
111 int err;
112 struct sg_table *sgt;
113
114 if (!bytes)
115 return ERR_PTR(-EINVAL);
116
117 if (!IS_ALIGNED(bytes, PAGE_SIZE))
118 return ERR_PTR(-EINVAL);
119
120 if (flags & IOVMF_LINEAR) {
121 nr_entries = sgtable_nents(bytes, da, pa);
122 if (!nr_entries)
123 return ERR_PTR(-EINVAL);
124 } else
125 nr_entries = bytes / PAGE_SIZE;
126
127 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
128 if (!sgt)
129 return ERR_PTR(-ENOMEM);
130
131 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
132 if (err) {
133 kfree(sgt);
134 return ERR_PTR(err);
135 }
136
137 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
138
139 return sgt;
140}
141
142/* free sg_table header(a kind of superblock) */
143static void sgtable_free(struct sg_table *sgt)
144{
145 if (!sgt)
146 return;
147
148 sg_free_table(sgt);
149 kfree(sgt);
150
151 pr_debug("%s: sgt:%p\n", __func__, sgt);
152}
153
154/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
155static void *vmap_sg(const struct sg_table *sgt)
156{
157 u32 va;
158 size_t total;
159 unsigned int i;
160 struct scatterlist *sg;
161 struct vm_struct *new;
162 const struct mem_type *mtype;
163
164 mtype = get_mem_type(MT_DEVICE);
165 if (!mtype)
166 return ERR_PTR(-EINVAL);
167
168 total = sgtable_len(sgt);
169 if (!total)
170 return ERR_PTR(-EINVAL);
171
172 new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
173 if (!new)
174 return ERR_PTR(-ENOMEM);
175 va = (u32)new->addr;
176
177 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
178 size_t bytes;
179 u32 pa;
180 int err;
181
182 pa = sg_phys(sg) - sg->offset;
183 bytes = sg->length + sg->offset;
184
185 BUG_ON(bytes != PAGE_SIZE);
186
187 err = ioremap_page(va, pa, mtype);
188 if (err)
189 goto err_out;
190
191 va += bytes;
192 }
193
194 flush_cache_vmap((unsigned long)new->addr,
195 (unsigned long)(new->addr + total));
196 return new->addr;
197
198err_out:
199 WARN_ON(1); /* FIXME: cleanup some mpu mappings */
200 vunmap(new->addr);
201 return ERR_PTR(-EAGAIN);
202}
203
204static inline void vunmap_sg(const void *va)
205{
206 vunmap(va);
207}
208
209static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
210 const u32 da)
211{
212 struct iovm_struct *tmp;
213
214 list_for_each_entry(tmp, &obj->mmap, list) {
215 if ((da >= tmp->da_start) && (da < tmp->da_end)) {
216 size_t len;
217
218 len = tmp->da_end - tmp->da_start;
219
220 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
221 __func__, tmp->da_start, da, tmp->da_end, len,
222 tmp->flags);
223
224 return tmp;
225 }
226 }
227
228 return NULL;
229}
230
231/**
232 * omap_find_iovm_area - find iovma which includes @da
233 * @da: iommu device virtual address
234 *
235 * Find the existing iovma starting at @da
236 */
237struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
238{
239 struct iovm_struct *area;
240
241 mutex_lock(&obj->mmap_lock);
242 area = __find_iovm_area(obj, da);
243 mutex_unlock(&obj->mmap_lock);
244
245 return area;
246}
247EXPORT_SYMBOL_GPL(omap_find_iovm_area);
248
249/*
250 * This finds the hole(area) which fits the requested address and len
251 * in iovmas mmap, and returns the newly allocated iovma.
252 */
253static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
254 size_t bytes, u32 flags)
255{
256 struct iovm_struct *new, *tmp;
257 u32 start, prev_end, alignment;
258
259 if (!obj || !bytes)
260 return ERR_PTR(-EINVAL);
261
262 start = da;
263 alignment = PAGE_SIZE;
264
265 if (~flags & IOVMF_DA_FIXED) {
266 /* Don't map address 0 */
267 start = obj->da_start ? obj->da_start : alignment;
268
269 if (flags & IOVMF_LINEAR)
270 alignment = iopgsz_max(bytes);
271 start = roundup(start, alignment);
272 } else if (start < obj->da_start || start > obj->da_end ||
273 obj->da_end - start < bytes) {
274 return ERR_PTR(-EINVAL);
275 }
276
277 tmp = NULL;
278 if (list_empty(&obj->mmap))
279 goto found;
280
281 prev_end = 0;
282 list_for_each_entry(tmp, &obj->mmap, list) {
283
284 if (prev_end > start)
285 break;
286
287 if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
288 goto found;
289
290 if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
291 start = roundup(tmp->da_end + 1, alignment);
292
293 prev_end = tmp->da_end;
294 }
295
296 if ((start >= prev_end) && (obj->da_end - start >= bytes))
297 goto found;
298
299 dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
300 __func__, da, bytes, flags);
301
302 return ERR_PTR(-EINVAL);
303
304found:
305 new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
306 if (!new)
307 return ERR_PTR(-ENOMEM);
308
309 new->iommu = obj;
310 new->da_start = start;
311 new->da_end = start + bytes;
312 new->flags = flags;
313
314 /*
315 * keep ascending order of iovmas
316 */
317 if (tmp)
318 list_add_tail(&new->list, &tmp->list);
319 else
320 list_add(&new->list, &obj->mmap);
321
322 dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
323 __func__, new->da_start, start, new->da_end, bytes, flags);
324
325 return new;
326}
327
328static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
329{
330 size_t bytes;
331
332 BUG_ON(!obj || !area);
333
334 bytes = area->da_end - area->da_start;
335
336 dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
337 __func__, area->da_start, area->da_end, bytes, area->flags);
338
339 list_del(&area->list);
340 kmem_cache_free(iovm_area_cachep, area);
341}
342
343/**
344 * omap_da_to_va - convert (d) to (v)
345 * @obj: objective iommu
346 * @da: iommu device virtual address
347 * @va: mpu virtual address
348 *
349 * Returns mpu virtual addr which corresponds to a given device virtual addr
350 */
351void *omap_da_to_va(struct omap_iommu *obj, u32 da)
352{
353 void *va = NULL;
354 struct iovm_struct *area;
355
356 mutex_lock(&obj->mmap_lock);
357
358 area = __find_iovm_area(obj, da);
359 if (!area) {
360 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
361 goto out;
362 }
363 va = area->va;
364out:
365 mutex_unlock(&obj->mmap_lock);
366
367 return va;
368}
369EXPORT_SYMBOL_GPL(omap_da_to_va);
370
371static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
372{
373 unsigned int i;
374 struct scatterlist *sg;
375 void *va = _va;
376 void *va_end;
377
378 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
379 struct page *pg;
380 const size_t bytes = PAGE_SIZE;
381
382 /*
383 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
384 */
385 pg = vmalloc_to_page(va);
386 BUG_ON(!pg);
387 sg_set_page(sg, pg, bytes, 0);
388
389 va += bytes;
390 }
391
392 va_end = _va + PAGE_SIZE * i;
393}
394
395static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
396{
397 /*
398 * Actually this is not necessary at all; it just exists for
399 * consistency and code readability.
400 */
401 BUG_ON(!sgt);
402}
403
404/* create 'da' <-> 'pa' mapping from 'sgt' */
405static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
406 const struct sg_table *sgt, u32 flags)
407{
408 int err;
409 unsigned int i, j;
410 struct scatterlist *sg;
411 u32 da = new->da_start;
412 int order;
413
414 if (!domain || !sgt)
415 return -EINVAL;
416
417 BUG_ON(!sgtable_ok(sgt));
418
419 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
420 u32 pa;
421 size_t bytes;
422
423 pa = sg_phys(sg) - sg->offset;
424 bytes = sg->length + sg->offset;
425
426 flags &= ~IOVMF_PGSZ_MASK;
427
428 if (bytes_to_iopgsz(bytes) < 0)
429 goto err_out;
430
431 order = get_order(bytes);
432
433 pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
434 i, da, pa, bytes);
435
436 err = iommu_map(domain, da, pa, order, flags);
437 if (err)
438 goto err_out;
439
440 da += bytes;
441 }
442 return 0;
443
444err_out:
445 da = new->da_start;
446
447 for_each_sg(sgt->sgl, sg, i, j) {
448 size_t bytes;
449
450 bytes = sg->length + sg->offset;
451 order = get_order(bytes);
452
453 /* ignore failures.. we're already handling one */
454 iommu_unmap(domain, da, order);
455
456 da += bytes;
457 }
458 return err;
459}
460
461/* release 'da' <-> 'pa' mapping */
462static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
463 struct iovm_struct *area)
464{
465 u32 start;
466 size_t total = area->da_end - area->da_start;
467 const struct sg_table *sgt = area->sgt;
468 struct scatterlist *sg;
469 int i, err;
470
471 BUG_ON(!sgtable_ok(sgt));
472 BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
473
474 start = area->da_start;
475 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
476 size_t bytes;
477 int order;
478
479 bytes = sg->length + sg->offset;
480 order = get_order(bytes);
481
482 err = iommu_unmap(domain, start, order);
483 if (err < 0)
484 break;
485
486 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
487 __func__, start, bytes, area->flags);
488
489 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
490
491 total -= bytes;
492 start += bytes;
493 }
494 BUG_ON(total);
495}
496
497/* template function for all unmapping */
498static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
499 struct omap_iommu *obj, const u32 da,
500 void (*fn)(const void *), u32 flags)
501{
502 struct sg_table *sgt = NULL;
503 struct iovm_struct *area;
504
505 if (!IS_ALIGNED(da, PAGE_SIZE)) {
506 dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
507 return NULL;
508 }
509
510 mutex_lock(&obj->mmap_lock);
511
512 area = __find_iovm_area(obj, da);
513 if (!area) {
514 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
515 goto out;
516 }
517
518 if ((area->flags & flags) != flags) {
519 dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
520 area->flags);
521 goto out;
522 }
523 sgt = (struct sg_table *)area->sgt;
524
525 unmap_iovm_area(domain, obj, area);
526
527 fn(area->va);
528
529 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
530 area->da_start, da, area->da_end,
531 area->da_end - area->da_start, area->flags);
532
533 free_iovm_area(obj, area);
534out:
535 mutex_unlock(&obj->mmap_lock);
536
537 return sgt;
538}
539
540static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
541 u32 da, const struct sg_table *sgt, void *va,
542 size_t bytes, u32 flags)
543{
544 int err = -ENOMEM;
545 struct iovm_struct *new;
546
547 mutex_lock(&obj->mmap_lock);
548
549 new = alloc_iovm_area(obj, da, bytes, flags);
550 if (IS_ERR(new)) {
551 err = PTR_ERR(new);
552 goto err_alloc_iovma;
553 }
554 new->va = va;
555 new->sgt = sgt;
556
557 if (map_iovm_area(domain, new, sgt, new->flags))
558 goto err_map;
559
560 mutex_unlock(&obj->mmap_lock);
561
562 dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
563 __func__, new->da_start, bytes, new->flags, va);
564
565 return new->da_start;
566
567err_map:
568 free_iovm_area(obj, new);
569err_alloc_iovma:
570 mutex_unlock(&obj->mmap_lock);
571 return err;
572}
573
574static inline u32
575__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
576 u32 da, const struct sg_table *sgt,
577 void *va, size_t bytes, u32 flags)
578{
579 return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
580}
581
582/**
583 * omap_iommu_vmap - (d)-(p)-(v) address mapper
584 * @obj: target iommu
585 * @sgt: address of scatter gather table
586 * @flags: iovma and page property
587 *
588 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
589 * All @sgt elements must be io page size aligned.
590 */
591u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
592 const struct sg_table *sgt, u32 flags)
593{
594 size_t bytes;
595 void *va = NULL;
596
597 if (!obj || !obj->dev || !sgt)
598 return -EINVAL;
599
600 bytes = sgtable_len(sgt);
601 if (!bytes)
602 return -EINVAL;
603 bytes = PAGE_ALIGN(bytes);
604
605 if (flags & IOVMF_MMIO) {
606 va = vmap_sg(sgt);
607 if (IS_ERR(va))
608 return PTR_ERR(va);
609 }
610
611 flags |= IOVMF_DISCONT;
612 flags |= IOVMF_MMIO;
613
614 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
615 if (IS_ERR_VALUE(da))
616 vunmap_sg(va);
617
618	return IS_ERR_VALUE(da) ? da : da + sgtable_offset(sgt);
619}
620EXPORT_SYMBOL_GPL(omap_iommu_vmap);
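
/*
 * A hypothetical usage sketch for omap_iommu_vmap().  It assumes the caller
 * has already attached an iommu domain to the OMAP iommu (providing 'domain'
 * and 'obj') and owns a kmalloc'ed sg_table whose entries are io page size
 * aligned, as the kernel-doc above requires.  Passing 0 as 'da' without
 * IOVMF_DA_FIXED lets the allocator pick a free device address; extra flag
 * bits would be OR'ed into the last argument as needed.
 */
static u32 example_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
			const struct sg_table *sgt)
{
	u32 da;

	da = omap_iommu_vmap(domain, obj, 0, sgt, 0);
	if (IS_ERR_VALUE(da))
		pr_err("omap_iommu_vmap failed: %d\n", (int)da);

	return da;
}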
621
622/**
623 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
624 * @obj: target iommu
625 * @da: iommu device virtual address
626 *
627 * Frees the iommu virtually contiguous memory area starting at
628 * @da, which was returned by 'omap_iommu_vmap()'.
629 */
630struct sg_table *
631omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
632{
633 struct sg_table *sgt;
634	/*
635	 * 'sgt' was allocated by the caller before 'omap_iommu_vmap()' was
636	 * called.  Just return 'sgt' to the caller to free.
637	 */
638 da &= PAGE_MASK;
639 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
640 IOVMF_DISCONT | IOVMF_MMIO);
641 if (!sgt)
642 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
643 return sgt;
644}
645EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
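
/*
 * The matching, hypothetical teardown for the vmap sketch above.
 * omap_iommu_vunmap() only hands the sg_table back, so ownership returns to
 * the caller; this sketch frees it on the assumption that it was kmalloc'ed
 * and filled with sg_alloc_table().
 */
static void example_vunmap(struct iommu_domain *domain, struct omap_iommu *obj,
			   u32 da)
{
	struct sg_table *sgt;

	sgt = omap_iommu_vunmap(domain, obj, da);
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);
}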
646
647/**
648 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
649 * @obj: target iommu
650 * @da: requested iommu device virtual address
651 * @bytes: allocation size
652 * @flags: iovma and page property
653 *
654 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
655 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
656 */
657u32
658omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
659 size_t bytes, u32 flags)
660{
661 void *va;
662 struct sg_table *sgt;
663
664 if (!obj || !obj->dev || !bytes)
665 return -EINVAL;
666
667 bytes = PAGE_ALIGN(bytes);
668
669 va = vmalloc(bytes);
670 if (!va)
671 return -ENOMEM;
672
673 flags |= IOVMF_DISCONT;
674 flags |= IOVMF_ALLOC;
675
676 sgt = sgtable_alloc(bytes, flags, da, 0);
677 if (IS_ERR(sgt)) {
678 da = PTR_ERR(sgt);
679 goto err_sgt_alloc;
680 }
681 sgtable_fill_vmalloc(sgt, va);
682
683 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
684 if (IS_ERR_VALUE(da))
685 goto err_iommu_vmap;
686
687 return da;
688
689err_iommu_vmap:
690 sgtable_drain_vmalloc(sgt);
691 sgtable_free(sgt);
692err_sgt_alloc:
693 vfree(va);
694 return da;
695}
696EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
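
/*
 * A hypothetical usage sketch for omap_iommu_vmalloc(); 'domain' and 'obj'
 * are assumed to be set up and attached already, and the 1 MiB size and the
 * fixed device address are illustrative values only.
 */
static u32 example_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj)
{
	const size_t bytes = 1 << 20;	/* 1 MiB, already page aligned */
	u32 da;

	/*
	 * With IOVMF_DA_FIXED the area is carved out at the requested
	 * address; without it the returned 'da' may differ from the hint.
	 */
	da = omap_iommu_vmalloc(domain, obj, 0x20000000, bytes, IOVMF_DA_FIXED);
	if (IS_ERR_VALUE(da))
		pr_err("omap_iommu_vmalloc failed: %d\n", (int)da);

	return da;
}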
697
698/**
699 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
700 * @obj: target iommu
701 * @da: iommu device virtual address
702 *
703 * Frees the iommu virtually contiguous memory area starting at
704 * @da, as obtained from 'omap_iommu_vmalloc()'.
705 */
706void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
707 const u32 da)
708{
709 struct sg_table *sgt;
710
711 sgt = unmap_vm_area(domain, obj, da, vfree,
712 IOVMF_DISCONT | IOVMF_ALLOC);
713 if (!sgt)
714 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
715 sgtable_free(sgt);
716}
717EXPORT_SYMBOL_GPL(omap_iommu_vfree);
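
/*
 * The matching, hypothetical release for the vmalloc sketch above.  Unlike
 * omap_iommu_vunmap(), nothing is handed back to the caller: the backing
 * vmalloc() memory and the sg_table are both freed internally.
 */
static void example_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
			  u32 da)
{
	omap_iommu_vfree(domain, obj, da);
}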
718
719static int __init iovmm_init(void)
720{
721 const unsigned long flags = SLAB_HWCACHE_ALIGN;
722 struct kmem_cache *p;
723
724 p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
725 flags, NULL);
726 if (!p)
727 return -ENOMEM;
728 iovm_area_cachep = p;
729
730 return 0;
731}
732module_init(iovmm_init);
733
734static void __exit iovmm_exit(void)
735{
736 kmem_cache_destroy(iovm_area_cachep);
737}
738module_exit(iovmm_exit);
739
740MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
741MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
742MODULE_LICENSE("GPL v2");