Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig              18
-rw-r--r--  drivers/iommu/Makefile              3
-rw-r--r--  drivers/iommu/omap-iommu-debug.c  418
-rw-r--r--  drivers/iommu/omap-iommu.c       1326
-rw-r--r--  drivers/iommu/omap-iovmm.c        923
5 files changed, 2688 insertions, 0 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b57b3fa492f3..432463b2e78d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -107,4 +107,22 @@ config INTR_REMAP
107 To use x2apic mode in the CPU's which support x2APIC enhancements or
108 to support platforms with CPU's having > 8 bit APIC ID, say Y.
109
110# OMAP IOMMU support
111config OMAP_IOMMU
112 bool "OMAP IOMMU Support"
113 select IOMMU_API
114
115config OMAP_IOVMM
116 tristate
117 select OMAP_IOMMU
118
119config OMAP_IOMMU_DEBUG
120 tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
121 depends on OMAP_IOVMM && DEBUG_FS
122 help
123 Select this to see extensive information about
124 the internal state of OMAP IOMMU/IOVMM in debugfs.
125
126 Say N unless you know you need this.
127
128endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4d4d77df7cac..f798cdd3699e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
3obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
4obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
5obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
6obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
7obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
8obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
new file mode 100644
index 000000000000..0f8c8dd55018
--- /dev/null
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -0,0 +1,418 @@
1/*
2 * omap iommu: debugfs interface
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/uaccess.h>
18#include <linux/platform_device.h>
19#include <linux/debugfs.h>
20
21#include <plat/iommu.h>
22#include <plat/iovmm.h>
23
24#include <plat/iopgtable.h>
25
26#define MAXCOLUMN 100 /* for short messages */
27
28static DEFINE_MUTEX(iommu_debug_lock);
29
30static struct dentry *iommu_debug_root;
31
32static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
33 size_t count, loff_t *ppos)
34{
35 u32 ver = iommu_arch_version();
36 char buf[MAXCOLUMN], *p = buf;
37
38 p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf, ver & 0xf);
39
40 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
41}
42
43static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
44 size_t count, loff_t *ppos)
45{
46 struct iommu *obj = file->private_data;
47 char *p, *buf;
48 ssize_t bytes;
49
50 buf = kmalloc(count, GFP_KERNEL);
51 if (!buf)
52 return -ENOMEM;
53 p = buf;
54
55 mutex_lock(&iommu_debug_lock);
56
57 bytes = iommu_dump_ctx(obj, p, count);
58 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
59
60 mutex_unlock(&iommu_debug_lock);
61 kfree(buf);
62
63 return bytes;
64}
65
66static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
67 size_t count, loff_t *ppos)
68{
69 struct iommu *obj = file->private_data;
70 char *p, *buf;
71 ssize_t bytes, rest;
72
73 buf = kmalloc(count, GFP_KERNEL);
74 if (!buf)
75 return -ENOMEM;
76 p = buf;
77
78 mutex_lock(&iommu_debug_lock);
79
80 p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
81 p += sprintf(p, "-----------------------------------------\n");
82 rest = count - (p - buf);
83 p += dump_tlb_entries(obj, p, rest);
84
85 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
86
87 mutex_unlock(&iommu_debug_lock);
88 kfree(buf);
89
90 return bytes;
91}
92
93static ssize_t debug_write_pagetable(struct file *file,
94 const char __user *userbuf, size_t count, loff_t *ppos)
95{
96 struct iotlb_entry e;
97 struct cr_regs cr;
98 int err;
99 struct iommu *obj = file->private_data;
100 char buf[MAXCOLUMN], *p = buf;
101
102 count = min(count, sizeof(buf));
103
104 mutex_lock(&iommu_debug_lock);
105 if (copy_from_user(p, userbuf, count)) {
106 mutex_unlock(&iommu_debug_lock);
107 return -EFAULT;
108 }
109
110 sscanf(p, "%x %x", &cr.cam, &cr.ram);
111 if (!cr.cam || !cr.ram) {
112 mutex_unlock(&iommu_debug_lock);
113 return -EINVAL;
114 }
115
116 iotlb_cr_to_e(&cr, &e);
117 err = iopgtable_store_entry(obj, &e);
118 if (err)
119 dev_err(obj->dev, "%s: fail to store cr\n", __func__);
120
121 mutex_unlock(&iommu_debug_lock);
122 return count;
123}
124
125#define dump_ioptable_entry_one(lv, da, val) \
126 ({ \
127 int __err = 0; \
128 ssize_t bytes; \
129 const int maxcol = 22; \
130 const char *str = "%d: %08x %08x\n"; \
131 bytes = snprintf(p, maxcol, str, lv, da, val); \
132 p += bytes; \
133 len -= bytes; \
134 if (len < maxcol) \
135 __err = -ENOMEM; \
136 __err; \
137 })
138
139static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
140{
141 int i;
142 u32 *iopgd;
143 char *p = buf;
144
145 spin_lock(&obj->page_table_lock);
146
147 iopgd = iopgd_offset(obj, 0);
148 for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
149 int j, err;
150 u32 *iopte;
151 u32 da;
152
153 if (!*iopgd)
154 continue;
155
156 if (!(*iopgd & IOPGD_TABLE)) {
157 da = i << IOPGD_SHIFT;
158
159 err = dump_ioptable_entry_one(1, da, *iopgd);
160 if (err)
161 goto out;
162 continue;
163 }
164
165 iopte = iopte_offset(iopgd, 0);
166
167 for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
168 if (!*iopte)
169 continue;
170
171 da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
172 err = dump_ioptable_entry_one(2, da, *iopte);
173 if (err)
174 goto out;
175 }
176 }
177out:
178 spin_unlock(&obj->page_table_lock);
179
180 return p - buf;
181}
182
183static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
184 size_t count, loff_t *ppos)
185{
186 struct iommu *obj = file->private_data;
187 char *p, *buf;
188 size_t bytes;
189
190 buf = (char *)__get_free_page(GFP_KERNEL);
191 if (!buf)
192 return -ENOMEM;
193 p = buf;
194
195 p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
196 p += sprintf(p, "-----------------------------------------\n");
197
198 mutex_lock(&iommu_debug_lock);
199
200 bytes = PAGE_SIZE - (p - buf);
201 p += dump_ioptable(obj, p, bytes);
202
203 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
204
205 mutex_unlock(&iommu_debug_lock);
206 free_page((unsigned long)buf);
207
208 return bytes;
209}
210
211static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
212 size_t count, loff_t *ppos)
213{
214 struct iommu *obj = file->private_data;
215 char *p, *buf;
216 struct iovm_struct *tmp;
217 int i = 0;
218 ssize_t bytes;
219
220 buf = (char *)__get_free_page(GFP_KERNEL);
221 if (!buf)
222 return -ENOMEM;
223 p = buf;
224
225 p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
226 "No", "start", "end", "size", "flags");
227 p += sprintf(p, "-------------------------------------------------\n");
228
229 mutex_lock(&iommu_debug_lock);
230
231 list_for_each_entry(tmp, &obj->mmap, list) {
232 size_t len;
233 const char *str = "%3d %08x-%08x %6x %8x\n";
234 const int maxcol = 39;
235
236 len = tmp->da_end - tmp->da_start;
237 p += snprintf(p, maxcol, str,
238 i, tmp->da_start, tmp->da_end, len, tmp->flags);
239
240 if (PAGE_SIZE - (p - buf) < maxcol)
241 break;
242 i++;
243 }
244
245 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
246
247 mutex_unlock(&iommu_debug_lock);
248 free_page((unsigned long)buf);
249
250 return bytes;
251}
252
253static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
254 size_t count, loff_t *ppos)
255{
256 struct iommu *obj = file->private_data;
257 char *p, *buf;
258 struct iovm_struct *area;
259 ssize_t bytes;
260
261 count = min_t(ssize_t, count, PAGE_SIZE);
262
263 buf = (char *)__get_free_page(GFP_KERNEL);
264 if (!buf)
265 return -ENOMEM;
266 p = buf;
267
268 mutex_lock(&iommu_debug_lock);
269
270 area = find_iovm_area(obj, (u32)ppos);
271 if (IS_ERR(area)) {
272 bytes = -EINVAL;
273 goto err_out;
274 }
275 memcpy(p, area->va, count);
276 p += count;
277
278 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
279err_out:
280 mutex_unlock(&iommu_debug_lock);
281 free_page((unsigned long)buf);
282
283 return bytes;
284}
285
286static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
287 size_t count, loff_t *ppos)
288{
289 struct iommu *obj = file->private_data;
290 struct iovm_struct *area;
291 char *p, *buf;
292
293 count = min_t(size_t, count, PAGE_SIZE);
294
295 buf = (char *)__get_free_page(GFP_KERNEL);
296 if (!buf)
297 return -ENOMEM;
298 p = buf;
299
300 mutex_lock(&iommu_debug_lock);
301
302 if (copy_from_user(p, userbuf, count)) {
303 count = -EFAULT;
304 goto err_out;
305 }
306
307 area = find_iovm_area(obj, (u32)ppos);
308 if (IS_ERR(area)) {
309 count = -EINVAL;
310 goto err_out;
311 }
312 memcpy(area->va, p, count);
313err_out:
314 mutex_unlock(&iommu_debug_lock);
315 free_page((unsigned long)buf);
316
317 return count;
318}
319
320static int debug_open_generic(struct inode *inode, struct file *file)
321{
322 file->private_data = inode->i_private;
323 return 0;
324}
325
326#define DEBUG_FOPS(name) \
327 static const struct file_operations debug_##name##_fops = { \
328 .open = debug_open_generic, \
329 .read = debug_read_##name, \
330 .write = debug_write_##name, \
331 .llseek = generic_file_llseek, \
332 };
333
334#define DEBUG_FOPS_RO(name) \
335 static const struct file_operations debug_##name##_fops = { \
336 .open = debug_open_generic, \
337 .read = debug_read_##name, \
338 .llseek = generic_file_llseek, \
339 };
340
341DEBUG_FOPS_RO(ver);
342DEBUG_FOPS_RO(regs);
343DEBUG_FOPS_RO(tlb);
344DEBUG_FOPS(pagetable);
345DEBUG_FOPS_RO(mmap);
346DEBUG_FOPS(mem);
347
348#define __DEBUG_ADD_FILE(attr, mode) \
349 { \
350 struct dentry *dent; \
351 dent = debugfs_create_file(#attr, mode, parent, \
352 obj, &debug_##attr##_fops); \
353 if (!dent) \
354 return -ENOMEM; \
355 }
356
357#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
358#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
359
360static int iommu_debug_register(struct device *dev, void *data)
361{
362 struct platform_device *pdev = to_platform_device(dev);
363 struct iommu *obj = platform_get_drvdata(pdev);
364 struct dentry *d, *parent;
365
366 if (!obj || !obj->dev)
367 return -EINVAL;
368
369 d = debugfs_create_dir(obj->name, iommu_debug_root);
370 if (!d)
371 return -ENOMEM;
372 parent = d;
373
374 d = debugfs_create_u8("nr_tlb_entries", 0400, parent,
375 (u8 *)&obj->nr_tlb_entries);
376 if (!d)
377 return -ENOMEM;
378
379 DEBUG_ADD_FILE_RO(ver);
380 DEBUG_ADD_FILE_RO(regs);
381 DEBUG_ADD_FILE_RO(tlb);
382 DEBUG_ADD_FILE(pagetable);
383 DEBUG_ADD_FILE_RO(mmap);
384 DEBUG_ADD_FILE(mem);
385
386 return 0;
387}
388
389static int __init iommu_debug_init(void)
390{
391 struct dentry *d;
392 int err;
393
394 d = debugfs_create_dir("iommu", NULL);
395 if (!d)
396 return -ENOMEM;
397 iommu_debug_root = d;
398
399 err = foreach_iommu_device(d, iommu_debug_register);
400 if (err)
401 goto err_out;
402 return 0;
403
404err_out:
405 debugfs_remove_recursive(iommu_debug_root);
406 return err;
407}
408module_init(iommu_debug_init)
409
410static void __exit iommu_debugfs_exit(void)
411{
412 debugfs_remove_recursive(iommu_debug_root);
413}
414module_exit(iommu_debugfs_exit)
415
416MODULE_DESCRIPTION("omap iommu: debugfs interface");
417MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
418MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
new file mode 100644
index 000000000000..bf8de6475746
--- /dev/null
+++ b/drivers/iommu/omap-iommu.c
@@ -0,0 +1,1326 @@
1/*
2 * omap iommu: tlb and pagetable primitives
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h>
21#include <linux/iommu.h>
22#include <linux/mutex.h>
23#include <linux/spinlock.h>
24
25#include <asm/cacheflush.h>
26
27#include <plat/iommu.h>
28
29#include <plat/iopgtable.h>
30
31#define for_each_iotlb_cr(obj, n, __i, cr) \
32 for (__i = 0; \
33 (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
34 __i++)
35
36/**
37 * struct omap_iommu_domain - omap iommu domain
38 * @pgtable: the page table
39 * @iommu_dev: an omap iommu device attached to this domain. only a single
40 * iommu device can be attached for now.
41 * @lock: domain lock, should be taken when attaching/detaching
42 */
43struct omap_iommu_domain {
44 u32 *pgtable;
45 struct iommu *iommu_dev;
46 spinlock_t lock;
47};
48
49/* accommodate the difference between omap1 and omap2/3 */
50static const struct iommu_functions *arch_iommu;
51
52static struct platform_driver omap_iommu_driver;
53static struct kmem_cache *iopte_cachep;
54
55/**
56 * install_iommu_arch - Install architecture specific iommu functions
57 * @ops: a pointer to architecture specific iommu functions
58 *
59 * There are several kinds of iommu algorithms (tlb, pagetable) among the
60 * omap series. This interface installs such an iommu algorithm.
61 **/
62int install_iommu_arch(const struct iommu_functions *ops)
63{
64 if (arch_iommu)
65 return -EBUSY;
66
67 arch_iommu = ops;
68 return 0;
69}
70EXPORT_SYMBOL_GPL(install_iommu_arch);
71
72/**
73 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
74 * @ops: a pointer to architecture specific iommu functions
75 *
76 * This interface uninstalls the iommu algorithm installed previously.
77 **/
78void uninstall_iommu_arch(const struct iommu_functions *ops)
79{
80 if (arch_iommu != ops)
81 pr_err("%s: not your arch\n", __func__);
82
83 arch_iommu = NULL;
84}
85EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
86
87/**
88 * iommu_save_ctx - Save registers for pm off-mode support
89 * @obj: target iommu
90 **/
91void iommu_save_ctx(struct iommu *obj)
92{
93 arch_iommu->save_ctx(obj);
94}
95EXPORT_SYMBOL_GPL(iommu_save_ctx);
96
97/**
98 * iommu_restore_ctx - Restore registers for pm off-mode support
99 * @obj: target iommu
100 **/
101void iommu_restore_ctx(struct iommu *obj)
102{
103 arch_iommu->restore_ctx(obj);
104}
105EXPORT_SYMBOL_GPL(iommu_restore_ctx);
106
107/**
108 * iommu_arch_version - Return running iommu arch version
109 **/
110u32 iommu_arch_version(void)
111{
112 return arch_iommu->version;
113}
114EXPORT_SYMBOL_GPL(iommu_arch_version);
115
116static int iommu_enable(struct iommu *obj)
117{
118 int err;
119
120 if (!obj)
121 return -EINVAL;
122
123 if (!arch_iommu)
124 return -ENODEV;
125
126 clk_enable(obj->clk);
127
128 err = arch_iommu->enable(obj);
129
130 clk_disable(obj->clk);
131 return err;
132}
133
134static void iommu_disable(struct iommu *obj)
135{
136 if (!obj)
137 return;
138
139 clk_enable(obj->clk);
140
141 arch_iommu->disable(obj);
142
143 clk_disable(obj->clk);
144}
145
146/*
147 * TLB operations
148 */
149void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
150{
151 BUG_ON(!cr || !e);
152
153 arch_iommu->cr_to_e(cr, e);
154}
155EXPORT_SYMBOL_GPL(iotlb_cr_to_e);
156
157static inline int iotlb_cr_valid(struct cr_regs *cr)
158{
159 if (!cr)
160 return -EINVAL;
161
162 return arch_iommu->cr_valid(cr);
163}
164
165static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
166 struct iotlb_entry *e)
167{
168 if (!e)
169 return NULL;
170
171 return arch_iommu->alloc_cr(obj, e);
172}
173
174u32 iotlb_cr_to_virt(struct cr_regs *cr)
175{
176 return arch_iommu->cr_to_virt(cr);
177}
178EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
179
180static u32 get_iopte_attr(struct iotlb_entry *e)
181{
182 return arch_iommu->get_pte_attr(e);
183}
184
185static u32 iommu_report_fault(struct iommu *obj, u32 *da)
186{
187 return arch_iommu->fault_isr(obj, da);
188}
189
190static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
191{
192 u32 val;
193
194 val = iommu_read_reg(obj, MMU_LOCK);
195
196 l->base = MMU_LOCK_BASE(val);
197 l->vict = MMU_LOCK_VICT(val);
198
199}
200
201static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
202{
203 u32 val;
204
205 val = (l->base << MMU_LOCK_BASE_SHIFT);
206 val |= (l->vict << MMU_LOCK_VICT_SHIFT);
207
208 iommu_write_reg(obj, val, MMU_LOCK);
209}
210
211static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
212{
213 arch_iommu->tlb_read_cr(obj, cr);
214}
215
216static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
217{
218 arch_iommu->tlb_load_cr(obj, cr);
219
220 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
221 iommu_write_reg(obj, 1, MMU_LD_TLB);
222}
223
224/**
225 * iotlb_dump_cr - Dump an iommu tlb entry into buf
226 * @obj: target iommu
227 * @cr: contents of cam and ram register
228 * @buf: output buffer
229 **/
230static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
231 char *buf)
232{
233 BUG_ON(!cr || !buf);
234
235 return arch_iommu->dump_cr(obj, cr, buf);
236}
237
238/* only used in iotlb iteration for-loop */
239static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
240{
241 struct cr_regs cr;
242 struct iotlb_lock l;
243
244 iotlb_lock_get(obj, &l);
245 l.vict = n;
246 iotlb_lock_set(obj, &l);
247 iotlb_read_cr(obj, &cr);
248
249 return cr;
250}
251
252/**
253 * load_iotlb_entry - Set an iommu tlb entry
254 * @obj: target iommu
255 * @e: an iommu tlb entry info
256 **/
257int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
258{
259 int err = 0;
260 struct iotlb_lock l;
261 struct cr_regs *cr;
262
263 if (!obj || !obj->nr_tlb_entries || !e)
264 return -EINVAL;
265
266 clk_enable(obj->clk);
267
268 iotlb_lock_get(obj, &l);
269 if (l.base == obj->nr_tlb_entries) {
270 dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
271 err = -EBUSY;
272 goto out;
273 }
274 if (!e->prsvd) {
275 int i;
276 struct cr_regs tmp;
277
278 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
279 if (!iotlb_cr_valid(&tmp))
280 break;
281
282 if (i == obj->nr_tlb_entries) {
283 dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
284 err = -EBUSY;
285 goto out;
286 }
287
288 iotlb_lock_get(obj, &l);
289 } else {
290 l.vict = l.base;
291 iotlb_lock_set(obj, &l);
292 }
293
294 cr = iotlb_alloc_cr(obj, e);
295 if (IS_ERR(cr)) {
296 clk_disable(obj->clk);
297 return PTR_ERR(cr);
298 }
299
300 iotlb_load_cr(obj, cr);
301 kfree(cr);
302
303 if (e->prsvd)
304 l.base++;
305 /* increment victim for next tlb load */
306 if (++l.vict == obj->nr_tlb_entries)
307 l.vict = l.base;
308 iotlb_lock_set(obj, &l);
309out:
310 clk_disable(obj->clk);
311 return err;
312}
313EXPORT_SYMBOL_GPL(load_iotlb_entry);
314
315/**
316 * flush_iotlb_page - Clear an iommu tlb entry
317 * @obj: target iommu
318 * @da: iommu device virtual address
319 *
320 * Clear an iommu tlb entry which includes 'da' address.
321 **/
322void flush_iotlb_page(struct iommu *obj, u32 da)
323{
324 int i;
325 struct cr_regs cr;
326
327 clk_enable(obj->clk);
328
329 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
330 u32 start;
331 size_t bytes;
332
333 if (!iotlb_cr_valid(&cr))
334 continue;
335
336 start = iotlb_cr_to_virt(&cr);
337 bytes = iopgsz_to_bytes(cr.cam & 3);
338
339 if ((start <= da) && (da < start + bytes)) {
340 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
341 __func__, start, da, bytes);
342 iotlb_load_cr(obj, &cr);
343 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
344 }
345 }
346 clk_disable(obj->clk);
347
348 if (i == obj->nr_tlb_entries)
349 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
350}
351EXPORT_SYMBOL_GPL(flush_iotlb_page);
352
353/**
354 * flush_iotlb_range - Clear iommu tlb entries in a range
355 * @obj: target iommu
356 * @start: iommu device virtual address(start)
357 * @end: iommu device virtual address(end)
358 *
359 * Clear the iommu tlb entries covering the range from 'start' to 'end'.
360 **/
361void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
362{
363 u32 da = start;
364
365 while (da < end) {
366 flush_iotlb_page(obj, da);
367 /* FIXME: Optimize for multiple page size */
368 da += IOPTE_SIZE;
369 }
370}
371EXPORT_SYMBOL_GPL(flush_iotlb_range);
372
373/**
374 * flush_iotlb_all - Clear all iommu tlb entries
375 * @obj: target iommu
376 **/
377void flush_iotlb_all(struct iommu *obj)
378{
379 struct iotlb_lock l;
380
381 clk_enable(obj->clk);
382
383 l.base = 0;
384 l.vict = 0;
385 iotlb_lock_set(obj, &l);
386
387 iommu_write_reg(obj, 1, MMU_GFLUSH);
388
389 clk_disable(obj->clk);
390}
391EXPORT_SYMBOL_GPL(flush_iotlb_all);
392
393/**
394 * iommu_set_twl - enable/disable table walking logic
395 * @obj: target iommu
396 * @on: enable/disable
397 *
398 * Function used to enable/disable TWL. If one wants to work
399 * exclusively with locked TLB entries and receive notifications
400 * for TLB miss then call this function to disable TWL.
401 */
402void iommu_set_twl(struct iommu *obj, bool on)
403{
404 clk_enable(obj->clk);
405 arch_iommu->set_twl(obj, on);
406 clk_disable(obj->clk);
407}
408EXPORT_SYMBOL_GPL(iommu_set_twl);
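/*
 * A minimal usage sketch: a client that wants to work only with locked
 * TLB entries and service misses in software can install a fault ISR via
 * iommu_set_isr() (defined later in this file) and then turn the table
 * walking logic off.  The iommu name "isp", my_tlb_miss_isr() and
 * my_resolve_da() are hypothetical placeholders; load_iotlb_entry() is
 * the helper exported above.
 */
static int my_resolve_da(void *priv, u32 da, struct iotlb_entry *e);
					/* hypothetical: da -> tlb entry */

static int my_tlb_miss_isr(struct iommu *obj, u32 da, u32 errs, void *priv)
{
	struct iotlb_entry e;

	if (my_resolve_da(priv, da, &e))
		return -EINVAL;			/* not handled */

	return load_iotlb_entry(obj, &e);	/* 0 means the fault is handled */
}

static int my_iommu_setup(struct iommu *obj, void *priv)
{
	int err;

	err = iommu_set_isr("isp", my_tlb_miss_isr, priv);
	if (err)
		return err;

	/* rely on locked/loaded entries and the ISR only */
	iommu_set_twl(obj, false);
	return 0;
}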
409
410#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
411
412ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
413{
414 if (!obj || !buf)
415 return -EINVAL;
416
417 clk_enable(obj->clk);
418
419 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
420
421 clk_disable(obj->clk);
422
423 return bytes;
424}
425EXPORT_SYMBOL_GPL(iommu_dump_ctx);
426
427static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
428{
429 int i;
430 struct iotlb_lock saved;
431 struct cr_regs tmp;
432 struct cr_regs *p = crs;
433
434 clk_enable(obj->clk);
435 iotlb_lock_get(obj, &saved);
436
437 for_each_iotlb_cr(obj, num, i, tmp) {
438 if (!iotlb_cr_valid(&tmp))
439 continue;
440 *p++ = tmp;
441 }
442
443 iotlb_lock_set(obj, &saved);
444 clk_disable(obj->clk);
445
446 return p - crs;
447}
448
449/**
450 * dump_tlb_entries - dump cr arrays to given buffer
451 * @obj: target iommu
452 * @buf: output buffer
453 **/
454size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
455{
456 int i, num;
457 struct cr_regs *cr;
458 char *p = buf;
459
460 num = bytes / sizeof(*cr);
461 num = min(obj->nr_tlb_entries, num);
462
463 cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
464 if (!cr)
465 return 0;
466
467 num = __dump_tlb_entries(obj, cr, num);
468 for (i = 0; i < num; i++)
469 p += iotlb_dump_cr(obj, cr + i, p);
470 kfree(cr);
471
472 return p - buf;
473}
474EXPORT_SYMBOL_GPL(dump_tlb_entries);
475
476int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
477{
478 return driver_for_each_device(&omap_iommu_driver.driver,
479 NULL, data, fn);
480}
481EXPORT_SYMBOL_GPL(foreach_iommu_device);
482
483#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
484
485/*
486 * H/W pagetable operations
487 */
488static void flush_iopgd_range(u32 *first, u32 *last)
489{
490 /* FIXME: L2 cache should be taken care of if it exists */
491 do {
492 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
493 : : "r" (first));
494 first += L1_CACHE_BYTES / sizeof(*first);
495 } while (first <= last);
496}
497
498static void flush_iopte_range(u32 *first, u32 *last)
499{
500 /* FIXME: L2 cache should be taken care of if it exists */
501 do {
502 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
503 : : "r" (first));
504 first += L1_CACHE_BYTES / sizeof(*first);
505 } while (first <= last);
506}
507
508static void iopte_free(u32 *iopte)
509{
510 /* Note: freed iopte's must be clean ready for re-use */
511 kmem_cache_free(iopte_cachep, iopte);
512}
513
514static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
515{
516 u32 *iopte;
517
518 /* a table has already existed */
519 if (*iopgd)
520 goto pte_ready;
521
522 /*
523 * do the allocation outside the page table lock
524 */
525 spin_unlock(&obj->page_table_lock);
526 iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
527 spin_lock(&obj->page_table_lock);
528
529 if (!*iopgd) {
530 if (!iopte)
531 return ERR_PTR(-ENOMEM);
532
533 *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
534 flush_iopgd_range(iopgd, iopgd);
535
536 dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
537 } else {
538 /* We raced, free the redundant table */
539 iopte_free(iopte);
540 }
541
542pte_ready:
543 iopte = iopte_offset(iopgd, da);
544
545 dev_vdbg(obj->dev,
546 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
547 __func__, da, iopgd, *iopgd, iopte, *iopte);
548
549 return iopte;
550}
551
552static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
553{
554 u32 *iopgd = iopgd_offset(obj, da);
555
556 if ((da | pa) & ~IOSECTION_MASK) {
557 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
558 __func__, da, pa, IOSECTION_SIZE);
559 return -EINVAL;
560 }
561
562 *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
563 flush_iopgd_range(iopgd, iopgd);
564 return 0;
565}
566
567static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
568{
569 u32 *iopgd = iopgd_offset(obj, da);
570 int i;
571
572 if ((da | pa) & ~IOSUPER_MASK) {
573 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
574 __func__, da, pa, IOSUPER_SIZE);
575 return -EINVAL;
576 }
577
578 for (i = 0; i < 16; i++)
579 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
580 flush_iopgd_range(iopgd, iopgd + 15);
581 return 0;
582}
583
584static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
585{
586 u32 *iopgd = iopgd_offset(obj, da);
587 u32 *iopte = iopte_alloc(obj, iopgd, da);
588
589 if (IS_ERR(iopte))
590 return PTR_ERR(iopte);
591
592 *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
593 flush_iopte_range(iopte, iopte);
594
595 dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
596 __func__, da, pa, iopte, *iopte);
597
598 return 0;
599}
600
601static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
602{
603 u32 *iopgd = iopgd_offset(obj, da);
604 u32 *iopte = iopte_alloc(obj, iopgd, da);
605 int i;
606
607 if ((da | pa) & ~IOLARGE_MASK) {
608 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
609 __func__, da, pa, IOLARGE_SIZE);
610 return -EINVAL;
611 }
612
613 if (IS_ERR(iopte))
614 return PTR_ERR(iopte);
615
616 for (i = 0; i < 16; i++)
617 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
618 flush_iopte_range(iopte, iopte + 15);
619 return 0;
620}
621
622static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
623{
624 int (*fn)(struct iommu *, u32, u32, u32);
625 u32 prot;
626 int err;
627
628 if (!obj || !e)
629 return -EINVAL;
630
631 switch (e->pgsz) {
632 case MMU_CAM_PGSZ_16M:
633 fn = iopgd_alloc_super;
634 break;
635 case MMU_CAM_PGSZ_1M:
636 fn = iopgd_alloc_section;
637 break;
638 case MMU_CAM_PGSZ_64K:
639 fn = iopte_alloc_large;
640 break;
641 case MMU_CAM_PGSZ_4K:
642 fn = iopte_alloc_page;
643 break;
644 default:
645 fn = NULL;
646 BUG();
647 break;
648 }
649
650 prot = get_iopte_attr(e);
651
652 spin_lock(&obj->page_table_lock);
653 err = fn(obj, e->da, e->pa, prot);
654 spin_unlock(&obj->page_table_lock);
655
656 return err;
657}
658
659/**
660 * iopgtable_store_entry - Make an iommu pte entry
661 * @obj: target iommu
662 * @e: an iommu tlb entry info
663 **/
664int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
665{
666 int err;
667
668 flush_iotlb_page(obj, e->da);
669 err = iopgtable_store_entry_core(obj, e);
670#ifdef PREFETCH_IOTLB
671 if (!err)
672 load_iotlb_entry(obj, e);
673#endif
674 return err;
675}
676EXPORT_SYMBOL_GPL(iopgtable_store_entry);
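/*
 * A minimal usage sketch: mapping one 4KB page at device address 'da' to
 * physical address 'pa'.  This follows the same sequence omap_iommu_map()
 * uses further below; the MMU_RAM_ENDIAN_LITTLE/MMU_RAM_ELSZ_8 flag names
 * are assumed to come from plat/iommu.h.  The reverse operation is
 * iopgtable_clear_entry(obj, da).
 */
static int my_map_one_page(struct iommu *obj, u32 da, u32 pa)
{
	struct iotlb_entry e;
	u32 flags = MMU_CAM_PGSZ_4K | MMU_RAM_ENDIAN_LITTLE | MMU_RAM_ELSZ_8;

	iotlb_init_entry(&e, da, pa, flags);

	/* writes the L1/L2 entries; with PREFETCH_IOTLB it also preloads the TLB */
	return iopgtable_store_entry(obj, &e);
}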
677
678/**
679 * iopgtable_lookup_entry - Lookup an iommu pte entry
680 * @obj: target iommu
681 * @da: iommu device virtual address
682 * @ppgd: iommu pgd entry pointer to be returned
683 * @ppte: iommu pte entry pointer to be returned
684 **/
685void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
686{
687 u32 *iopgd, *iopte = NULL;
688
689 iopgd = iopgd_offset(obj, da);
690 if (!*iopgd)
691 goto out;
692
693 if (iopgd_is_table(*iopgd))
694 iopte = iopte_offset(iopgd, da);
695out:
696 *ppgd = iopgd;
697 *ppte = iopte;
698}
699EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
700
701static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
702{
703 size_t bytes;
704 u32 *iopgd = iopgd_offset(obj, da);
705 int nent = 1;
706
707 if (!*iopgd)
708 return 0;
709
710 if (iopgd_is_table(*iopgd)) {
711 int i;
712 u32 *iopte = iopte_offset(iopgd, da);
713
714 bytes = IOPTE_SIZE;
715 if (*iopte & IOPTE_LARGE) {
716 nent *= 16;
717 /* rewind to the 1st entry */
718 iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
719 }
720 bytes *= nent;
721 memset(iopte, 0, nent * sizeof(*iopte));
722 flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
723
724 /*
725 * do table walk to check if this table is necessary or not
726 */
727 iopte = iopte_offset(iopgd, 0);
728 for (i = 0; i < PTRS_PER_IOPTE; i++)
729 if (iopte[i])
730 goto out;
731
732 iopte_free(iopte);
733 nent = 1; /* for the next L1 entry */
734 } else {
735 bytes = IOPGD_SIZE;
736 if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
737 nent *= 16;
738 /* rewind to the 1st entry */
739 iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
740 }
741 bytes *= nent;
742 }
743 memset(iopgd, 0, nent * sizeof(*iopgd));
744 flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
745out:
746 return bytes;
747}
748
749/**
750 * iopgtable_clear_entry - Remove an iommu pte entry
751 * @obj: target iommu
752 * @da: iommu device virtual address
753 **/
754size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
755{
756 size_t bytes;
757
758 spin_lock(&obj->page_table_lock);
759
760 bytes = iopgtable_clear_entry_core(obj, da);
761 flush_iotlb_page(obj, da);
762
763 spin_unlock(&obj->page_table_lock);
764
765 return bytes;
766}
767EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
768
769static void iopgtable_clear_entry_all(struct iommu *obj)
770{
771 int i;
772
773 spin_lock(&obj->page_table_lock);
774
775 for (i = 0; i < PTRS_PER_IOPGD; i++) {
776 u32 da;
777 u32 *iopgd;
778
779 da = i << IOPGD_SHIFT;
780 iopgd = iopgd_offset(obj, da);
781
782 if (!*iopgd)
783 continue;
784
785 if (iopgd_is_table(*iopgd))
786 iopte_free(iopte_offset(iopgd, 0));
787
788 *iopgd = 0;
789 flush_iopgd_range(iopgd, iopgd);
790 }
791
792 flush_iotlb_all(obj);
793
794 spin_unlock(&obj->page_table_lock);
795}
796
797/*
798 * Device IOMMU generic operations
799 */
800static irqreturn_t iommu_fault_handler(int irq, void *data)
801{
802 u32 da, errs;
803 u32 *iopgd, *iopte;
804 struct iommu *obj = data;
805
806 if (!obj->refcount)
807 return IRQ_NONE;
808
809 clk_enable(obj->clk);
810 errs = iommu_report_fault(obj, &da);
811 clk_disable(obj->clk);
812 if (errs == 0)
813 return IRQ_HANDLED;
814
815 /* Fault callback or TLB/PTE Dynamic loading */
816 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
817 return IRQ_HANDLED;
818
819 iommu_disable(obj);
820
821 iopgd = iopgd_offset(obj, da);
822
823 if (!iopgd_is_table(*iopgd)) {
824 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
825 "*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
826 return IRQ_NONE;
827 }
828
829 iopte = iopte_offset(iopgd, da);
830
831 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
832 "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
833 iopte, *iopte);
834
835 return IRQ_NONE;
836}
837
838static int device_match_by_alias(struct device *dev, void *data)
839{
840 struct iommu *obj = to_iommu(dev);
841 const char *name = data;
842
843 pr_debug("%s: %s %s\n", __func__, obj->name, name);
844
845 return strcmp(obj->name, name) == 0;
846}
847
848/**
849 * iommu_set_da_range - Set a valid device address range
850 * @obj: target iommu
851 * @start: Start of valid range
852 * @end: End of valid range
853 **/
854int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
855{
856
857 if (!obj)
858 return -EFAULT;
859
860 if (end < start || !IS_ALIGNED(start | end, PAGE_SIZE))
861 return -EINVAL;
862
863 obj->da_start = start;
864 obj->da_end = end;
865
866 return 0;
867}
868EXPORT_SYMBOL_GPL(iommu_set_da_range);
869
870/**
871 * omap_find_iommu_device() - find an omap iommu device by name
872 * @name: name of the iommu device
873 *
874 * The generic iommu API requires the caller to provide the device
875 * they wish to attach to a certain iommu domain.
876 *
877 * Drivers generally should not bother with this as it should just
878 * be taken care of by the DMA-API using dev_archdata.
879 *
880 * This function is provided as an interim solution until the latter
881 * materializes, and omap3isp is fully migrated to the DMA-API.
882 */
883struct device *omap_find_iommu_device(const char *name)
884{
885 return driver_find_device(&omap_iommu_driver.driver, NULL,
886 (void *)name,
887 device_match_by_alias);
888}
889EXPORT_SYMBOL_GPL(omap_find_iommu_device);
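/*
 * A minimal sketch of the intended flow through the generic IOMMU API
 * which this driver registers at init time (register_iommu() in
 * omap_iommu_init() below): allocate a domain, look the omap iommu device
 * up by name, attach, then map.  The name "isp", the addresses and 'prot'
 * are placeholders; in this kernel iommu_map() takes a page order, as
 * used by omap-iovmm.c further down.
 */
static int my_attach_and_map(u32 da, phys_addr_t pa, int prot)
{
	struct iommu_domain *domain;
	struct device *dev;
	int err;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	dev = omap_find_iommu_device("isp");	/* hypothetical iommu name */
	if (!dev) {
		err = -ENODEV;
		goto free_domain;
	}

	err = iommu_attach_device(domain, dev);
	if (err)
		goto free_domain;

	err = iommu_map(domain, da, pa, 0, prot);	/* order 0: one 4KB page */
	if (err) {
		iommu_detach_device(domain, dev);
		goto free_domain;
	}

	return 0;

free_domain:
	iommu_domain_free(domain);
	return err;
}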
890
891/**
892 * omap_iommu_attach() - attach iommu device to an iommu domain
893 * @dev: target omap iommu device
894 * @iopgd: page table
895 **/
896static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
897{
898 int err = -ENOMEM;
899 struct iommu *obj = to_iommu(dev);
900
901 spin_lock(&obj->iommu_lock);
902
903 /* an iommu device can only be attached once */
904 if (++obj->refcount > 1) {
905 dev_err(dev, "%s: already attached!\n", obj->name);
906 err = -EBUSY;
907 goto err_enable;
908 }
909
910 obj->iopgd = iopgd;
911 err = iommu_enable(obj);
912 if (err)
913 goto err_enable;
914 flush_iotlb_all(obj);
915
916 if (!try_module_get(obj->owner))
917 goto err_module;
918
919 spin_unlock(&obj->iommu_lock);
920
921 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
922 return obj;
923
924err_module:
925 if (obj->refcount == 1)
926 iommu_disable(obj);
927err_enable:
928 obj->refcount--;
929 spin_unlock(&obj->iommu_lock);
930 return ERR_PTR(err);
931}
932
933/**
934 * omap_iommu_detach - release iommu device
935 * @obj: target iommu
936 **/
937static void omap_iommu_detach(struct iommu *obj)
938{
939 if (!obj || IS_ERR(obj))
940 return;
941
942 spin_lock(&obj->iommu_lock);
943
944 if (--obj->refcount == 0)
945 iommu_disable(obj);
946
947 module_put(obj->owner);
948
949 obj->iopgd = NULL;
950
951 spin_unlock(&obj->iommu_lock);
952
953 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
954}
955
956int iommu_set_isr(const char *name,
957 int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
958 void *priv),
959 void *isr_priv)
960{
961 struct device *dev;
962 struct iommu *obj;
963
964 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
965 device_match_by_alias);
966 if (!dev)
967 return -ENODEV;
968
969 obj = to_iommu(dev);
970 mutex_lock(&obj->iommu_lock);
971 if (obj->refcount != 0) {
972 mutex_unlock(&obj->iommu_lock);
973 return -EBUSY;
974 }
975 obj->isr = isr;
976 obj->isr_priv = isr_priv;
977 mutex_unlock(&obj->iommu_lock);
978
979 return 0;
980}
981EXPORT_SYMBOL_GPL(iommu_set_isr);
982
983/*
984 * OMAP Device MMU(IOMMU) detection
985 */
986static int __devinit omap_iommu_probe(struct platform_device *pdev)
987{
988 int err = -ENODEV;
989 int irq;
990 struct iommu *obj;
991 struct resource *res;
992 struct iommu_platform_data *pdata = pdev->dev.platform_data;
993
994 if (pdev->num_resources != 2)
995 return -EINVAL;
996
997 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
998 if (!obj)
999 return -ENOMEM;
1000
1001 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
1002 if (IS_ERR(obj->clk))
1003 goto err_clk;
1004
1005 obj->nr_tlb_entries = pdata->nr_tlb_entries;
1006 obj->name = pdata->name;
1007 obj->dev = &pdev->dev;
1008 obj->ctx = (void *)obj + sizeof(*obj);
1009 obj->da_start = pdata->da_start;
1010 obj->da_end = pdata->da_end;
1011
1012 spin_lock_init(&obj->iommu_lock);
1013 mutex_init(&obj->mmap_lock);
1014 spin_lock_init(&obj->page_table_lock);
1015 INIT_LIST_HEAD(&obj->mmap);
1016
1017 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1018 if (!res) {
1019 err = -ENODEV;
1020 goto err_mem;
1021 }
1022
1023 res = request_mem_region(res->start, resource_size(res),
1024 dev_name(&pdev->dev));
1025 if (!res) {
1026 err = -EIO;
1027 goto err_mem;
1028 }
1029
1030 obj->regbase = ioremap(res->start, resource_size(res));
1031 if (!obj->regbase) {
1032 err = -ENOMEM;
1033 goto err_ioremap;
1034 }
1035
1036 irq = platform_get_irq(pdev, 0);
1037 if (irq < 0) {
1038 err = -ENODEV;
1039 goto err_irq;
1040 }
1041 err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
1042 dev_name(&pdev->dev), obj);
1043 if (err < 0)
1044 goto err_irq;
1045 platform_set_drvdata(pdev, obj);
1046
1047 dev_info(&pdev->dev, "%s registered\n", obj->name);
1048 return 0;
1049
1050err_irq:
1051 iounmap(obj->regbase);
1052err_ioremap:
1053 release_mem_region(res->start, resource_size(res));
1054err_mem:
1055 clk_put(obj->clk);
1056err_clk:
1057 kfree(obj);
1058 return err;
1059}
1060
1061static int __devexit omap_iommu_remove(struct platform_device *pdev)
1062{
1063 int irq;
1064 struct resource *res;
1065 struct iommu *obj = platform_get_drvdata(pdev);
1066
1067 platform_set_drvdata(pdev, NULL);
1068
1069 iopgtable_clear_entry_all(obj);
1070
1071 irq = platform_get_irq(pdev, 0);
1072 free_irq(irq, obj);
1073 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1074 release_mem_region(res->start, resource_size(res));
1075 iounmap(obj->regbase);
1076
1077 clk_put(obj->clk);
1078 dev_info(&pdev->dev, "%s removed\n", obj->name);
1079 kfree(obj);
1080 return 0;
1081}
1082
1083static struct platform_driver omap_iommu_driver = {
1084 .probe = omap_iommu_probe,
1085 .remove = __devexit_p(omap_iommu_remove),
1086 .driver = {
1087 .name = "omap-iommu",
1088 },
1089};
1090
1091static void iopte_cachep_ctor(void *iopte)
1092{
1093 clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1094}
1095
1096static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1097 phys_addr_t pa, int order, int prot)
1098{
1099 struct omap_iommu_domain *omap_domain = domain->priv;
1100 struct iommu *oiommu = omap_domain->iommu_dev;
1101 struct device *dev = oiommu->dev;
1102 size_t bytes = PAGE_SIZE << order;
1103 struct iotlb_entry e;
1104 int omap_pgsz;
1105 u32 ret, flags;
1106
1107 /* we only support mapping a single iommu page for now */
1108 omap_pgsz = bytes_to_iopgsz(bytes);
1109 if (omap_pgsz < 0) {
1110 dev_err(dev, "invalid size to map: %d\n", bytes);
1111 return -EINVAL;
1112 }
1113
1114 dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
1115
1116 flags = omap_pgsz | prot;
1117
1118 iotlb_init_entry(&e, da, pa, flags);
1119
1120 ret = iopgtable_store_entry(oiommu, &e);
1121 if (ret) {
1122 dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
1123 return ret;
1124 }
1125
1126 return 0;
1127}
1128
1129static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1130 int order)
1131{
1132 struct omap_iommu_domain *omap_domain = domain->priv;
1133 struct iommu *oiommu = omap_domain->iommu_dev;
1134 struct device *dev = oiommu->dev;
1135 size_t bytes = PAGE_SIZE << order;
1136 size_t ret;
1137
1138 dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
1139
1140 ret = iopgtable_clear_entry(oiommu, da);
1141 if (ret != bytes) {
1142 dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
1143 return -EINVAL;
1144 }
1145
1146 return 0;
1147}
1148
1149static int
1150omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1151{
1152 struct omap_iommu_domain *omap_domain = domain->priv;
1153 struct iommu *oiommu;
1154 int ret = 0;
1155
1156 spin_lock(&omap_domain->lock);
1157
1158 /* only a single device is supported per domain for now */
1159 if (omap_domain->iommu_dev) {
1160 dev_err(dev, "iommu domain is already attached\n");
1161 ret = -EBUSY;
1162 goto out;
1163 }
1164
1165 /* get a handle to and enable the omap iommu */
1166 oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
1167 if (IS_ERR(oiommu)) {
1168 ret = PTR_ERR(oiommu);
1169 dev_err(dev, "can't get omap iommu: %d\n", ret);
1170 goto out;
1171 }
1172
1173 omap_domain->iommu_dev = oiommu;
1174
1175out:
1176 spin_unlock(&omap_domain->lock);
1177 return ret;
1178}
1179
1180static void omap_iommu_detach_dev(struct iommu_domain *domain,
1181 struct device *dev)
1182{
1183 struct omap_iommu_domain *omap_domain = domain->priv;
1184 struct iommu *oiommu = to_iommu(dev);
1185
1186 spin_lock(&omap_domain->lock);
1187
1188 /* only a single device is supported per domain for now */
1189 if (omap_domain->iommu_dev != oiommu) {
1190 dev_err(dev, "invalid iommu device\n");
1191 goto out;
1192 }
1193
1194 iopgtable_clear_entry_all(oiommu);
1195
1196 omap_iommu_detach(oiommu);
1197
1198 omap_domain->iommu_dev = NULL;
1199
1200out:
1201 spin_unlock(&omap_domain->lock);
1202}
1203
1204static int omap_iommu_domain_init(struct iommu_domain *domain)
1205{
1206 struct omap_iommu_domain *omap_domain;
1207
1208 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1209 if (!omap_domain) {
1210 pr_err("kzalloc failed\n");
1211 goto out;
1212 }
1213
1214 omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
1215 if (!omap_domain->pgtable) {
1216 pr_err("kzalloc failed\n");
1217 goto fail_nomem;
1218 }
1219
1220 /*
1221 * should never fail, but please keep this around to ensure
1222 * we keep the hardware happy
1223 */
1224 BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
1225
1226 clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
1227 spin_lock_init(&omap_domain->lock);
1228
1229 domain->priv = omap_domain;
1230
1231 return 0;
1232
1233fail_nomem:
1234 kfree(omap_domain);
1235out:
1236 return -ENOMEM;
1237}
1238
1239/* assume device was already detached */
1240static void omap_iommu_domain_destroy(struct iommu_domain *domain)
1241{
1242 struct omap_iommu_domain *omap_domain = domain->priv;
1243
1244 domain->priv = NULL;
1245
1246 kfree(omap_domain->pgtable);
1247 kfree(omap_domain);
1248}
1249
1250static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1251 unsigned long da)
1252{
1253 struct omap_iommu_domain *omap_domain = domain->priv;
1254 struct iommu *oiommu = omap_domain->iommu_dev;
1255 struct device *dev = oiommu->dev;
1256 u32 *pgd, *pte;
1257 phys_addr_t ret = 0;
1258
1259 iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1260
1261 if (pte) {
1262 if (iopte_is_small(*pte))
1263 ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1264 else if (iopte_is_large(*pte))
1265 ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1266 else
1267 dev_err(dev, "bogus pte 0x%x", *pte);
1268 } else {
1269 if (iopgd_is_section(*pgd))
1270 ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1271 else if (iopgd_is_super(*pgd))
1272 ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1273 else
1274 dev_err(dev, "bogus pgd 0x%x", *pgd);
1275 }
1276
1277 return ret;
1278}
1279
1280static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
1281 unsigned long cap)
1282{
1283 return 0;
1284}
1285
1286static struct iommu_ops omap_iommu_ops = {
1287 .domain_init = omap_iommu_domain_init,
1288 .domain_destroy = omap_iommu_domain_destroy,
1289 .attach_dev = omap_iommu_attach_dev,
1290 .detach_dev = omap_iommu_detach_dev,
1291 .map = omap_iommu_map,
1292 .unmap = omap_iommu_unmap,
1293 .iova_to_phys = omap_iommu_iova_to_phys,
1294 .domain_has_cap = omap_iommu_domain_has_cap,
1295};
1296
1297static int __init omap_iommu_init(void)
1298{
1299 struct kmem_cache *p;
1300 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1301 size_t align = 1 << 10; /* L2 pagetable alignment */
1302
1303 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1304 iopte_cachep_ctor);
1305 if (!p)
1306 return -ENOMEM;
1307 iopte_cachep = p;
1308
1309 register_iommu(&omap_iommu_ops);
1310
1311 return platform_driver_register(&omap_iommu_driver);
1312}
1313module_init(omap_iommu_init);
1314
1315static void __exit omap_iommu_exit(void)
1316{
1317 kmem_cache_destroy(iopte_cachep);
1318
1319 platform_driver_unregister(&omap_iommu_driver);
1320}
1321module_exit(omap_iommu_exit);
1322
1323MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1324MODULE_ALIAS("platform:omap-iommu");
1325MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1326MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 000000000000..809ca124196e
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,923 @@
1/*
2 * omap iommu: simple virtual address space management
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/device.h>
17#include <linux/scatterlist.h>
18#include <linux/iommu.h>
19
20#include <asm/cacheflush.h>
21#include <asm/mach/map.h>
22
23#include <plat/iommu.h>
24#include <plat/iovmm.h>
25
26#include <plat/iopgtable.h>
27
28/*
29 * A device driver needs to create address mappings between:
30 *
31 * - iommu/device address
32 * - physical address
33 * - mpu virtual address
34 *
35 * There are 4 possible patterns for them:
36 *
37 * |iova/ mapping iommu_ page
38 * | da pa va (d)-(p)-(v) function type
39 * ---------------------------------------------------------------------------
40 * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s
41 * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s
42 * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s
43 * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n*
44 *
45 *
46 * 'iova': device iommu virtual address
47 * 'da': alias of 'iova'
48 * 'pa': physical address
49 * 'va': mpu virtual address
50 *
51 * 'c': contiguous memory area
52 * 'd': discontiguous memory area
53 * 'a': anonymous memory allocation
54 * '()': optional feature
55 *
56 * 'n': a normal page (4KB) size is used.
57 * 's': multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
58 *
59 * '*': not yet, but feasible.
60 */
61
62static struct kmem_cache *iovm_area_cachep;
63
64/* return total bytes of sg buffers */
65static size_t sgtable_len(const struct sg_table *sgt)
66{
67 unsigned int i, total = 0;
68 struct scatterlist *sg;
69
70 if (!sgt)
71 return 0;
72
73 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
74 size_t bytes;
75
76 bytes = sg->length;
77
78 if (!iopgsz_ok(bytes)) {
79 pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
80 __func__, i, bytes);
81 return 0;
82 }
83
84 total += bytes;
85 }
86
87 return total;
88}
89#define sgtable_ok(x) (!!sgtable_len(x))
90
91static unsigned max_alignment(u32 addr)
92{
93 int i;
94 unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
95 for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
96 ;
97 return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
98}
99
100/*
101 * calculate the optimal number of sg elements from total bytes based on
102 * iommu superpages
103 */
104static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
105{
106 unsigned nr_entries = 0, ent_sz;
107
108 if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
109 pr_err("%s: wrong size %08x\n", __func__, bytes);
110 return 0;
111 }
112
113 while (bytes) {
114 ent_sz = max_alignment(da | pa);
115 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
116 nr_entries++;
117 da += ent_sz;
118 pa += ent_sz;
119 bytes -= ent_sz;
120 }
121
122 return nr_entries;
123}
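/*
 * Worked example of the split computed above (assumed values for
 * illustration): with da = pa = 0x01000000 (16MB aligned) and
 * bytes = 17MB, the loop emits one 16MB entry followed by one 1MB entry,
 * so sgtable_nents() returns 2.  With the same size but only 1MB
 * alignment (da = pa = 0x00100000), max_alignment() never exceeds SZ_1M
 * and the result is 17 entries of 1MB each.
 */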
124
125/* allocate and initialize sg_table header(a kind of 'superblock') */
126static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
127 u32 da, u32 pa)
128{
129 unsigned int nr_entries;
130 int err;
131 struct sg_table *sgt;
132
133 if (!bytes)
134 return ERR_PTR(-EINVAL);
135
136 if (!IS_ALIGNED(bytes, PAGE_SIZE))
137 return ERR_PTR(-EINVAL);
138
139 if (flags & IOVMF_LINEAR) {
140 nr_entries = sgtable_nents(bytes, da, pa);
141 if (!nr_entries)
142 return ERR_PTR(-EINVAL);
143 } else
144 nr_entries = bytes / PAGE_SIZE;
145
146 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
147 if (!sgt)
148 return ERR_PTR(-ENOMEM);
149
150 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
151 if (err) {
152 kfree(sgt);
153 return ERR_PTR(err);
154 }
155
156 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
157
158 return sgt;
159}
160
161/* free sg_table header(a kind of superblock) */
162static void sgtable_free(struct sg_table *sgt)
163{
164 if (!sgt)
165 return;
166
167 sg_free_table(sgt);
168 kfree(sgt);
169
170 pr_debug("%s: sgt:%p\n", __func__, sgt);
171}
172
173/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
174static void *vmap_sg(const struct sg_table *sgt)
175{
176 u32 va;
177 size_t total;
178 unsigned int i;
179 struct scatterlist *sg;
180 struct vm_struct *new;
181 const struct mem_type *mtype;
182
183 mtype = get_mem_type(MT_DEVICE);
184 if (!mtype)
185 return ERR_PTR(-EINVAL);
186
187 total = sgtable_len(sgt);
188 if (!total)
189 return ERR_PTR(-EINVAL);
190
191 new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
192 if (!new)
193 return ERR_PTR(-ENOMEM);
194 va = (u32)new->addr;
195
196 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
197 size_t bytes;
198 u32 pa;
199 int err;
200
201 pa = sg_phys(sg);
202 bytes = sg->length;
203
204 BUG_ON(bytes != PAGE_SIZE);
205
206 err = ioremap_page(va, pa, mtype);
207 if (err)
208 goto err_out;
209
210 va += bytes;
211 }
212
213 flush_cache_vmap((unsigned long)new->addr,
214 (unsigned long)(new->addr + total));
215 return new->addr;
216
217err_out:
218 WARN_ON(1); /* FIXME: cleanup some mpu mappings */
219 vunmap(new->addr);
220 return ERR_PTR(-EAGAIN);
221}
222
223static inline void vunmap_sg(const void *va)
224{
225 vunmap(va);
226}
227
228static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
229{
230 struct iovm_struct *tmp;
231
232 list_for_each_entry(tmp, &obj->mmap, list) {
233 if ((da >= tmp->da_start) && (da < tmp->da_end)) {
234 size_t len;
235
236 len = tmp->da_end - tmp->da_start;
237
238 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
239 __func__, tmp->da_start, da, tmp->da_end, len,
240 tmp->flags);
241
242 return tmp;
243 }
244 }
245
246 return NULL;
247}
248
249/**
250 * find_iovm_area - find iovma which includes @da
251 * @da: iommu device virtual address
252 *
253 * Find the existing iovma which includes @da
254 */
255struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
256{
257 struct iovm_struct *area;
258
259 mutex_lock(&obj->mmap_lock);
260 area = __find_iovm_area(obj, da);
261 mutex_unlock(&obj->mmap_lock);
262
263 return area;
264}
265EXPORT_SYMBOL_GPL(find_iovm_area);
266
267/*
267 * This finds the hole (area) which fits the requested address and len
268 * in the iovma mmap list, and returns the newly allocated iovma.
270 */
271static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
272 size_t bytes, u32 flags)
273{
274 struct iovm_struct *new, *tmp;
275 u32 start, prev_end, alignment;
276
277 if (!obj || !bytes)
278 return ERR_PTR(-EINVAL);
279
280 start = da;
281 alignment = PAGE_SIZE;
282
283 if (~flags & IOVMF_DA_FIXED) {
284 /* Don't map address 0 */
285 start = obj->da_start ? obj->da_start : alignment;
286
287 if (flags & IOVMF_LINEAR)
288 alignment = iopgsz_max(bytes);
289 start = roundup(start, alignment);
290 } else if (start < obj->da_start || start > obj->da_end ||
291 obj->da_end - start < bytes) {
292 return ERR_PTR(-EINVAL);
293 }
294
295 tmp = NULL;
296 if (list_empty(&obj->mmap))
297 goto found;
298
299 prev_end = 0;
300 list_for_each_entry(tmp, &obj->mmap, list) {
301
302 if (prev_end > start)
303 break;
304
305 if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
306 goto found;
307
308 if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
309 start = roundup(tmp->da_end + 1, alignment);
310
311 prev_end = tmp->da_end;
312 }
313
314 if ((start >= prev_end) && (obj->da_end - start >= bytes))
315 goto found;
316
317 dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
318 __func__, da, bytes, flags);
319
320 return ERR_PTR(-EINVAL);
321
322found:
323 new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
324 if (!new)
325 return ERR_PTR(-ENOMEM);
326
327 new->iommu = obj;
328 new->da_start = start;
329 new->da_end = start + bytes;
330 new->flags = flags;
331
332 /*
333 * keep ascending order of iovmas
334 */
335 if (tmp)
336 list_add_tail(&new->list, &tmp->list);
337 else
338 list_add(&new->list, &obj->mmap);
339
340 dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
341 __func__, new->da_start, start, new->da_end, bytes, flags);
342
343 return new;
344}
345
346static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
347{
348 size_t bytes;
349
350 BUG_ON(!obj || !area);
351
352 bytes = area->da_end - area->da_start;
353
354 dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
355 __func__, area->da_start, area->da_end, bytes, area->flags);
356
357 list_del(&area->list);
358 kmem_cache_free(iovm_area_cachep, area);
359}
360
361/**
362 * da_to_va - convert (d) to (v)
363 * @obj: objective iommu
364 * @da: iommu device virtual address
366 *
367 * Returns mpu virtual addr which corresponds to a given device virtual addr
368 */
369void *da_to_va(struct iommu *obj, u32 da)
370{
371 void *va = NULL;
372 struct iovm_struct *area;
373
374 mutex_lock(&obj->mmap_lock);
375
376 area = __find_iovm_area(obj, da);
377 if (!area) {
378 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
379 goto out;
380 }
381 va = area->va;
382out:
383 mutex_unlock(&obj->mmap_lock);
384
385 return va;
386}
387EXPORT_SYMBOL_GPL(da_to_va);
388
389static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
390{
391 unsigned int i;
392 struct scatterlist *sg;
393 void *va = _va;
394 void *va_end;
395
396 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
397 struct page *pg;
398 const size_t bytes = PAGE_SIZE;
399
400 /*
401 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
402 */
403 pg = vmalloc_to_page(va);
404 BUG_ON(!pg);
405 sg_set_page(sg, pg, bytes, 0);
406
407 va += bytes;
408 }
409
410 va_end = _va + PAGE_SIZE * i;
411}
412
413static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
414{
415 /*
416 * Actually this is not necessary at all, just exists for
417 * consistency of the code readability.
418 */
419 BUG_ON(!sgt);
420}
421
422static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
423 size_t len)
424{
425 unsigned int i;
426 struct scatterlist *sg;
427
428 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
429 unsigned bytes;
430
431 bytes = max_alignment(da | pa);
432 bytes = min_t(unsigned, bytes, iopgsz_max(len));
433
434 BUG_ON(!iopgsz_ok(bytes));
435
436 sg_set_buf(sg, phys_to_virt(pa), bytes);
437 /*
438 * 'pa' is continuous (linear).
439 */
440 pa += bytes;
441 da += bytes;
442 len -= bytes;
443 }
444 BUG_ON(len);
445}
446
447static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
448{
449 /*
450 * Actually this is not necessary at all; it just exists for
451 * consistency and code readability
452 */
453 BUG_ON(!sgt);
454}
455
456/* create 'da' <-> 'pa' mapping from 'sgt' */
457static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
458 const struct sg_table *sgt, u32 flags)
459{
460 int err;
461 unsigned int i, j;
462 struct scatterlist *sg;
463 u32 da = new->da_start;
464 int order;
465
466 if (!domain || !sgt)
467 return -EINVAL;
468
469 BUG_ON(!sgtable_ok(sgt));
470
471 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
472 u32 pa;
473 size_t bytes;
474
475 pa = sg_phys(sg);
476 bytes = sg->length;
477
478 flags &= ~IOVMF_PGSZ_MASK;
479
480 if (bytes_to_iopgsz(bytes) < 0)
481 goto err_out;
482
483 order = get_order(bytes);
484
485 pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
486 i, da, pa, bytes);
487
488 err = iommu_map(domain, da, pa, order, flags);
489 if (err)
490 goto err_out;
491
492 da += bytes;
493 }
494 return 0;
495
496err_out:
497 da = new->da_start;
498
499 for_each_sg(sgt->sgl, sg, i, j) {
500 size_t bytes;
501
502 bytes = sg->length;
503 order = get_order(bytes);
504
505 /* ignore failures.. we're already handling one */
506 iommu_unmap(domain, da, order);
507
508 da += bytes;
509 }
510 return err;
511}
512
513/* release 'da' <-> 'pa' mapping */
514static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
515 struct iovm_struct *area)
516{
517 u32 start;
518 size_t total = area->da_end - area->da_start;
519 const struct sg_table *sgt = area->sgt;
520 struct scatterlist *sg;
521 int i, err;
522
523 BUG_ON(!sgtable_ok(sgt));
524 BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
525
526 start = area->da_start;
527 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
528 size_t bytes;
529 int order;
530
531 bytes = sg->length;
532 order = get_order(bytes);
533
534 err = iommu_unmap(domain, start, order);
535 if (err)
536 break;
537
538 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
539 __func__, start, bytes, area->flags);
540
541 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
542
543 total -= bytes;
544 start += bytes;
545 }
546 BUG_ON(total);
547}
548
549/* template function for all unmapping */
550static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
551 struct iommu *obj, const u32 da,
552 void (*fn)(const void *), u32 flags)
553{
554 struct sg_table *sgt = NULL;
555 struct iovm_struct *area;
556
557 if (!IS_ALIGNED(da, PAGE_SIZE)) {
558 dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
559 return NULL;
560 }
561
562 mutex_lock(&obj->mmap_lock);
563
564 area = __find_iovm_area(obj, da);
565 if (!area) {
566 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
567 goto out;
568 }
569
570 if ((area->flags & flags) != flags) {
571 dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
572 area->flags);
573 goto out;
574 }
575 sgt = (struct sg_table *)area->sgt;
576
577 unmap_iovm_area(domain, obj, area);
578
579 fn(area->va);
580
581 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
582 area->da_start, da, area->da_end,
583 area->da_end - area->da_start, area->flags);
584
585 free_iovm_area(obj, area);
586out:
587 mutex_unlock(&obj->mmap_lock);
588
589 return sgt;
590}
591
592static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
593 u32 da, const struct sg_table *sgt, void *va,
594 size_t bytes, u32 flags)
595{
596 int err = -ENOMEM;
597 struct iovm_struct *new;
598
599 mutex_lock(&obj->mmap_lock);
600
601 new = alloc_iovm_area(obj, da, bytes, flags);
602 if (IS_ERR(new)) {
603 err = PTR_ERR(new);
604 goto err_alloc_iovma;
605 }
606 new->va = va;
607 new->sgt = sgt;
608
609 if (map_iovm_area(domain, new, sgt, new->flags))
610 goto err_map;
611
612 mutex_unlock(&obj->mmap_lock);
613
614 dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
615 __func__, new->da_start, bytes, new->flags, va);
616
617 return new->da_start;
618
619err_map:
620 free_iovm_area(obj, new);
621err_alloc_iovma:
622 mutex_unlock(&obj->mmap_lock);
623 return err;
624}
625
626static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
627 u32 da, const struct sg_table *sgt,
628 void *va, size_t bytes, u32 flags)
629{
630 return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
631}
632
633/**
634 * iommu_vmap - (d)-(p)-(v) address mapper
635 * @obj: objective iommu
636 * @sgt: address of scatter gather table
637 * @flags: iovma and page property
638 *
639 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
640 * All @sgt elements must be io page size aligned.
641 */
642u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
643 const struct sg_table *sgt, u32 flags)
644{
645 size_t bytes;
646 void *va = NULL;
647
648 if (!obj || !obj->dev || !sgt)
649 return -EINVAL;
650
651 bytes = sgtable_len(sgt);
652 if (!bytes)
653 return -EINVAL;
654 bytes = PAGE_ALIGN(bytes);
655
656 if (flags & IOVMF_MMIO) {
657 va = vmap_sg(sgt);
658 if (IS_ERR(va))
659 return PTR_ERR(va);
660 }
661
662 flags |= IOVMF_DISCONT;
663 flags |= IOVMF_MMIO;
664
665 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
666 if (IS_ERR_VALUE(da))
667 vunmap_sg(va);
668
669 return da;
670}
671EXPORT_SYMBOL_GPL(iommu_vmap);
672
673/**
674 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
675 * @obj: objective iommu
676 * @da: iommu device virtual address
677 *
678 * Free the iommu virtually contiguous memory area starting at
679 * @da, which was returned by 'iommu_vmap()'.
680 */
681struct sg_table *
682iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
683{
684 struct sg_table *sgt;
685 /*
686 * 'sgt' was allocated by the caller before 'iommu_vmap()' was called.
687 * Just return 'sgt' to the caller so it can be freed there.
688 */
689 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
690 IOVMF_DISCONT | IOVMF_MMIO);
691 if (!sgt)
692 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
693 return sgt;
694}
695EXPORT_SYMBOL_GPL(iommu_vunmap);
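
/*
 * Usage sketch for iommu_vmap()/iommu_vunmap() (editorial illustration,
 * not part of the driver): map a caller-built, io-page-aligned sg_table
 * into iommu space and tear it down again.  'domain', 'obj' and the
 * sg_table are assumed to be set up elsewhere; da = 0 without
 * IOVMF_DA_FIXED lets the area allocator pick the device address, and a
 * real client would usually OR in whatever extra IOVMF_* page-property
 * flags it needs.
 */
static int example_vmap_cycle(struct iommu_domain *domain, struct iommu *obj,
                              struct sg_table *sgt)
{
        u32 da;

        da = iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* ... let the device use the region starting at 'da' ... */

        /* unmapping hands the original sg_table back; the caller frees it */
        sgt = iommu_vunmap(domain, obj, da);

        return sgt ? 0 : -EINVAL;
}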
696
697/**
698 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
699 * @obj: objective iommu
700 * @da: contiguous iommu virtual memory
701 * @bytes: allocation size
702 * @flags: iovma and page property
703 *
704 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
705 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
706 */
707u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
708 size_t bytes, u32 flags)
709{
710 void *va;
711 struct sg_table *sgt;
712
713 if (!obj || !obj->dev || !bytes)
714 return -EINVAL;
715
716 bytes = PAGE_ALIGN(bytes);
717
718 va = vmalloc(bytes);
719 if (!va)
720 return -ENOMEM;
721
722 flags |= IOVMF_DISCONT;
723 flags |= IOVMF_ALLOC;
724
725 sgt = sgtable_alloc(bytes, flags, da, 0);
726 if (IS_ERR(sgt)) {
727 da = PTR_ERR(sgt);
728 goto err_sgt_alloc;
729 }
730 sgtable_fill_vmalloc(sgt, va);
731
732 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
733 if (IS_ERR_VALUE(da))
734 goto err_iommu_vmap;
735
736 return da;
737
738err_iommu_vmap:
739 sgtable_drain_vmalloc(sgt);
740 sgtable_free(sgt);
741err_sgt_alloc:
742 vfree(va);
743 return da;
744}
745EXPORT_SYMBOL_GPL(iommu_vmalloc);
746
747/**
748 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
749 * @obj: objective iommu
750 * @da: iommu device virtual address
751 *
752 * Frees the iommu virtually contiguous memory area starting at
753 * @da, as obtained from 'iommu_vmalloc()'.
754 */
755void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
756{
757 struct sg_table *sgt;
758
759 sgt = unmap_vm_area(domain, obj, da, vfree,
760 IOVMF_DISCONT | IOVMF_ALLOC);
761 if (!sgt)
762 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
763 sgtable_free(sgt);
764}
765EXPORT_SYMBOL_GPL(iommu_vfree);
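
/*
 * Usage sketch for iommu_vmalloc()/iommu_vfree() (editorial illustration,
 * not part of the driver): allocate a buffer that is discontiguous on the
 * MPU side but contiguous in the device's view.  'domain' and 'obj' are
 * assumed to be set up elsewhere; da = 0 without IOVMF_DA_FIXED lets the
 * allocator choose the device address.
 */
static int example_vmalloc_buffer(struct iommu_domain *domain,
                                  struct iommu *obj, size_t len)
{
        u32 da;

        da = iommu_vmalloc(domain, obj, 0, len, 0);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* ... program the device with 'da'; use da_to_va() for CPU access ... */

        iommu_vfree(domain, obj, da);
        return 0;
}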
766
767static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
768 u32 da, u32 pa, void *va, size_t bytes, u32 flags)
769{
770 struct sg_table *sgt;
771
772 sgt = sgtable_alloc(bytes, flags, da, pa);
773 if (IS_ERR(sgt))
774 return PTR_ERR(sgt);
775
776 sgtable_fill_kmalloc(sgt, pa, da, bytes);
777
778 da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
779 if (IS_ERR_VALUE(da)) {
780 sgtable_drain_kmalloc(sgt);
781 sgtable_free(sgt);
782 }
783
784 return da;
785}
786
787/**
788 * iommu_kmap - (d)-(p)-(v) address mapper
789 * @obj: objective iommu
790 * @da: contiguous iommu virtual memory
791 * @pa: contiguous physical memory
792 * @flags: iovma and page property
793 *
794 * Creates a 1-1-1 mapping and returns @da, which might be
795 * adjusted if 'IOVMF_DA_FIXED' is not set.
796 */
797u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
798 size_t bytes, u32 flags)
799{
800 void *va;
801
802 if (!obj || !obj->dev || !bytes)
803 return -EINVAL;
804
805 bytes = PAGE_ALIGN(bytes);
806
807 va = ioremap(pa, bytes);
808 if (!va)
809 return -ENOMEM;
810
811 flags |= IOVMF_LINEAR;
812 flags |= IOVMF_MMIO;
813
814 da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
815 if (IS_ERR_VALUE(da))
816 iounmap(va);
817
818 return da;
819}
820EXPORT_SYMBOL_GPL(iommu_kmap);
821
822/**
823 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
824 * @obj: objective iommu
825 * @da: iommu device virtual address
826 *
827 * Frees the iommu virtually contiguous memory area starting at
828 * @da, which was passed to and returned by 'iommu_kmap()'.
829 */
830void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
831{
832 struct sg_table *sgt;
833 typedef void (*func_t)(const void *);
834
835 sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
836 IOVMF_LINEAR | IOVMF_MMIO);
837 if (!sgt)
838 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
839 sgtable_free(sgt);
840}
841EXPORT_SYMBOL_GPL(iommu_kunmap);
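
/*
 * Usage sketch for iommu_kmap()/iommu_kunmap() (editorial illustration,
 * not part of the driver): expose an already physically contiguous
 * region, e.g. a hypothetical device-local memory window at 'pa', to the
 * iommu.  'domain' and 'obj' are assumed to be set up elsewhere.
 */
static int example_kmap_window(struct iommu_domain *domain, struct iommu *obj,
                               u32 pa, size_t len)
{
        u32 da;

        da = iommu_kmap(domain, obj, 0, pa, len, 0);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* ... the device can now reach [pa, pa + len) through 'da' ... */

        iommu_kunmap(domain, obj, da);
        return 0;
}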
842
843/**
844 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
845 * @obj: objective iommu
846 * @da: contiguous iommu virtual memory
847 * @bytes: bytes for allocation
848 * @flags: iovma and page property
849 *
850 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
851 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
852 */
853u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
854 size_t bytes, u32 flags)
855{
856 void *va;
857 u32 pa;
858
859 if (!obj || !obj->dev || !bytes)
860 return -EINVAL;
861
862 bytes = PAGE_ALIGN(bytes);
863
864 va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
865 if (!va)
866 return -ENOMEM;
867 pa = virt_to_phys(va);
868
869 flags |= IOVMF_LINEAR;
870 flags |= IOVMF_ALLOC;
871
872 da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
873 if (IS_ERR_VALUE(da))
874 kfree(va);
875
876 return da;
877}
878EXPORT_SYMBOL_GPL(iommu_kmalloc);
879
880/**
881 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
882 * @obj: objective iommu
883 * @da: iommu device virtual address
884 *
885 * Frees the iommu virtually contiguous memory area starting at
886 * @da, which was passed to and returned by 'iommu_kmalloc()'.
887 */
888void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
889{
890 struct sg_table *sgt;
891
892 sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
893 if (!sgt)
894 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
895 sgtable_free(sgt);
896}
897EXPORT_SYMBOL_GPL(iommu_kfree);
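
/*
 * Usage sketch for iommu_kmalloc()/iommu_kfree() (editorial illustration,
 * not part of the driver): grab a small physically contiguous buffer,
 * e.g. for a descriptor list the device walks linearly.  'domain' and
 * 'obj' are assumed to come from elsewhere.
 */
static int example_kmalloc_descs(struct iommu_domain *domain,
                                 struct iommu *obj, size_t len)
{
        u32 da;

        da = iommu_kmalloc(domain, obj, 0, len, 0);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* CPU-side access would go through da_to_va(obj, da) */

        iommu_kfree(domain, obj, da);
        return 0;
}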
898
899
900static int __init iovmm_init(void)
901{
902 const unsigned long flags = SLAB_HWCACHE_ALIGN;
903 struct kmem_cache *p;
904
905 p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
906 flags, NULL);
907 if (!p)
908 return -ENOMEM;
909 iovm_area_cachep = p;
910
911 return 0;
912}
913module_init(iovmm_init);
914
915static void __exit iovmm_exit(void)
916{
917 kmem_cache_destroy(iovm_area_cachep);
918}
919module_exit(iovmm_exit);
920
921MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
922MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
923MODULE_LICENSE("GPL v2");