author		Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2014-07-18 06:49:55 -0400
committer	Joerg Roedel <jroedel@suse.de>				2014-07-29 06:38:07 -0400
commit		baaa7b5d4f1e515a39f1eebd5fb16b67e00b22fb (patch)
tree		99c1c7ae45cc7fa7efb4bc5d6ee7570c0ec0605b
parent		0f198890105b346c975dc0380a98b5d05b5e8a7b (diff)
iommu/omap: Remove virtual memory manager
The OMAP3 ISP driver was the only user of the OMAP IOVMM API. Now that it
has been ported to the DMA API, remove the unused virtual memory manager.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
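For context on the client-side migration mentioned above, the sketch below is illustrative only and is not part of this patch. It assumes the ARM dma-iommu helpers (arm_iommu_create_mapping() and friends) that the OMAP3 ISP port builds on, and shows roughly how a client driver now obtains IOMMU-backed DMA addresses through the generic DMA API instead of the removed omap_iommu_vmap()/omap_iommu_vmalloc() calls. The function name and the IOVA base/size values are hypothetical placeholders, not values taken from this series.

/*
 * Illustrative sketch only -- not part of this patch. Assumes the ARM
 * dma-iommu glue used by the OMAP3 ISP port; function name and IOVA
 * base/size are hypothetical placeholders.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include <asm/dma-iommu.h>

static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* Create a per-device IOVA space managed by the ARM DMA layer. */
	mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* From here on, dma_map_*() on this device goes through the IOMMU. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret < 0) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	return 0;
}

Once attached, buffers are mapped with the ordinary dma_map_sg()/dma_alloc_coherent() calls, so no OMAP-specific virtual memory manager is needed.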
-rw-r--r--	drivers/iommu/Kconfig			 10
-rw-r--r--	drivers/iommu/Makefile			  1
-rw-r--r--	drivers/iommu/omap-iommu-debug.c	114
-rw-r--r--	drivers/iommu/omap-iommu.c		 13
-rw-r--r--	drivers/iommu/omap-iommu.h		  8
-rw-r--r--	drivers/iommu/omap-iovmm.c		791
-rw-r--r--	include/linux/omap-iommu.h		 37
7 files changed, 8 insertions(+), 966 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d260605e6d5f..154e5a838257 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -143,16 +143,12 @@ config OMAP_IOMMU
 	depends on ARCH_OMAP2PLUS
 	select IOMMU_API
 
-config OMAP_IOVMM
-	tristate "OMAP IO Virtual Memory Manager Support"
-	depends on OMAP_IOMMU
-
 config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
-	depends on OMAP_IOVMM && DEBUG_FS
+	tristate "Export OMAP IOMMU internals in DebugFS"
+	depends on OMAP_IOMMU && DEBUG_FS
 	help
 	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU/IOVMM in debugfs.
+	  the internal state of OMAP IOMMU in debugfs.
 
 	  Say N unless you know you need this.
 
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 8893bad048e0..6a4a00ef088b 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
-obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 80fffba7f12d..531658d17333 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -213,116 +213,6 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
 	return bytes;
 }
 
-static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
-			       size_t count, loff_t *ppos)
-{
-	struct device *dev = file->private_data;
-	struct omap_iommu *obj = dev_to_omap_iommu(dev);
-	char *p, *buf;
-	struct iovm_struct *tmp;
-	int uninitialized_var(i);
-	ssize_t bytes;
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
-		     "No", "start", "end", "size", "flags");
-	p += sprintf(p, "-------------------------------------------------\n");
-
-	mutex_lock(&iommu_debug_lock);
-
-	list_for_each_entry(tmp, &obj->mmap, list) {
-		size_t len;
-		const char *str = "%3d %08x-%08x %6x %8x\n";
-		const int maxcol = 39;
-
-		len = tmp->da_end - tmp->da_start;
-		p += snprintf(p, maxcol, str,
-			      i, tmp->da_start, tmp->da_end, len, tmp->flags);
-
-		if (PAGE_SIZE - (p - buf) < maxcol)
-			break;
-		i++;
-	}
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return bytes;
-}
-
-static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
-			      size_t count, loff_t *ppos)
-{
-	struct device *dev = file->private_data;
-	char *p, *buf;
-	struct iovm_struct *area;
-	ssize_t bytes;
-
-	count = min_t(ssize_t, count, PAGE_SIZE);
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	area = omap_find_iovm_area(dev, (u32)ppos);
-	if (!area) {
-		bytes = -EINVAL;
-		goto err_out;
-	}
-	memcpy(p, area->va, count);
-	p += count;
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-err_out:
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return bytes;
-}
-
-static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
-			       size_t count, loff_t *ppos)
-{
-	struct device *dev = file->private_data;
-	struct iovm_struct *area;
-	char *p, *buf;
-
-	count = min_t(size_t, count, PAGE_SIZE);
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	if (copy_from_user(p, userbuf, count)) {
-		count = -EFAULT;
-		goto err_out;
-	}
-
-	area = omap_find_iovm_area(dev, (u32)ppos);
-	if (!area) {
-		count = -EINVAL;
-		goto err_out;
-	}
-	memcpy(area->va, p, count);
-err_out:
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return count;
-}
-
 #define DEBUG_FOPS(name)						\
 	static const struct file_operations debug_##name##_fops = {	\
 		.open = simple_open,					\
@@ -342,8 +232,6 @@ DEBUG_FOPS_RO(ver);
 DEBUG_FOPS_RO(regs);
 DEBUG_FOPS_RO(tlb);
 DEBUG_FOPS(pagetable);
-DEBUG_FOPS_RO(mmap);
-DEBUG_FOPS(mem);
 
 #define __DEBUG_ADD_FILE(attr, mode)					\
 	{								\
@@ -389,8 +277,6 @@ static int iommu_debug_register(struct device *dev, void *data)
 	DEBUG_ADD_FILE_RO(regs);
 	DEBUG_ADD_FILE_RO(tlb);
 	DEBUG_ADD_FILE(pagetable);
-	DEBUG_ADD_FILE_RO(mmap);
-	DEBUG_ADD_FILE(mem);
 
 	return 0;
 
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 895af06a667f..61599e2b33ca 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -959,31 +959,18 @@ static int omap_iommu_probe(struct platform_device *pdev)
 			return err;
 		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
 			return -EINVAL;
-		/*
-		 * da_start and da_end are needed for omap-iovmm, so hardcode
-		 * these values as used by OMAP3 ISP - the only user for
-		 * omap-iovmm
-		 */
-		obj->da_start = 0;
-		obj->da_end = 0xfffff000;
 		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
 			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
 	} else {
 		obj->nr_tlb_entries = pdata->nr_tlb_entries;
 		obj->name = pdata->name;
-		obj->da_start = pdata->da_start;
-		obj->da_end = pdata->da_end;
 	}
-	if (obj->da_end <= obj->da_start)
-		return -EINVAL;
 
 	obj->dev = &pdev->dev;
 	obj->ctx = (void *)obj + sizeof(*obj);
 
 	spin_lock_init(&obj->iommu_lock);
-	mutex_init(&obj->mmap_lock);
 	spin_lock_init(&obj->page_table_lock);
-	INIT_LIST_HEAD(&obj->mmap);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	obj->regbase = devm_ioremap_resource(obj->dev, res);
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index ea920c3e94ff..1275a822934b 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -46,12 +46,7 @@ struct omap_iommu {
 
 	int nr_tlb_entries;
 
-	struct list_head	mmap;
-	struct mutex		mmap_lock; /* protect mmap */
-
 	void *ctx; /* iommu context: registres saved area */
-	u32 da_start;
-	u32 da_end;
 
 	int has_bus_err_back;
 };
@@ -154,9 +149,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_RAM_PADDR_MASK	\
 	((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
 
+#define MMU_RAM_ENDIAN_SHIFT	9
 #define MMU_RAM_ENDIAN_MASK	(1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
 #define MMU_RAM_ENDIAN_BIG	(1 << MMU_RAM_ENDIAN_SHIFT)
 
+#define MMU_RAM_ELSZ_SHIFT	7
 #define MMU_RAM_ELSZ_MASK	(3 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_ELSZ_8		(0 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_ELSZ_16		(1 << MMU_RAM_ELSZ_SHIFT)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
deleted file mode 100644
index f583ba049168..000000000000
--- a/drivers/iommu/omap-iovmm.c
+++ /dev/null
@@ -1,791 +0,0 @@
1/*
2 * omap iommu: simple virtual address space management
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17#include <linux/device.h>
18#include <linux/scatterlist.h>
19#include <linux/iommu.h>
20#include <linux/omap-iommu.h>
21#include <linux/platform_data/iommu-omap.h>
22
23#include <asm/cacheflush.h>
24#include <asm/mach/map.h>
25
26#include "omap-iopgtable.h"
27#include "omap-iommu.h"
28
29/*
30 * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma)
31 *
32 * lower 16 bit is used for h/w and upper 16 bit is for s/w.
33 */
34#define IOVMF_SW_SHIFT 16
35
36/*
37 * iovma: h/w flags derived from cam and ram attribute
38 */
39#define IOVMF_CAM_MASK (~((1 << 10) - 1))
40#define IOVMF_RAM_MASK (~IOVMF_CAM_MASK)
41
42#define IOVMF_PGSZ_MASK (3 << 0)
43#define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M
44#define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K
45#define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K
46#define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M
47
48#define IOVMF_ENDIAN_MASK (1 << 9)
49#define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG
50
51#define IOVMF_ELSZ_MASK (3 << 7)
52#define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16
53#define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32
54#define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE
55
56#define IOVMF_MIXED_MASK (1 << 6)
57#define IOVMF_MIXED MMU_RAM_MIXED
58
59/*
60 * iovma: s/w flags, used for mapping and umapping internally.
61 */
62#define IOVMF_MMIO (1 << IOVMF_SW_SHIFT)
63#define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT)
64#define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT)
65
66/* "superpages" is supported just with physically linear pages */
67#define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT))
68#define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT))
69#define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT))
70
71#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
72
73static struct kmem_cache *iovm_area_cachep;
74
75/* return the offset of the first scatterlist entry in a sg table */
76static unsigned int sgtable_offset(const struct sg_table *sgt)
77{
78 if (!sgt || !sgt->nents)
79 return 0;
80
81 return sgt->sgl->offset;
82}
83
84/* return total bytes of sg buffers */
85static size_t sgtable_len(const struct sg_table *sgt)
86{
87 unsigned int i, total = 0;
88 struct scatterlist *sg;
89
90 if (!sgt)
91 return 0;
92
93 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
94 size_t bytes;
95
96 bytes = sg->length + sg->offset;
97
98 if (!iopgsz_ok(bytes)) {
99 pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
100 __func__, i, bytes, sg->offset);
101 return 0;
102 }
103
104 if (i && sg->offset) {
105 pr_err("%s: sg[%d] offset not allowed in internal entries\n",
106 __func__, i);
107 return 0;
108 }
109
110 total += bytes;
111 }
112
113 return total;
114}
115#define sgtable_ok(x) (!!sgtable_len(x))
116
117static unsigned max_alignment(u32 addr)
118{
119 int i;
120 unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
121 for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
122 ;
123 return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
124}
125
126/*
127 * calculate the optimal number sg elements from total bytes based on
128 * iommu superpages
129 */
130static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
131{
132 unsigned nr_entries = 0, ent_sz;
133
134 if (!PAGE_ALIGNED(bytes)) {
135 pr_err("%s: wrong size %08x\n", __func__, bytes);
136 return 0;
137 }
138
139 while (bytes) {
140 ent_sz = max_alignment(da | pa);
141 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
142 nr_entries++;
143 da += ent_sz;
144 pa += ent_sz;
145 bytes -= ent_sz;
146 }
147
148 return nr_entries;
149}
150
151/* allocate and initialize sg_table header(a kind of 'superblock') */
152static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
153 u32 da, u32 pa)
154{
155 unsigned int nr_entries;
156 int err;
157 struct sg_table *sgt;
158
159 if (!bytes)
160 return ERR_PTR(-EINVAL);
161
162 if (!PAGE_ALIGNED(bytes))
163 return ERR_PTR(-EINVAL);
164
165 if (flags & IOVMF_LINEAR) {
166 nr_entries = sgtable_nents(bytes, da, pa);
167 if (!nr_entries)
168 return ERR_PTR(-EINVAL);
169 } else
170 nr_entries = bytes / PAGE_SIZE;
171
172 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
173 if (!sgt)
174 return ERR_PTR(-ENOMEM);
175
176 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
177 if (err) {
178 kfree(sgt);
179 return ERR_PTR(err);
180 }
181
182 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
183
184 return sgt;
185}
186
187/* free sg_table header(a kind of superblock) */
188static void sgtable_free(struct sg_table *sgt)
189{
190 if (!sgt)
191 return;
192
193 sg_free_table(sgt);
194 kfree(sgt);
195
196 pr_debug("%s: sgt:%p\n", __func__, sgt);
197}
198
199/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
200static void *vmap_sg(const struct sg_table *sgt)
201{
202 u32 va;
203 size_t total;
204 unsigned int i;
205 struct scatterlist *sg;
206 struct vm_struct *new;
207 const struct mem_type *mtype;
208
209 mtype = get_mem_type(MT_DEVICE);
210 if (!mtype)
211 return ERR_PTR(-EINVAL);
212
213 total = sgtable_len(sgt);
214 if (!total)
215 return ERR_PTR(-EINVAL);
216
217 new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
218 if (!new)
219 return ERR_PTR(-ENOMEM);
220 va = (u32)new->addr;
221
222 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
223 size_t bytes;
224 u32 pa;
225 int err;
226
227 pa = sg_phys(sg) - sg->offset;
228 bytes = sg->length + sg->offset;
229
230 BUG_ON(bytes != PAGE_SIZE);
231
232 err = ioremap_page(va, pa, mtype);
233 if (err)
234 goto err_out;
235
236 va += bytes;
237 }
238
239 flush_cache_vmap((unsigned long)new->addr,
240 (unsigned long)(new->addr + total));
241 return new->addr;
242
243err_out:
244 WARN_ON(1); /* FIXME: cleanup some mpu mappings */
245 vunmap(new->addr);
246 return ERR_PTR(-EAGAIN);
247}
248
249static inline void vunmap_sg(const void *va)
250{
251 vunmap(va);
252}
253
254static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
255 const u32 da)
256{
257 struct iovm_struct *tmp;
258
259 list_for_each_entry(tmp, &obj->mmap, list) {
260 if ((da >= tmp->da_start) && (da < tmp->da_end)) {
261 size_t len;
262
263 len = tmp->da_end - tmp->da_start;
264
265 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
266 __func__, tmp->da_start, da, tmp->da_end, len,
267 tmp->flags);
268
269 return tmp;
270 }
271 }
272
273 return NULL;
274}
275
276/**
277 * omap_find_iovm_area - find iovma which includes @da
278 * @dev: client device
279 * @da: iommu device virtual address
280 *
281 * Find the existing iovma starting at @da
282 */
283struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
284{
285 struct omap_iommu *obj = dev_to_omap_iommu(dev);
286 struct iovm_struct *area;
287
288 mutex_lock(&obj->mmap_lock);
289 area = __find_iovm_area(obj, da);
290 mutex_unlock(&obj->mmap_lock);
291
292 return area;
293}
294EXPORT_SYMBOL_GPL(omap_find_iovm_area);
295
296/*
297 * This finds the hole(area) which fits the requested address and len
298 * in iovmas mmap, and returns the new allocated iovma.
299 */
300static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
301 size_t bytes, u32 flags)
302{
303 struct iovm_struct *new, *tmp;
304 u32 start, prev_end, alignment;
305
306 if (!obj || !bytes)
307 return ERR_PTR(-EINVAL);
308
309 start = da;
310 alignment = PAGE_SIZE;
311
312 if (~flags & IOVMF_DA_FIXED) {
313 /* Don't map address 0 */
314 start = obj->da_start ? obj->da_start : alignment;
315
316 if (flags & IOVMF_LINEAR)
317 alignment = iopgsz_max(bytes);
318 start = roundup(start, alignment);
319 } else if (start < obj->da_start || start > obj->da_end ||
320 obj->da_end - start < bytes) {
321 return ERR_PTR(-EINVAL);
322 }
323
324 tmp = NULL;
325 if (list_empty(&obj->mmap))
326 goto found;
327
328 prev_end = 0;
329 list_for_each_entry(tmp, &obj->mmap, list) {
330
331 if (prev_end > start)
332 break;
333
334 if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
335 goto found;
336
337 if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
338 start = roundup(tmp->da_end + 1, alignment);
339
340 prev_end = tmp->da_end;
341 }
342
343 if ((start >= prev_end) && (obj->da_end - start >= bytes))
344 goto found;
345
346 dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
347 __func__, da, bytes, flags);
348
349 return ERR_PTR(-EINVAL);
350
351found:
352 new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
353 if (!new)
354 return ERR_PTR(-ENOMEM);
355
356 new->iommu = obj;
357 new->da_start = start;
358 new->da_end = start + bytes;
359 new->flags = flags;
360
361 /*
362 * keep ascending order of iovmas
363 */
364 if (tmp)
365 list_add_tail(&new->list, &tmp->list);
366 else
367 list_add(&new->list, &obj->mmap);
368
369 dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
370 __func__, new->da_start, start, new->da_end, bytes, flags);
371
372 return new;
373}
374
375static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
376{
377 size_t bytes;
378
379 BUG_ON(!obj || !area);
380
381 bytes = area->da_end - area->da_start;
382
383 dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
384 __func__, area->da_start, area->da_end, bytes, area->flags);
385
386 list_del(&area->list);
387 kmem_cache_free(iovm_area_cachep, area);
388}
389
390/**
391 * omap_da_to_va - convert (d) to (v)
392 * @dev: client device
393 * @da: iommu device virtual address
394 * @va: mpu virtual address
395 *
396 * Returns mpu virtual addr which corresponds to a given device virtual addr
397 */
398void *omap_da_to_va(struct device *dev, u32 da)
399{
400 struct omap_iommu *obj = dev_to_omap_iommu(dev);
401 void *va = NULL;
402 struct iovm_struct *area;
403
404 mutex_lock(&obj->mmap_lock);
405
406 area = __find_iovm_area(obj, da);
407 if (!area) {
408 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
409 goto out;
410 }
411 va = area->va;
412out:
413 mutex_unlock(&obj->mmap_lock);
414
415 return va;
416}
417EXPORT_SYMBOL_GPL(omap_da_to_va);
418
419static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
420{
421 unsigned int i;
422 struct scatterlist *sg;
423 void *va = _va;
424 void *va_end;
425
426 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
427 struct page *pg;
428 const size_t bytes = PAGE_SIZE;
429
430 /*
431 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
432 */
433 pg = vmalloc_to_page(va);
434 BUG_ON(!pg);
435 sg_set_page(sg, pg, bytes, 0);
436
437 va += bytes;
438 }
439
440 va_end = _va + PAGE_SIZE * i;
441}
442
443static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
444{
445 /*
446 * Actually this is not necessary at all, just exists for
447 * consistency of the code readability.
448 */
449 BUG_ON(!sgt);
450}
451
452/* create 'da' <-> 'pa' mapping from 'sgt' */
453static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
454 const struct sg_table *sgt, u32 flags)
455{
456 int err;
457 unsigned int i, j;
458 struct scatterlist *sg;
459 u32 da = new->da_start;
460
461 if (!domain || !sgt)
462 return -EINVAL;
463
464 BUG_ON(!sgtable_ok(sgt));
465
466 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
467 u32 pa;
468 size_t bytes;
469
470 pa = sg_phys(sg) - sg->offset;
471 bytes = sg->length + sg->offset;
472
473 flags &= ~IOVMF_PGSZ_MASK;
474
475 if (bytes_to_iopgsz(bytes) < 0)
476 goto err_out;
477
478 pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
479 i, da, pa, bytes);
480
481 err = iommu_map(domain, da, pa, bytes, flags);
482 if (err)
483 goto err_out;
484
485 da += bytes;
486 }
487 return 0;
488
489err_out:
490 da = new->da_start;
491
492 for_each_sg(sgt->sgl, sg, i, j) {
493 size_t bytes;
494
495 bytes = sg->length + sg->offset;
496
497 /* ignore failures.. we're already handling one */
498 iommu_unmap(domain, da, bytes);
499
500 da += bytes;
501 }
502 return err;
503}
504
505/* release 'da' <-> 'pa' mapping */
506static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
507 struct iovm_struct *area)
508{
509 u32 start;
510 size_t total = area->da_end - area->da_start;
511 const struct sg_table *sgt = area->sgt;
512 struct scatterlist *sg;
513 int i;
514 size_t unmapped;
515
516 BUG_ON(!sgtable_ok(sgt));
517 BUG_ON((!total) || !PAGE_ALIGNED(total));
518
519 start = area->da_start;
520 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
521 size_t bytes;
522
523 bytes = sg->length + sg->offset;
524
525 unmapped = iommu_unmap(domain, start, bytes);
526 if (unmapped < bytes)
527 break;
528
529 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
530 __func__, start, bytes, area->flags);
531
532 BUG_ON(!PAGE_ALIGNED(bytes));
533
534 total -= bytes;
535 start += bytes;
536 }
537 BUG_ON(total);
538}
539
540/* template function for all unmapping */
541static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
542 struct omap_iommu *obj, const u32 da,
543 void (*fn)(const void *), u32 flags)
544{
545 struct sg_table *sgt = NULL;
546 struct iovm_struct *area;
547
548 if (!PAGE_ALIGNED(da)) {
549 dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
550 return NULL;
551 }
552
553 mutex_lock(&obj->mmap_lock);
554
555 area = __find_iovm_area(obj, da);
556 if (!area) {
557 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
558 goto out;
559 }
560
561 if ((area->flags & flags) != flags) {
562 dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
563 area->flags);
564 goto out;
565 }
566 sgt = (struct sg_table *)area->sgt;
567
568 unmap_iovm_area(domain, obj, area);
569
570 fn(area->va);
571
572 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
573 area->da_start, da, area->da_end,
574 area->da_end - area->da_start, area->flags);
575
576 free_iovm_area(obj, area);
577out:
578 mutex_unlock(&obj->mmap_lock);
579
580 return sgt;
581}
582
583static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
584 u32 da, const struct sg_table *sgt, void *va,
585 size_t bytes, u32 flags)
586{
587 int err = -ENOMEM;
588 struct iovm_struct *new;
589
590 mutex_lock(&obj->mmap_lock);
591
592 new = alloc_iovm_area(obj, da, bytes, flags);
593 if (IS_ERR(new)) {
594 err = PTR_ERR(new);
595 goto err_alloc_iovma;
596 }
597 new->va = va;
598 new->sgt = sgt;
599
600 if (map_iovm_area(domain, new, sgt, new->flags))
601 goto err_map;
602
603 mutex_unlock(&obj->mmap_lock);
604
605 dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
606 __func__, new->da_start, bytes, new->flags, va);
607
608 return new->da_start;
609
610err_map:
611 free_iovm_area(obj, new);
612err_alloc_iovma:
613 mutex_unlock(&obj->mmap_lock);
614 return err;
615}
616
617static inline u32
618__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
619 u32 da, const struct sg_table *sgt,
620 void *va, size_t bytes, u32 flags)
621{
622 return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
623}
624
625/**
626 * omap_iommu_vmap - (d)-(p)-(v) address mapper
627 * @domain: iommu domain
628 * @dev: client device
629 * @sgt: address of scatter gather table
630 * @flags: iovma and page property
631 *
632 * Creates 1-n-1 mapping with given @sgt and returns @da.
633 * All @sgt element must be io page size aligned.
634 */
635u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
636 const struct sg_table *sgt, u32 flags)
637{
638 struct omap_iommu *obj = dev_to_omap_iommu(dev);
639 size_t bytes;
640 void *va = NULL;
641
642 if (!obj || !obj->dev || !sgt)
643 return -EINVAL;
644
645 bytes = sgtable_len(sgt);
646 if (!bytes)
647 return -EINVAL;
648 bytes = PAGE_ALIGN(bytes);
649
650 if (flags & IOVMF_MMIO) {
651 va = vmap_sg(sgt);
652 if (IS_ERR(va))
653 return PTR_ERR(va);
654 }
655
656 flags |= IOVMF_DISCONT;
657 flags |= IOVMF_MMIO;
658
659 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
660 if (IS_ERR_VALUE(da))
661 vunmap_sg(va);
662
663 return da + sgtable_offset(sgt);
664}
665EXPORT_SYMBOL_GPL(omap_iommu_vmap);
666
667/**
668 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
669 * @domain: iommu domain
670 * @dev: client device
671 * @da: iommu device virtual address
672 *
673 * Free the iommu virtually contiguous memory area starting at
674 * @da, which was returned by 'omap_iommu_vmap()'.
675 */
676struct sg_table *
677omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
678{
679 struct omap_iommu *obj = dev_to_omap_iommu(dev);
680 struct sg_table *sgt;
681 /*
682 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
683 * Just returns 'sgt' to the caller to free
684 */
685 da &= PAGE_MASK;
686 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
687 IOVMF_DISCONT | IOVMF_MMIO);
688 if (!sgt)
689 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
690 return sgt;
691}
692EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
693
694/**
695 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
696 * @dev: client device
697 * @da: contiguous iommu virtual memory
698 * @bytes: allocation size
699 * @flags: iovma and page property
700 *
701 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
702 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
703 */
704u32
705omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
706 size_t bytes, u32 flags)
707{
708 struct omap_iommu *obj = dev_to_omap_iommu(dev);
709 void *va;
710 struct sg_table *sgt;
711
712 if (!obj || !obj->dev || !bytes)
713 return -EINVAL;
714
715 bytes = PAGE_ALIGN(bytes);
716
717 va = vmalloc(bytes);
718 if (!va)
719 return -ENOMEM;
720
721 flags |= IOVMF_DISCONT;
722 flags |= IOVMF_ALLOC;
723
724 sgt = sgtable_alloc(bytes, flags, da, 0);
725 if (IS_ERR(sgt)) {
726 da = PTR_ERR(sgt);
727 goto err_sgt_alloc;
728 }
729 sgtable_fill_vmalloc(sgt, va);
730
731 da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
732 if (IS_ERR_VALUE(da))
733 goto err_iommu_vmap;
734
735 return da;
736
737err_iommu_vmap:
738 sgtable_drain_vmalloc(sgt);
739 sgtable_free(sgt);
740err_sgt_alloc:
741 vfree(va);
742 return da;
743}
744EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
745
746/**
747 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
748 * @dev: client device
749 * @da: iommu device virtual address
750 *
751 * Frees the iommu virtually continuous memory area starting at
752 * @da, as obtained from 'omap_iommu_vmalloc()'.
753 */
754void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
755 const u32 da)
756{
757 struct omap_iommu *obj = dev_to_omap_iommu(dev);
758 struct sg_table *sgt;
759
760 sgt = unmap_vm_area(domain, obj, da, vfree,
761 IOVMF_DISCONT | IOVMF_ALLOC);
762 if (!sgt)
763 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
764 sgtable_free(sgt);
765}
766EXPORT_SYMBOL_GPL(omap_iommu_vfree);
767
768static int __init iovmm_init(void)
769{
770 const unsigned long flags = SLAB_HWCACHE_ALIGN;
771 struct kmem_cache *p;
772
773 p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
774 flags, NULL);
775 if (!p)
776 return -ENOMEM;
777 iovm_area_cachep = p;
778
779 return 0;
780}
781module_init(iovmm_init);
782
783static void __exit iovmm_exit(void)
784{
785 kmem_cache_destroy(iovm_area_cachep);
786}
787module_exit(iovmm_exit);
788
789MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
790MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
791MODULE_LICENSE("GPL v2");
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
index cac78de09c07..c1aede46718b 100644
--- a/include/linux/omap-iommu.h
+++ b/include/linux/omap-iommu.h
@@ -10,41 +10,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-struct iovm_struct {
-	struct omap_iommu	*iommu;	/* iommu object which this belongs to */
-	u32			da_start; /* area definition */
-	u32			da_end;
-	u32			flags; /* IOVMF_: see below */
-	struct list_head	list; /* linked in ascending order */
-	const struct sg_table	*sgt; /* keep 'page' <-> 'da' mapping */
-	void			*va; /* mpu side mapped address */
-};
-
-#define MMU_RAM_ENDIAN_SHIFT	9
-#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ELSZ_8		(0 << MMU_RAM_ELSZ_SHIFT)
-#define IOVMF_ENDIAN_LITTLE	MMU_RAM_ENDIAN_LITTLE
-#define MMU_RAM_ELSZ_SHIFT	7
-#define IOVMF_ELSZ_8		MMU_RAM_ELSZ_8
-
-struct iommu_domain;
-
-extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);
-extern u32
-omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
-		const struct sg_table *sgt, u32 flags);
-extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
-		struct device *dev, u32 da);
-extern u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
-		u32 da, size_t bytes, u32 flags);
-extern void
-omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
-		const u32 da);
-extern void *omap_da_to_va(struct device *dev, u32 da);
+#ifndef _OMAP_IOMMU_H_
+#define _OMAP_IOMMU_H_
 
 extern void omap_iommu_save_ctx(struct device *dev);
 extern void omap_iommu_restore_ctx(struct device *dev);