 drivers/iommu/Kconfig      |  10 ++++++++++
 drivers/iommu/Makefile     |   1 +
 drivers/iommu/tegra-gart.c | 451 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 462 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6bea6962f8ee..76c86da2b411 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,4 +142,14 @@ config OMAP_IOMMU_DEBUG
 
 	  Say N unless you know you need this.
 
+config TEGRA_IOMMU_GART
+	bool "Tegra GART IOMMU Support"
+	depends on ARCH_TEGRA_2x_SOC
+	select IOMMU_API
+	help
+	  Enables support for remapping discontiguous physical memory
+	  shared with the operating system into contiguous I/O virtual
+	  space through the GART (Graphics Address Relocation Table)
+	  hardware included on Tegra SoCs.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 0e36b4934aff..3238a31d260a 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
new file mode 100644
index 000000000000..b21598fc2628
--- /dev/null
+++ b/drivers/iommu/tegra-gart.c
@@ -0,0 +1,451 @@
+/*
+ * IOMMU API for GART in Tegra20
+ *
+ * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define pr_fmt(fmt) "%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+
+#include <asm/cacheflush.h>
+
+/* bitmap of the page sizes currently supported */
+#define GART_IOMMU_PGSIZES	(SZ_4K)
+
+#define GART_CONFIG		0x24
+#define GART_ENTRY_ADDR		0x28
+#define GART_ENTRY_DATA		0x2c
+#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)
+
+#define GART_PAGE_SHIFT		12
+#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
+#define GART_PAGE_MASK						\
+	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
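+
+/*
+ * A GART entry is a single 32-bit word: bit 31 marks the mapping valid,
+ * bits 30:12 hold the physical page frame (see GART_PTE() below), and
+ * GART_PAGE_MASK recovers the physical address from a PTE.
+ */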
+
+struct gart_client {
+	struct device		*dev;
+	struct list_head	list;
+};
+
+struct gart_device {
+	void __iomem		*regs;
+	u32			*savedata;
+	u32			page_count;	/* total remappable size */
+	dma_addr_t		iovmm_base;	/* offset to vmm_area */
+	spinlock_t		pte_lock;	/* for pagetable */
+	struct list_head	client;
+	spinlock_t		client_lock;	/* for client list */
+	struct device		*dev;
+};
+
+static struct gart_device *gart_handle; /* unique for a system */
+
+#define GART_PTE(_pfn)						\
+	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
+
+/*
+ * Any interaction between a block on the PPSB and a block on the APB or
+ * AHB must be followed by a read-back of a PPSB register, to ensure the
+ * APB/AHB bus transaction has completed before new activity is initiated
+ * on the PPSB block.
+ */
+#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
+
+#define for_each_gart_pte(gart, iova)				\
+	for (iova = gart->iovmm_base;				\
+	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
+	     iova += GART_PAGE_SIZE)
+
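+/*
+ * The page table is not memory-mapped directly: a PTE is accessed by
+ * writing its IOVA to the GART_ENTRY_ADDR window and then exchanging the
+ * PTE value through GART_ENTRY_DATA, which is what the two helpers below do.
+ */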
+static inline void gart_set_pte(struct gart_device *gart,
+				unsigned long offs, u32 pte)
+{
+	writel(offs, gart->regs + GART_ENTRY_ADDR);
+	writel(pte, gart->regs + GART_ENTRY_DATA);
+
+	dev_dbg(gart->dev, "%s %08lx:%08x\n",
+		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
+}
+
+static inline unsigned long gart_read_pte(struct gart_device *gart,
+					  unsigned long offs)
+{
+	unsigned long pte;
+
+	writel(offs, gart->regs + GART_ENTRY_ADDR);
+	pte = readl(gart->regs + GART_ENTRY_DATA);
+
+	return pte;
+}
+
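+/*
+ * Program every PTE in the aperture from @data, or clear them all if
+ * @data is NULL, then enable GART translation.
+ */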
+static void do_gart_setup(struct gart_device *gart, const u32 *data)
+{
+	unsigned long iova;
+
+	for_each_gart_pte(gart, iova)
+		gart_set_pte(gart, iova, data ? *(data++) : 0);
+
+	writel(1, gart->regs + GART_CONFIG);
+	FLUSH_GART_REGS(gart);
+}
+
+#ifdef DEBUG
+static void gart_dump_table(struct gart_device *gart)
+{
+	unsigned long iova;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	for_each_gart_pte(gart, iova) {
+		unsigned long pte;
+
+		pte = gart_read_pte(gart, iova);
+
+		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
+			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
+			iova, pte & GART_PAGE_MASK);
+	}
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+}
+#else
+static inline void gart_dump_table(struct gart_device *gart)
+{
+}
+#endif
+
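+/* Check that [iova, iova + bytes) lies entirely within the GART aperture. */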
+static inline bool gart_iova_range_valid(struct gart_device *gart,
+					 unsigned long iova, size_t bytes)
+{
+	unsigned long iova_start, iova_end, gart_start, gart_end;
+
+	iova_start = iova;
+	iova_end = iova_start + bytes - 1;
+	gart_start = gart->iovmm_base;
+	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;
+
+	if (iova_start < gart_start)
+		return false;
+	if (iova_end > gart_end)
+		return false;
+	return true;
+}
+
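+/*
+ * There is a single system-wide GART, so attaching just points the domain
+ * at it and records @dev in the client list; a device may attach only once.
+ */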
+static int gart_iommu_attach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct gart_device *gart;
+	struct gart_client *client, *c;
+	int err = 0;
+
+	gart = dev_get_drvdata(dev->parent);
+	if (!gart)
+		return -EINVAL;
+	domain->priv = gart;
+
+	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+	client->dev = dev;
+
+	spin_lock(&gart->client_lock);
+	list_for_each_entry(c, &gart->client, list) {
+		if (c->dev == dev) {
+			dev_err(gart->dev,
+				"%s is already attached\n", dev_name(dev));
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+	list_add(&client->list, &gart->client);
+	spin_unlock(&gart->client_lock);
+	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
+	return 0;
+
+fail:
+	devm_kfree(gart->dev, client);
+	spin_unlock(&gart->client_lock);
+	return err;
+}
+
+static void gart_iommu_detach_dev(struct iommu_domain *domain,
+				  struct device *dev)
+{
+	struct gart_device *gart = domain->priv;
+	struct gart_client *c;
+
+	spin_lock(&gart->client_lock);
+
+	list_for_each_entry(c, &gart->client, list) {
+		if (c->dev == dev) {
+			list_del(&c->list);
+			devm_kfree(gart->dev, c);
+			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
+			goto out;
+		}
+	}
+	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
+out:
+	spin_unlock(&gart->client_lock);
+}
+
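+/* The GART keeps no per-domain state, so domain init has nothing to do. */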
+static int gart_iommu_domain_init(struct iommu_domain *domain)
+{
+	return 0;
+}
+
+static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct gart_device *gart = domain->priv;
+	struct gart_client *c, *tmp;
+
+	if (!gart)
+		return;
+
+	/*
+	 * Detach any remaining clients inline: calling
+	 * gart_iommu_detach_dev() here would deadlock on client_lock, and
+	 * list_for_each_entry_safe() is required because entries are freed
+	 * while the list is being walked.
+	 */
+	spin_lock(&gart->client_lock);
+	list_for_each_entry_safe(c, tmp, &gart->client, list) {
+		dev_dbg(gart->dev, "Detached %s\n", dev_name(c->dev));
+		list_del(&c->list);
+		devm_kfree(gart->dev, c);
+	}
+	spin_unlock(&gart->client_lock);
+	domain->priv = NULL;
+}
+
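+/*
+ * Map a single GART page: validate the range, check that the physical
+ * page exists, then write the PTE and flush the posted write.
+ */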
+static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
+			  phys_addr_t pa, size_t bytes, int prot)
+{
+	struct gart_device *gart = domain->priv;
+	unsigned long flags;
+	unsigned long pfn;
+
+	if (!gart_iova_range_valid(gart, iova, bytes))
+		return -EINVAL;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	pfn = __phys_to_pfn(pa);
+	if (!pfn_valid(pfn)) {
+		dev_err(gart->dev, "Invalid page: %08x\n", pa);
+		spin_unlock_irqrestore(&gart->pte_lock, flags);
+		return -EINVAL;
+	}
+	gart_set_pte(gart, iova, GART_PTE(pfn));
+	FLUSH_GART_REGS(gart);
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	return 0;
+}
+
+static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			       size_t bytes)
+{
+	struct gart_device *gart = domain->priv;
+	unsigned long flags;
+
+	if (!gart_iova_range_valid(gart, iova, bytes))
+		return 0;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	gart_set_pte(gart, iova, 0);
+	FLUSH_GART_REGS(gart);
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	/* the IOMMU core treats the return value as the size unmapped */
+	return bytes;
+}
+
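+/*
+ * Reverse translation: read the PTE back and recover the physical address
+ * with GART_PAGE_MASK.
+ */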
+static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
+					   unsigned long iova)
+{
+	struct gart_device *gart = domain->priv;
+	unsigned long pte;
+	phys_addr_t pa;
+	unsigned long flags;
+
+	/* a phys_addr_t can't carry a negative errno; 0 means "no mapping" */
+	if (!gart_iova_range_valid(gart, iova, 0))
+		return 0;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	pte = gart_read_pte(gart, iova);
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+
+	pa = (pte & GART_PAGE_MASK);
+	if (!pfn_valid(__phys_to_pfn(pa))) {
+		dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
+		gart_dump_table(gart);
+		return 0;
+	}
+	return pa;
+}
+
+static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
+				     unsigned long cap)
+{
+	return 0;
+}
+
+static struct iommu_ops gart_iommu_ops = {
+	.domain_init	= gart_iommu_domain_init,
+	.domain_destroy	= gart_iommu_domain_destroy,
+	.attach_dev	= gart_iommu_attach_dev,
+	.detach_dev	= gart_iommu_detach_dev,
+	.map		= gart_iommu_map,
+	.unmap		= gart_iommu_unmap,
+	.iova_to_phys	= gart_iommu_iova_to_phys,
+	.domain_has_cap	= gart_iommu_domain_has_cap,
+	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
+};
+
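+/*
+ * The page table lives in GART registers rather than in memory, so its
+ * contents are snapshotted into gart->savedata across suspend and replayed
+ * by do_gart_setup() on resume.
+ */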
+static int tegra_gart_suspend(struct device *dev)
+{
+	struct gart_device *gart = dev_get_drvdata(dev);
+	unsigned long iova;
+	u32 *data = gart->savedata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	for_each_gart_pte(gart, iova)
+		*(data++) = gart_read_pte(gart, iova);
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	return 0;
+}
+
+static int tegra_gart_resume(struct device *dev)
+{
+	struct gart_device *gart = dev_get_drvdata(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gart->pte_lock, flags);
+	do_gart_setup(gart, gart->savedata);
+	spin_unlock_irqrestore(&gart->pte_lock, flags);
+	return 0;
+}
+
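+/*
+ * Probe expects two MEM resources: index 0 holds the GART registers and
+ * index 1 describes the remappable I/O virtual aperture.
+ */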
+static int tegra_gart_probe(struct platform_device *pdev)
+{
+	struct gart_device *gart;
+	struct resource *res, *res_remap;
+	void __iomem *gart_regs;
+	int err;
+	struct device *dev = &pdev->dev;
+
+	if (gart_handle)
+		return -EIO;
+
+	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);
+
+	/* the GART memory aperture is required */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res || !res_remap) {
+		dev_err(dev, "GART memory aperture expected\n");
+		return -ENXIO;
+	}
+
+	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
+	if (!gart) {
+		dev_err(dev, "failed to allocate gart_device\n");
+		return -ENOMEM;
+	}
+
+	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
+	if (!gart_regs) {
+		dev_err(dev, "failed to remap GART registers\n");
+		err = -ENXIO;
+		goto fail;
+	}
+
+	gart->dev = &pdev->dev;
+	spin_lock_init(&gart->pte_lock);
+	spin_lock_init(&gart->client_lock);
+	INIT_LIST_HEAD(&gart->client);
+	gart->regs = gart_regs;
+	gart->iovmm_base = (dma_addr_t)res_remap->start;
+	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
+
+	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
+	if (!gart->savedata) {
+		dev_err(dev, "failed to allocate context save area\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, gart);
+	do_gart_setup(gart, NULL);
+
+	gart_handle = gart;
+	return 0;
+
+fail:
+	if (gart_regs)
+		devm_iounmap(dev, gart_regs);
+	if (gart && gart->savedata)
+		vfree(gart->savedata);
+	devm_kfree(dev, gart);
+	return err;
+}
+
+static int tegra_gart_remove(struct platform_device *pdev)
+{
+	struct gart_device *gart = platform_get_drvdata(pdev);
+	struct device *dev = gart->dev;
+
+	writel(0, gart->regs + GART_CONFIG);
+	if (gart->savedata)
+		vfree(gart->savedata);
+	if (gart->regs)
+		devm_iounmap(dev, gart->regs);
+	devm_kfree(dev, gart);
+	gart_handle = NULL;
+	return 0;
+}
+
+/* referenced only from this file, so static */
+static const struct dev_pm_ops tegra_gart_pm_ops = {
+	.suspend	= tegra_gart_suspend,
+	.resume		= tegra_gart_resume,
+};
+
+static struct platform_driver tegra_gart_driver = {
+	.probe		= tegra_gart_probe,
+	.remove		= tegra_gart_remove,
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "tegra-gart",
+		.pm	= &tegra_gart_pm_ops,
+	},
+};
+
+/* an initcall runs once at boot, so __init rather than __devinit */
+static int __init tegra_gart_init(void)
+{
+	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+	return platform_driver_register(&tegra_gart_driver);
+}
+
+static void __exit tegra_gart_exit(void)
+{
+	platform_driver_unregister(&tegra_gart_driver);
+}
+
+subsys_initcall(tegra_gart_init);
+module_exit(tegra_gart_exit);
+
+MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_LICENSE("GPL v2");
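
For context, a minimal sketch of how a client driver of this kernel
generation might exercise the GART through the generic IOMMU API that the
driver above registers on the platform bus. The device pointer, IOVA and
physical address are placeholder assumptions for illustration; only the
iommu_domain_alloc/iommu_attach_device/iommu_map/iommu_unmap/
iommu_detach_device/iommu_domain_free calls are real core API.

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	/* Hypothetical client: map one 4 KiB page for @dev, then tear it down. */
	static int example_gart_map_one_page(struct device *dev, phys_addr_t pa)
	{
		struct iommu_domain *domain;
		unsigned long iova = 0x58000000;	/* assumed to lie in the aperture */
		int err;

		domain = iommu_domain_alloc(&platform_bus_type);
		if (!domain)
			return -ENOMEM;

		/* ends up in gart_iommu_attach_dev() via the registered ops */
		err = iommu_attach_device(domain, dev);
		if (err)
			goto free_domain;

		err = iommu_map(domain, iova, pa, GART_PAGE_SIZE, 0); /* gart_iommu_map() */
		if (!err)
			iommu_unmap(domain, iova, GART_PAGE_SIZE); /* gart_iommu_unmap() */

		iommu_detach_device(domain, dev);
	free_domain:
		iommu_domain_free(domain);
		return err;
	}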